repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
airflow | airflow-main/airflow/providers/google/cloud/example_dags/example_cloud_task.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that senses a Cloud Tasks queue being empty.
This DAG relies on the following OS environment variables:
* GCP_PROJECT_ID - Google Cloud project where the Cloud Tasks queue exists.
* GCE_ZONE - Google Cloud zone where the Cloud Tasks queue exists.
* GCP_QUEUE_NAME - Name of the Cloud Tasks queue.
"""
from __future__ import annotations
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.sensors.tasks import TaskQueueEmptySensor
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_ZONE = os.environ.get("GCE_ZONE", "europe-west1-b")
QUEUE_NAME = os.environ.get("GCP_QUEUE_NAME", "testqueue")
with models.DAG(
"example_gcp_cloud_tasks_sensor",
start_date=datetime(2022, 8, 8),
catchup=False,
tags=["example"],
) as dag:
# [START cloud_tasks_empty_sensor]
gcp_cloud_tasks_sensor = TaskQueueEmptySensor(
project_id=GCP_PROJECT_ID,
location=GCP_ZONE,
task_id="gcp_sense_cloud_tasks_empty",
queue_name=QUEUE_NAME,
)
# [END cloud_tasks_empty_sensor]
| 1,896 | 34.12963 | 81 | py |
airflow | airflow-main/airflow/providers/google/cloud/example_dags/example_facebook_ads_to_gcs.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use FacebookAdsReportToGcsOperator.
"""
from __future__ import annotations
import os
from datetime import datetime
from facebook_business.adobjects.adsinsights import AdsInsights
from airflow import models
from airflow.models.baseoperator import chain
from airflow.providers.google.cloud.operators.bigquery import (
BigQueryCreateEmptyDatasetOperator,
BigQueryCreateEmptyTableOperator,
BigQueryDeleteDatasetOperator,
BigQueryInsertJobOperator,
)
from airflow.providers.google.cloud.operators.gcs import GCSCreateBucketOperator, GCSDeleteBucketOperator
from airflow.providers.google.cloud.transfers.facebook_ads_to_gcs import FacebookAdsReportToGcsOperator
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
# [START howto_GCS_env_variables]
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "free-tier-1997")
GCS_BUCKET = os.environ.get("GCS_BUCKET", "airflow_bucket_fb")
GCS_OBJ_PATH = os.environ.get("GCS_OBJ_PATH", "Temp/this_is_my_report_csv.csv")
GCS_CONN_ID = os.environ.get("GCS_CONN_ID", "google_cloud_default")
DATASET_NAME = os.environ.get("DATASET_NAME", "airflow_test_dataset")
TABLE_NAME = os.environ.get("FB_TABLE_NAME", "airflow_test_datatable")
# [END howto_GCS_env_variables]
# [START howto_FB_ADS_variables]
FIELDS = [
AdsInsights.Field.campaign_name,
AdsInsights.Field.campaign_id,
AdsInsights.Field.ad_id,
AdsInsights.Field.clicks,
AdsInsights.Field.impressions,
]
PARAMETERS = {"level": "ad", "date_preset": "yesterday"}
# [END howto_FB_ADS_variables]
with models.DAG(
"example_facebook_ads_to_gcs",
start_date=datetime(2021, 1, 1),
catchup=False,
) as dag:
create_bucket = GCSCreateBucketOperator(
task_id="create_bucket",
bucket_name=GCS_BUCKET,
project_id=GCP_PROJECT_ID,
)
create_dataset = BigQueryCreateEmptyDatasetOperator(
task_id="create_dataset",
dataset_id=DATASET_NAME,
)
create_table = BigQueryCreateEmptyTableOperator(
task_id="create_table",
dataset_id=DATASET_NAME,
table_id=TABLE_NAME,
schema_fields=[
{"name": "campaign_name", "type": "STRING", "mode": "NULLABLE"},
{"name": "campaign_id", "type": "STRING", "mode": "NULLABLE"},
{"name": "ad_id", "type": "STRING", "mode": "NULLABLE"},
{"name": "clicks", "type": "STRING", "mode": "NULLABLE"},
{"name": "impressions", "type": "STRING", "mode": "NULLABLE"},
],
)
# [START howto_operator_facebook_ads_to_gcs]
run_operator = FacebookAdsReportToGcsOperator(
task_id="run_fetch_data",
owner="airflow",
bucket_name=GCS_BUCKET,
parameters=PARAMETERS,
fields=FIELDS,
gcp_conn_id=GCS_CONN_ID,
object_name=GCS_OBJ_PATH,
)
# [END howto_operator_facebook_ads_to_gcs]
load_csv = GCSToBigQueryOperator(
task_id="gcs_to_bq_example",
bucket=GCS_BUCKET,
source_objects=[GCS_OBJ_PATH],
destination_project_dataset_table=f"{DATASET_NAME}.{TABLE_NAME}",
write_disposition="WRITE_TRUNCATE",
)
read_data_from_gcs_many_chunks = BigQueryInsertJobOperator(
task_id="read_data_from_gcs_many_chunks",
configuration={
"query": {
"query": f"SELECT COUNT(*) FROM `{GCP_PROJECT_ID}.{DATASET_NAME}.{TABLE_NAME}`",
"useLegacySql": False,
}
},
)
delete_bucket = GCSDeleteBucketOperator(
task_id="delete_bucket",
bucket_name=GCS_BUCKET,
)
delete_dataset = BigQueryDeleteDatasetOperator(
task_id="delete_dataset",
project_id=GCP_PROJECT_ID,
dataset_id=DATASET_NAME,
delete_contents=True,
)
chain(
create_bucket,
create_dataset,
create_table,
run_operator,
load_csv,
read_data_from_gcs_many_chunks,
delete_bucket,
delete_dataset,
)
| 4,823 | 32.971831 | 105 | py |
airflow | airflow-main/airflow/providers/google/cloud/example_dags/example_dataflow_flex_template.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG for Google Cloud Dataflow service
"""
from __future__ import annotations
import os
from datetime import datetime
from airflow import models
from airflow.providers.google.cloud.operators.dataflow import DataflowStartFlexTemplateOperator
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
DATAFLOW_FLEX_TEMPLATE_JOB_NAME = os.environ.get(
"GCP_DATAFLOW_FLEX_TEMPLATE_JOB_NAME", "dataflow-flex-template"
)
# For simplicity we use the same topic name as the subscription name.
PUBSUB_FLEX_TEMPLATE_TOPIC = os.environ.get(
"GCP_DATAFLOW_PUBSUB_FLEX_TEMPLATE_TOPIC", "dataflow-flex-template"
)
PUBSUB_FLEX_TEMPLATE_SUBSCRIPTION = PUBSUB_FLEX_TEMPLATE_TOPIC
GCS_FLEX_TEMPLATE_TEMPLATE_PATH = os.environ.get(
"GCP_DATAFLOW_GCS_FLEX_TEMPLATE_TEMPLATE_PATH",
"gs://INVALID BUCKET NAME/samples/dataflow/templates/streaming-beam-sql.json",
)
BQ_FLEX_TEMPLATE_DATASET = os.environ.get("GCP_DATAFLOW_BQ_FLEX_TEMPLATE_DATASET", "airflow_dataflow_samples")
BQ_FLEX_TEMPLATE_LOCATION = os.environ.get("GCP_DATAFLOW_BQ_FLEX_TEMPLATE_LOCATION", "us-west1")
with models.DAG(
dag_id="example_gcp_dataflow_flex_template_java",
start_date=datetime(2021, 1, 1),
catchup=False,
) as dag_flex_template:
# [START howto_operator_start_template_job]
start_flex_template = DataflowStartFlexTemplateOperator(
task_id="start_flex_template_streaming_beam_sql",
project_id=GCP_PROJECT_ID,
body={
"launchParameter": {
"containerSpecGcsPath": GCS_FLEX_TEMPLATE_TEMPLATE_PATH,
"jobName": DATAFLOW_FLEX_TEMPLATE_JOB_NAME,
"parameters": {
"inputSubscription": PUBSUB_FLEX_TEMPLATE_SUBSCRIPTION,
"outputTable": f"{GCP_PROJECT_ID}:{BQ_FLEX_TEMPLATE_DATASET}.streaming_beam_sql",
},
}
},
do_xcom_push=True,
location=BQ_FLEX_TEMPLATE_LOCATION,
)
# [END howto_operator_start_template_job]
| 2,801 | 39.028571 | 110 | py |
airflow | airflow-main/airflow/providers/ftp/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.4.2"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-ftp:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,528 | 35.404762 | 112 | py |
airflow | airflow-main/airflow/providers/ftp/operators/ftp.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains FTP operator."""
from __future__ import annotations
import os
from functools import cached_property
from pathlib import Path
from typing import Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.ftp.hooks.ftp import FTPHook, FTPSHook
class FTPOperation:
"""Operation that can be used with FTP."""
PUT = "put"
GET = "get"
class FTPFileTransmitOperator(BaseOperator):
"""
FTPFileTransmitOperator for transferring files from a remote host to local or vice versa.
This operator uses an FTPHook to open an ftp transport channel that serves as the basis for file transfer.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:FTPFileTransmitOperator`
:param ftp_conn_id: :ref:`ftp connection id<howto/connection:ftp>`
from airflow Connections.
:param local_filepath: local file path to get or put. (templated)
:param remote_filepath: remote file path to get or put. (templated)
:param operation: specify operation 'get' or 'put', defaults to put
:param create_intermediate_dirs: create missing intermediate directories when
copying from remote to local and vice-versa. Default is False.
Example: The following task would copy ``file.txt`` to the remote host
at ``/tmp/tmp1/tmp2/`` while creating ``tmp``, ``tmp1`` and ``tmp2`` if they
don't exist. If the ``create_intermediate_dirs`` parameter is not passed, the operation
would fail because the directory does not exist. ::
put_file = FTPFileTransmitOperator(
task_id="test_ftp",
ftp_conn_id="ftp_default",
local_filepath="/tmp/file.txt",
remote_filepath="/tmp/tmp1/tmp2/file.txt",
operation="put",
create_intermediate_dirs=True,
dag=dag
)
"""
template_fields: Sequence[str] = ("local_filepath", "remote_filepath")
def __init__(
self,
*,
ftp_conn_id: str = "ftp_default",
local_filepath: str | list[str],
remote_filepath: str | list[str],
operation: str = FTPOperation.PUT,
create_intermediate_dirs: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.ftp_conn_id = ftp_conn_id
self.operation = operation
self.create_intermediate_dirs = create_intermediate_dirs
self.local_filepath = local_filepath
self.remote_filepath = remote_filepath
@cached_property
def hook(self) -> FTPHook:
"""Create and return an FTPHook."""
return FTPHook(ftp_conn_id=self.ftp_conn_id)
def execute(self, context: Any) -> str | list[str] | None:
file_msg = None
if isinstance(self.local_filepath, str):
local_filepath_array = [self.local_filepath]
else:
local_filepath_array = self.local_filepath
if isinstance(self.remote_filepath, str):
remote_filepath_array = [self.remote_filepath]
else:
remote_filepath_array = self.remote_filepath
if len(local_filepath_array) != len(remote_filepath_array):
raise ValueError(
f"{len(local_filepath_array)} paths in local_filepath "
f"!= {len(remote_filepath_array)} paths in remote_filepath"
)
if self.operation.lower() not in [FTPOperation.GET, FTPOperation.PUT]:
raise TypeError(
f"Unsupported operation value {self.operation}, "
f"expected {FTPOperation.GET} or {FTPOperation.PUT}."
)
for _local_filepath, _remote_filepath in zip(local_filepath_array, remote_filepath_array):
if self.operation.lower() == FTPOperation.GET:
local_folder = os.path.dirname(_local_filepath)
if self.create_intermediate_dirs:
Path(local_folder).mkdir(parents=True, exist_ok=True)
file_msg = f"from {_remote_filepath} to {_local_filepath}"
self.log.info("Starting to transfer %s", file_msg)
self.hook.retrieve_file(_remote_filepath, _local_filepath)
else:
remote_folder = os.path.dirname(_remote_filepath)
if self.create_intermediate_dirs:
self.hook.create_directory(remote_folder)
file_msg = f"from {_local_filepath} to {_remote_filepath}"
self.log.info("Starting to transfer file %s", file_msg)
self.hook.store_file(_remote_filepath, _local_filepath)
return self.local_filepath
class FTPSFileTransmitOperator(FTPFileTransmitOperator):
"""
FTPSFileTransmitOperator for transferring files from a remote host to local or vice versa.
This operator uses an FTPSHook to open an ftps transport channel that serves as the basis for file transfer.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:FTPSFileTransmitOperator`
"""
@cached_property
def hook(self) -> FTPSHook:
"""Create and return an FTPSHook."""
return FTPSHook(ftp_conn_id=self.ftp_conn_id)
| 6,073 | 38.441558 | 104 | py |
airflow | airflow-main/airflow/providers/ftp/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/ftp/hooks/ftp.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
import ftplib
import os.path
from typing import Any, Callable
from airflow.hooks.base import BaseHook
class FTPHook(BaseHook):
"""
Interact with FTP.
Errors that may occur during operations are not handled here and should be handled downstream.
You can specify mode for data transfers in the extra field of your
connection as ``{"passive": "true"}``.
:param ftp_conn_id: The :ref:`ftp connection id <howto/connection:ftp>`
reference.
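A minimal usage sketch (the connection id and paths below are placeholders):
.. code-block:: python
# illustrative values only; adjust the connection id and paths to your setup
hook = FTPHook(ftp_conn_id="ftp_default")
files = hook.list_directory("/incoming")
hook.retrieve_file("/incoming/report.csv", "/tmp/report.csv")
hook.close_conn()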
"""
conn_name_attr = "ftp_conn_id"
default_conn_name = "ftp_default"
conn_type = "ftp"
hook_name = "FTP"
def __init__(self, ftp_conn_id: str = default_conn_name) -> None:
super().__init__()
self.ftp_conn_id = ftp_conn_id
self.conn: ftplib.FTP | None = None
def __enter__(self):
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
if self.conn is not None:
self.close_conn()
def get_conn(self) -> ftplib.FTP:
"""Returns a FTP connection object."""
if self.conn is None:
params = self.get_connection(self.ftp_conn_id)
pasv = params.extra_dejson.get("passive", True)
self.conn = ftplib.FTP(params.host, params.login, params.password)
self.conn.set_pasv(pasv)
return self.conn
def close_conn(self):
"""Closes the connection; an error will occur if the connection was never opened."""
conn = self.conn
conn.quit()
self.conn = None
def describe_directory(self, path: str) -> dict:
"""
Return a dictionary of {filename: {attributes}} for all files on a remote system which supports MLSD.
:param path: full path to the remote directory
"""
conn = self.get_conn()
conn.cwd(path)
files = dict(conn.mlsd())
return files
def list_directory(self, path: str) -> list[str]:
"""
Returns a list of files on the remote system.
:param path: full path to the remote directory to list
"""
conn = self.get_conn()
conn.cwd(path)
files = conn.nlst()
return files
def create_directory(self, path: str) -> None:
"""
Creates a directory on the remote system.
:param path: full path to the remote directory to create
"""
conn = self.get_conn()
conn.mkd(path)
def delete_directory(self, path: str) -> None:
"""
Deletes a directory on the remote system.
:param path: full path to the remote directory to delete
"""
conn = self.get_conn()
conn.rmd(path)
def retrieve_file(
self,
remote_full_path: str,
local_full_path_or_buffer: Any,
callback: Callable | None = None,
block_size: int = 8192,
) -> None:
"""
Transfers the remote file to a local location.
If local_full_path_or_buffer is a string path, the file will be put
at that location; if it is a file-like buffer, the file will
be written to the buffer but not closed.
:param remote_full_path: full path to the remote file
:param local_full_path_or_buffer: full path to the local file or a
file-like buffer
:param callback: callback which is called each time a block of data
is read. if you do not use a callback, these blocks will be written
to the file or buffer passed in. if you do pass in a callback, note
that writing to a file or buffer will need to be handled inside the
callback.
[default: output_handle.write()]
:param block_size: file is transferred in chunks of default size 8192
or as set by user
.. code-block:: python
hook = FTPHook(ftp_conn_id="my_conn")
remote_path = "/path/to/remote/file"
local_path = "/path/to/local/file"
# with a custom callback (in this case displaying progress on each read)
def print_progress(percent_progress):
self.log.info("Percent Downloaded: %s%%" % percent_progress)
total_downloaded = 0
total_file_size = hook.get_size(remote_path)
output_handle = open(local_path, "wb")
def write_to_file_with_progress(data):
nonlocal total_downloaded  # needed so the enclosing counter can be updated
total_downloaded += len(data)
output_handle.write(data)
percent_progress = (total_downloaded / total_file_size) * 100
print_progress(percent_progress)
hook.retrieve_file(remote_path, None, callback=write_to_file_with_progress)
# without a custom callback data is written to the local_path
hook.retrieve_file(remote_path, local_path)
"""
conn = self.get_conn()
is_path = isinstance(local_full_path_or_buffer, str)
# without a callback, default to writing to a user-provided file or
# file-like buffer
if not callback:
if is_path:
output_handle = open(local_full_path_or_buffer, "wb")
else:
output_handle = local_full_path_or_buffer
callback = output_handle.write
remote_path, remote_file_name = os.path.split(remote_full_path)
conn.cwd(remote_path)
self.log.info("Retrieving file from FTP: %s", remote_full_path)
conn.retrbinary(f"RETR {remote_file_name}", callback, block_size)
self.log.info("Finished retrieving file from FTP: %s", remote_full_path)
if is_path and output_handle:
output_handle.close()
def store_file(
self, remote_full_path: str, local_full_path_or_buffer: Any, block_size: int = 8192
) -> None:
"""
Transfers a local file to the remote location.
If local_full_path_or_buffer is a string path, the file will be read
from that location; if it is a file-like buffer, the file will
be read from the buffer but not closed.
:param remote_full_path: full path to the remote file
:param local_full_path_or_buffer: full path to the local file or a
file-like buffer
:param block_size: file is transferred in chunks of default size 8192
or as set by user
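A minimal usage sketch (the connection id and paths below are placeholders):
.. code-block:: python
# illustrative values only; adjust the connection id and paths to your setup
hook = FTPHook(ftp_conn_id="ftp_default")
hook.store_file("/remote/dir/file.txt", "/tmp/file.txt")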
"""
conn = self.get_conn()
is_path = isinstance(local_full_path_or_buffer, str)
if is_path:
input_handle = open(local_full_path_or_buffer, "rb")
else:
input_handle = local_full_path_or_buffer
remote_path, remote_file_name = os.path.split(remote_full_path)
conn.cwd(remote_path)
conn.storbinary(f"STOR {remote_file_name}", input_handle, block_size)
if is_path:
input_handle.close()
def delete_file(self, path: str) -> None:
"""
Removes a file on the FTP Server.
:param path: full path to the remote file
"""
conn = self.get_conn()
conn.delete(path)
def rename(self, from_name: str, to_name: str) -> str:
"""
Rename a file.
:param from_name: rename file from name
:param to_name: rename file to name
"""
conn = self.get_conn()
return conn.rename(from_name, to_name)
def get_mod_time(self, path: str) -> datetime.datetime:
"""
Returns a datetime object representing the last time the file was modified.
:param path: remote file path
"""
conn = self.get_conn()
ftp_mdtm = conn.sendcmd("MDTM " + path)
time_val = ftp_mdtm[4:]
# time_val optionally has microseconds
try:
return datetime.datetime.strptime(time_val, "%Y%m%d%H%M%S.%f")
except ValueError:
return datetime.datetime.strptime(time_val, "%Y%m%d%H%M%S")
def get_size(self, path: str) -> int | None:
"""
Returns the size of a file (in bytes).
:param path: remote file path
"""
conn = self.get_conn()
size = conn.size(path)
return int(size) if size else None
def test_connection(self) -> tuple[bool, str]:
"""Test the FTP connection by calling path with directory."""
try:
conn = self.get_conn()
conn.pwd()
return True, "Connection successfully tested"
except Exception as e:
return False, str(e)
class FTPSHook(FTPHook):
"""Interact with FTPS."""
def get_conn(self) -> ftplib.FTP:
"""Returns a FTPS connection object."""
if self.conn is None:
params = self.get_connection(self.ftp_conn_id)
pasv = params.extra_dejson.get("passive", True)
if params.port:
ftplib.FTP_TLS.port = params.port
self.conn = ftplib.FTP_TLS(params.host, params.login, params.password)
self.conn.set_pasv(pasv)
return self.conn
| 9,788 | 32.523973 | 109 | py |
airflow | airflow-main/airflow/providers/ftp/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/ftp/sensors/ftp.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import ftplib
import re
from typing import TYPE_CHECKING, Sequence
from airflow.providers.ftp.hooks.ftp import FTPHook, FTPSHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class FTPSensor(BaseSensorOperator):
"""
Waits for a file or directory to be present on FTP.
:param path: Remote file or directory path
:param fail_on_transient_errors: Fail on all errors,
including 4xx transient errors. Default True.
:param ftp_conn_id: The :ref:`ftp connection id <howto/connection:ftp>`
reference to run the sensor against.
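A minimal usage sketch (the path and connection id below are placeholders):
.. code-block:: python
# illustrative values only; adjust the path and connection id to your setup
wait_for_file = FTPSensor(
task_id="wait_for_ftp_file",
ftp_conn_id="ftp_default",
path="/incoming/data.csv",
poke_interval=60,
)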
"""
template_fields: Sequence[str] = ("path",)
"""Errors that are transient in nature, and where action can be retried"""
transient_errors = [421, 425, 426, 434, 450, 451, 452]
error_code_pattern = re.compile(r"([\d]+)")
def __init__(
self, *, path: str, ftp_conn_id: str = "ftp_default", fail_on_transient_errors: bool = True, **kwargs
) -> None:
super().__init__(**kwargs)
self.path = path
self.ftp_conn_id = ftp_conn_id
self.fail_on_transient_errors = fail_on_transient_errors
def _create_hook(self) -> FTPHook:
"""Return connection hook."""
return FTPHook(ftp_conn_id=self.ftp_conn_id)
def _get_error_code(self, e):
"""Extract error code from ftp exception."""
try:
matches = self.error_code_pattern.match(str(e))
code = int(matches.group(0))
return code
except (ValueError, AttributeError):  # AttributeError covers error strings without a numeric code
return e
def poke(self, context: Context) -> bool:
with self._create_hook() as hook:
self.log.info("Poking for %s", self.path)
try:
mod_time = hook.get_mod_time(self.path)
self.log.info("Found File %s last modified: %s", str(self.path), str(mod_time))
except ftplib.error_perm as e:
self.log.error("Ftp error encountered: %s", str(e))
error_code = self._get_error_code(e)
if (error_code != 550) and (
self.fail_on_transient_errors or (error_code not in self.transient_errors)
):
raise e
return False
return True
class FTPSSensor(FTPSensor):
"""Waits for a file or directory to be present on FTP over SSL."""
def _create_hook(self) -> FTPHook:
"""Return connection hook."""
return FTPSHook(ftp_conn_id=self.ftp_conn_id)
| 3,364 | 33.690722 | 109 | py |
airflow | airflow-main/airflow/providers/ftp/sensors/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/redis/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-redis:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,530 | 35.452381 | 114 | py |
airflow | airflow-main/airflow/providers/redis/operators/redis_publish.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.redis.hooks.redis import RedisHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class RedisPublishOperator(BaseOperator):
"""
Publish a message to Redis.
:param channel: redis channel to which the message is published (templated)
:param message: the message to publish (templated)
:param redis_conn_id: redis connection to use
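A minimal usage sketch (the channel and message below are placeholders):
.. code-block:: python
# illustrative values only; channel and message are templated fields
publish_task = RedisPublishOperator(
task_id="publish_to_redis",
redis_conn_id="redis_default",
channel="my_channel",
message="Start processing {{ ds }}",
)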
"""
template_fields: Sequence[str] = ("channel", "message")
def __init__(self, *, channel: str, message: str, redis_conn_id: str = "redis_default", **kwargs) -> None:
super().__init__(**kwargs)
self.redis_conn_id = redis_conn_id
self.channel = channel
self.message = message
def execute(self, context: Context) -> None:
"""
Publish the message to Redis channel.
:param context: the context object
"""
redis_hook = RedisHook(redis_conn_id=self.redis_conn_id)
self.log.info("Sending message %s to Redis on channel %s", self.message, self.channel)
result = redis_hook.get_conn().publish(channel=self.channel, message=self.message)
self.log.info("Result of publishing %s", result)
| 2,097 | 33.966667 | 110 | py |
airflow | airflow-main/airflow/providers/redis/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/redis/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/redis/hooks/redis.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""RedisHook module."""
from __future__ import annotations
from redis import Redis
from airflow.hooks.base import BaseHook
class RedisHook(BaseHook):
"""
Wrapper for a connection to interact with the Redis in-memory data structure store.
You can set your db in the extra field of your connection as ``{"db": 3}``.
Also you can set ssl parameters as:
``{"ssl": true, "ssl_cert_reqs": "require", "ssl_cert_file": "/path/to/cert.pem", etc}``.
"""
conn_name_attr = "redis_conn_id"
default_conn_name = "redis_default"
conn_type = "redis"
hook_name = "Redis"
def __init__(self, redis_conn_id: str = default_conn_name) -> None:
"""
Prepares hook to connect to a Redis database.
:param conn_id: the name of the connection that has the parameters
we need to connect to Redis.
"""
super().__init__()
self.redis_conn_id = redis_conn_id
self.redis = None
self.host = None
self.port = None
self.password = None
self.db = None
def get_conn(self):
"""Returns a Redis connection."""
conn = self.get_connection(self.redis_conn_id)
self.host = conn.host
self.port = conn.port
self.password = None if str(conn.password).lower() in ["none", "false", ""] else conn.password
self.db = conn.extra_dejson.get("db")
# check for ssl parameters in conn.extra
ssl_arg_names = [
"ssl",
"ssl_cert_reqs",
"ssl_ca_certs",
"ssl_keyfile",
"ssl_cert_file",
"ssl_check_hostname",
]
ssl_args = {name: val for name, val in conn.extra_dejson.items() if name in ssl_arg_names}
if not self.redis:
self.log.debug(
'Initializing redis object for conn_id "%s" on %s:%s:%s',
self.redis_conn_id,
self.host,
self.port,
self.db,
)
self.redis = Redis(host=self.host, port=self.port, password=self.password, db=self.db, **ssl_args)
return self.redis
| 2,945 | 33.658824 | 110 | py |
airflow | airflow-main/airflow/providers/redis/sensors/redis_pub_sub.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.providers.redis.hooks.redis import RedisHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class RedisPubSubSensor(BaseSensorOperator):
"""
Redis sensor for reading a message from pub sub channels.
:param channels: The channels to be subscribed to (templated)
:param redis_conn_id: the redis connection id
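A minimal usage sketch (the channel name below is a placeholder):
.. code-block:: python
# illustrative values only; adjust the channel and connection id to your setup
wait_for_message = RedisPubSubSensor(
task_id="wait_for_redis_message",
redis_conn_id="redis_default",
channels="my_channel",
poke_interval=30,
)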
"""
template_fields: Sequence[str] = ("channels",)
ui_color = "#f0eee4"
def __init__(self, *, channels: list[str] | str, redis_conn_id: str, **kwargs) -> None:
super().__init__(**kwargs)
self.channels = channels
self.redis_conn_id = redis_conn_id
self.pubsub = RedisHook(redis_conn_id=self.redis_conn_id).get_conn().pubsub()
self.pubsub.subscribe(self.channels)
def poke(self, context: Context) -> bool:
"""
Check for message on subscribed channels and write to xcom the message with key ``message``.
An example of message ``{'type': 'message', 'pattern': None, 'channel': b'test', 'data': b'hello'}``
:param context: the context object
:return: ``True`` if message (with type 'message') is available or ``False`` if not
"""
self.log.info("RedisPubSubSensor checking for message on channels: %s", self.channels)
message = self.pubsub.get_message()
self.log.info("Message %s from channel %s", message, self.channels)
# Process only message types
if message and message["type"] == "message":
context["ti"].xcom_push(key="message", value=message)
self.pubsub.unsubscribe(self.channels)
return True
return False
| 2,586 | 35.957143 | 108 | py |
airflow | airflow-main/airflow/providers/redis/sensors/redis_key.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.providers.redis.hooks.redis import RedisHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class RedisKeySensor(BaseSensorOperator):
"""Checks for the existence of a key in a Redis."""
template_fields: Sequence[str] = ("key",)
ui_color = "#f0eee4"
def __init__(self, *, key: str, redis_conn_id: str, **kwargs) -> None:
super().__init__(**kwargs)
self.redis_conn_id = redis_conn_id
self.key = key
def poke(self, context: Context) -> bool:
self.log.info("Sensor checks for existence of key: %s", self.key)
return RedisHook(self.redis_conn_id).get_conn().exists(self.key)
| 1,585 | 35.883721 | 74 | py |
airflow | airflow-main/airflow/providers/redis/sensors/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/exasol/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-exasol:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,531 | 35.47619 | 115 | py |
airflow | airflow-main/airflow/providers/exasol/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/exasol/operators/exasol.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Sequence
from airflow.providers.common.sql.operators.sql import SQLExecuteQueryOperator
from airflow.providers.exasol.hooks.exasol import exasol_fetch_all_handler
class ExasolOperator(SQLExecuteQueryOperator):
"""
Executes SQL code in a specific Exasol database.
:param sql: the SQL code to be executed as a single string, a list of str
(SQL statements), or a reference to a template file. Template references
are recognized by a str ending in '.sql'.
:param exasol_conn_id: reference to a specific Exasol database
:param autocommit: if True, each command is automatically committed.
(default value: False)
:param parameters: (optional) the parameters to render the SQL query with.
:param schema: (optional) name of the schema which overwrite defined one in connection
:param handler: (optional) handler to process the results of the query
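A minimal usage sketch (the connection id, table and SQL below are placeholders):
.. code-block:: python
# illustrative values only; adjust the connection id and SQL to your setup
drop_stale_rows = ExasolOperator(
task_id="drop_stale_rows",
exasol_conn_id="exasol_default",
sql="DELETE FROM my_schema.my_table WHERE load_date < '2020-01-01'",
autocommit=True,
)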
"""
template_fields: Sequence[str] = ("sql",)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
ui_color = "#ededed"
def __init__(
self,
*,
exasol_conn_id: str = "exasol_default",
schema: str | None = None,
handler=exasol_fetch_all_handler,
**kwargs,
) -> None:
if schema is not None:
hook_params = kwargs.pop("hook_params", {})
kwargs["hook_params"] = {"schema": schema, **hook_params}
super().__init__(conn_id=exasol_conn_id, handler=handler, **kwargs)
| 2,348 | 39.5 | 90 | py |
airflow | airflow-main/airflow/providers/exasol/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/exasol/hooks/exasol.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from contextlib import closing
from typing import Any, Callable, Iterable, Mapping, Sequence
import pandas as pd
import pyexasol
from pyexasol import ExaConnection, ExaStatement
from airflow.providers.common.sql.hooks.sql import DbApiHook, return_single_query_results
class ExasolHook(DbApiHook):
"""Interact with Exasol.
You can specify the pyexasol ``compression``, ``encryption``, ``json_lib``
and ``client_name`` parameters in the extra field of your connection
as ``{"compression": True, "json_lib": "rapidjson", etc}``.
See `pyexasol reference
<https://github.com/badoo/pyexasol/blob/master/docs/REFERENCE.md#connect>`_
for more details.
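A minimal usage sketch (the connection id and queries below are placeholders):
.. code-block:: python
# illustrative values only; adjust the connection id and queries to your setup
hook = ExasolHook(exasol_conn_id="exasol_default")
rows = hook.get_records("SELECT 1")
df = hook.get_pandas_df("SELECT * FROM my_schema.my_table")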
"""
conn_name_attr = "exasol_conn_id"
default_conn_name = "exasol_default"
conn_type = "exasol"
hook_name = "Exasol"
supports_autocommit = True
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.schema = kwargs.pop("schema", None)
def get_conn(self) -> ExaConnection:
conn_id = getattr(self, self.conn_name_attr)
conn = self.get_connection(conn_id)
conn_args = dict(
dsn=f"{conn.host}:{conn.port}",
user=conn.login,
password=conn.password,
schema=self.schema or conn.schema,
)
# check for parameters in conn.extra
for arg_name, arg_val in conn.extra_dejson.items():
if arg_name in ["compression", "encryption", "json_lib", "client_name"]:
conn_args[arg_name] = arg_val
conn = pyexasol.connect(**conn_args)
return conn
def get_pandas_df(self, sql: str, parameters: dict | None = None, **kwargs) -> pd.DataFrame:
"""Execute the SQL and return a Pandas dataframe.
:param sql: The sql statement to be executed (str) or a list of
sql statements to execute.
:param parameters: The parameters to render the SQL query with.
Other keyword arguments are all forwarded into
``pyexasol.ExaConnection.export_to_pandas``.
"""
with closing(self.get_conn()) as conn:
df = conn.export_to_pandas(sql, query_params=parameters, **kwargs)
return df
def get_records(
self,
sql: str | list[str],
parameters: Iterable | Mapping | None = None,
) -> list[dict | tuple[Any, ...]]:
"""Execute the SQL and return a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
with closing(self.get_conn()) as conn:
with closing(conn.execute(sql, parameters)) as cur:
return cur.fetchall()
def get_first(self, sql: str | list[str], parameters: Iterable | Mapping | None = None) -> Any:
"""Execute the SQL and return the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
with closing(self.get_conn()) as conn:
with closing(conn.execute(sql, parameters)) as cur:
return cur.fetchone()
def export_to_file(
self,
filename: str,
query_or_table: str,
query_params: dict | None = None,
export_params: dict | None = None,
) -> None:
"""Export data to a file.
:param filename: Path to the file to which the data has to be exported
:param query_or_table: the sql statement to be executed or table name to export
:param query_params: Query parameters passed to underlying ``export_to_file``
method of :class:`~pyexasol.connection.ExaConnection`.
:param export_params: Extra parameters passed to underlying ``export_to_file``
method of :class:`~pyexasol.connection.ExaConnection`.
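A minimal usage sketch (the file path and table name below are placeholders):
.. code-block:: python
# illustrative values only; adjust the path and table name to your setup
hook = ExasolHook()
hook.export_to_file(
filename="/tmp/my_table.csv",
query_or_table="my_schema.my_table",
)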
"""
self.log.info("Getting data from exasol")
with closing(self.get_conn()) as conn:
conn.export_to_file(
dst=filename,
query_or_table=query_or_table,
query_params=query_params,
export_params=export_params,
)
self.log.info("Data saved to %s", filename)
@staticmethod
def get_description(statement: ExaStatement) -> Sequence[Sequence]:
"""Copied implementation from DB2-API wrapper.
For more info, see
https://github.com/exasol/pyexasol/blob/master/docs/DBAPI_COMPAT.md#db-api-20-wrapper
:param statement: Exasol statement
:return: description sequence of the statement's result columns
"""
cols = []
for k, v in statement.columns().items():
cols.append(
(
k,
v.get("type", None),
v.get("size", None),
v.get("size", None),
v.get("precision", None),
v.get("scale", None),
True,
)
)
return cols
def run(
self,
sql: str | Iterable[str],
autocommit: bool = False,
parameters: Iterable | Mapping | None = None,
handler: Callable | None = None,
split_statements: bool = False,
return_last: bool = True,
) -> Any | list[Any] | None:
"""Run a command or a list of commands.
Pass a list of SQL statements to the SQL parameter to get them to
execute sequentially.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:param parameters: The parameters to render the SQL query with.
:param handler: The result handler which is called with the result of each statement.
:param split_statements: Whether to split a single SQL string into statements and run separately
:param return_last: Whether to return result for only last statement or for all after split
:return: if a handler is provided, the result of the last SQL expression only (or a list of all results when ``return_last`` is False); otherwise None.
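A minimal usage sketch (the SQL below is a placeholder; ``exasol_fetch_all_handler`` is defined in this module):
.. code-block:: python
# illustrative values only; adjust the SQL to your setup
hook = ExasolHook()
rows = hook.run(
sql="SELECT * FROM my_schema.my_table",
handler=exasol_fetch_all_handler,
)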
"""
self.descriptions = []
if isinstance(sql, str):
if split_statements:
sql_list: Iterable[str] = self.split_sql_string(sql)
else:
statement = self.strip_sql_string(sql)
sql_list = [statement] if statement.strip() else []
else:
sql_list = sql
if sql_list:
self.log.debug("Executing following statements against Exasol DB: %s", list(sql_list))
else:
raise ValueError("List of SQL statements is empty")
_last_result = None
with closing(self.get_conn()) as conn:
self.set_autocommit(conn, autocommit)
results = []
for sql_statement in sql_list:
with closing(conn.execute(sql_statement, parameters)) as exa_statement:
self.log.info("Running statement: %s, parameters: %s", sql_statement, parameters)
if handler is not None:
result = handler(exa_statement)
if return_single_query_results(sql, return_last, split_statements):
_last_result = result
_last_columns = self.get_description(exa_statement)
else:
results.append(result)
self.descriptions.append(self.get_description(exa_statement))
self.log.info("Rows affected: %s", exa_statement.rowcount)
# If autocommit was set to False or db does not support autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
if handler is None:
return None
if return_single_query_results(sql, return_last, split_statements):
self.descriptions = [_last_columns]
return _last_result
else:
return results
def set_autocommit(self, conn, autocommit: bool) -> None:
"""Set the autocommit flag on the connection.
:param conn: Connection to set autocommit setting to.
:param autocommit: The autocommit setting to set.
"""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr),
)
conn.set_autocommit(autocommit)
def get_autocommit(self, conn) -> bool:
"""Get autocommit setting for the provided connection.
:param conn: Connection to get autocommit setting from.
:return: connection autocommit setting. True if ``autocommit`` is set
to True on the connection. False if it is either not set, set to
False, or the connection does not support auto-commit.
"""
autocommit = conn.attr.get("autocommit")
if autocommit is None:
autocommit = super().get_autocommit(conn)
return autocommit
@staticmethod
def _serialize_cell(cell, conn=None) -> Any:
"""Override to disable cell serialization.
Exasol will adapt all arguments to the ``execute()`` method internally,
hence we return cell without any conversion.
:param cell: The cell to insert into the table
:param conn: The database connection
:return: The cell
"""
return cell
def exasol_fetch_all_handler(statement: ExaStatement) -> list[tuple] | None:
    """Handler for ``run`` that returns all rows when the statement produced a result set."""
    if statement.result_type == "resultSet":
        return statement.fetchall()
    return None
def exasol_fetch_one_handler(statement: ExaStatement) -> list[tuple] | None:
    """Handler for ``run`` that returns the first row when the statement produced a result set."""
    if statement.result_type == "resultSet":
        return statement.fetchone()
    return None
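# Illustrative sketch: a minimal way to combine the ``run`` method above with
# ``exasol_fetch_all_handler``. It assumes the enclosing hook class is ``ExasolHook`` (defined
# earlier in this module) and that the hook's default connection id is configured; both are
# assumptions for illustration only.
def _example_run_with_fetch_all_handler():
    hook = ExasolHook()  # falls back to the hook's default connection id
    # Run two statements and keep the rows of each one (return_last=False returns both results).
    return hook.run(
        sql="SELECT 1; SELECT 2",
        split_statements=True,
        return_last=False,
        handler=exasol_fetch_all_handler,
    )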
| 10,925 | 38.444043 | 104 | py |
airflow | airflow-main/airflow/providers/common/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/common/sql/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "1.6.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-common-sql:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,535 | 35.571429 | 119 | py |
airflow | airflow-main/airflow/providers/common/sql/operators/sql.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import ast
import re
from functools import cached_property
from typing import TYPE_CHECKING, Any, Callable, Iterable, Mapping, NoReturn, Sequence, SupportsAbs
from airflow.exceptions import AirflowException, AirflowFailException
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator, SkipMixin
from airflow.providers.common.sql.hooks.sql import DbApiHook, fetch_all_handler, return_single_query_results
from airflow.utils.helpers import merge_dicts
if TYPE_CHECKING:
from airflow.providers.openlineage.extractors import OperatorLineage
from airflow.utils.context import Context
def _convert_to_float_if_possible(s: str) -> float | str:
try:
return float(s)
except (ValueError, TypeError):
return s
def _parse_boolean(val: str) -> str | bool:
"""Try to parse a string into boolean.
Raises ValueError if the input is not a valid true- or false-like string value.
"""
val = val.lower()
if val in ("y", "yes", "t", "true", "on", "1"):
return True
if val in ("n", "no", "f", "false", "off", "0"):
return False
raise ValueError(f"{val!r} is not a boolean-like string value")
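# Illustrative sketch: how the two helpers above behave on typical inputs. The values are chosen
# arbitrarily for demonstration of the parsing rules documented in their docstrings.
def _example_parsing_helpers():
    assert _parse_boolean("YES") is True
    assert _parse_boolean("off") is False
    assert _convert_to_float_if_possible("1.5") == 1.5
    assert _convert_to_float_if_possible("n/a") == "n/a"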
def _get_failed_checks(checks, col=None):
"""
    IMPORTANT!!! Keep it for compatibility with the released 8.4.0 version of the google provider.
    Unfortunately, that provider imports ``_get_failed_checks`` and ``parse_boolean`` from this
    module, so these helpers must stay to prevent the 8.4.0 version from failing.
"""
if col:
return [
f"Column: {col}\nCheck: {check},\nCheck Values: {check_values}\n"
for check, check_values in checks.items()
if not check_values["success"]
]
return [
f"\tCheck: {check},\n\tCheck Values: {check_values}\n"
for check, check_values in checks.items()
if not check_values["success"]
]
parse_boolean = _parse_boolean
"""
:sphinx-autoapi-skip:
IMPORTANT!!! Keep it for compatibility with the released 8.4.0 version of the google provider.
Unfortunately, that provider imports ``_get_failed_checks`` and ``parse_boolean`` from this
module, so these helpers must stay to prevent the 8.4.0 version from failing.
"""
_PROVIDERS_MATCHER = re.compile(r"airflow\.providers\.(.*)\.hooks.*")
_MIN_SUPPORTED_PROVIDERS_VERSION = {
"amazon": "4.1.0",
"apache.drill": "2.1.0",
"apache.druid": "3.1.0",
"apache.hive": "3.1.0",
"apache.pinot": "3.1.0",
"databricks": "3.1.0",
"elasticsearch": "4.1.0",
"exasol": "3.1.0",
"google": "8.2.0",
"jdbc": "3.1.0",
"mssql": "3.1.0",
"mysql": "3.1.0",
"odbc": "3.1.0",
"oracle": "3.1.0",
"postgres": "5.1.0",
"presto": "3.1.0",
"qubole": "3.1.0",
"slack": "5.1.0",
"snowflake": "3.1.0",
"sqlite": "3.1.0",
"trino": "3.1.0",
"vertica": "3.1.0",
}
class BaseSQLOperator(BaseOperator):
"""
    This is a base class for generic SQL operators that need a DB hook.
The provided method is .get_db_hook(). The default behavior will try to
retrieve the DB hook based on connection type.
You can customize the behavior by overriding the .get_db_hook() method.
:param conn_id: reference to a specific database
"""
def __init__(
self,
*,
conn_id: str | None = None,
database: str | None = None,
hook_params: dict | None = None,
retry_on_failure: bool = True,
**kwargs,
):
super().__init__(**kwargs)
self.conn_id = conn_id
self.database = database
self.hook_params = {} if hook_params is None else hook_params
self.retry_on_failure = retry_on_failure
@cached_property
def _hook(self):
"""Get DB Hook based on connection type."""
self.log.debug("Get connection for %s", self.conn_id)
conn = BaseHook.get_connection(self.conn_id)
hook = conn.get_hook(hook_params=self.hook_params)
if not isinstance(hook, DbApiHook):
from airflow.hooks.dbapi_hook import DbApiHook as _DbApiHook
if isinstance(hook, _DbApiHook):
# This case might happen if user installed common.sql provider but did not upgrade the
# Other provider's versions to a version that supports common.sql provider
class_module = hook.__class__.__module__
match = _PROVIDERS_MATCHER.match(class_module)
if match:
provider = match.group(1)
min_version = _MIN_SUPPORTED_PROVIDERS_VERSION.get(provider)
if min_version:
raise AirflowException(
f"You are trying to use common-sql with {hook.__class__.__name__},"
f" but the Hook class comes from provider {provider} that does not support it."
f" Please upgrade provider {provider} to at least {min_version}."
)
raise AirflowException(
f"You are trying to use `common-sql` with {hook.__class__.__name__},"
" but its provider does not support it. Please upgrade the provider to a version that"
" supports `common-sql`. The hook class should be a subclass of"
" `airflow.providers.common.sql.hooks.sql.DbApiHook`."
f" Got {hook.__class__.__name__} Hook with class hierarchy: {hook.__class__.mro()}"
)
if self.database:
hook.schema = self.database
return hook
def get_db_hook(self) -> DbApiHook:
"""
Get the database hook for the connection.
:return: the database hook object.
"""
return self._hook
def _raise_exception(self, exception_string: str) -> NoReturn:
if self.retry_on_failure:
raise AirflowException(exception_string)
raise AirflowFailException(exception_string)
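# Illustrative sketch: the docstring above notes that hook retrieval can be customised by
# overriding ``get_db_hook``. The subclass below is hypothetical and only demonstrates the shape
# of such an override.
class _ExampleQuietHookOperator(BaseSQLOperator):
    """Hypothetical subclass that tweaks the hook returned by ``get_db_hook``."""
    def get_db_hook(self) -> DbApiHook:
        hook = super().get_db_hook()
        hook.log_sql = False  # e.g. silence per-statement SQL logging for very large scripts
        return hook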
class SQLExecuteQueryOperator(BaseSQLOperator):
"""
Executes SQL code in a specific database.
    When implementing a specific operator, you can also override the ``_process_output`` method to
    perform additional processing of the values returned by your DB hook. For example, you can join
    the descriptions retrieved from the cursors of your statements with the returned values, or save
    the output of your operator to a file.
:param sql: the SQL code or string pointing to a template file to be executed (templated).
File must have a '.sql' extension.
:param autocommit: (optional) if True, each command is automatically committed (default: False).
:param parameters: (optional) the parameters to render the SQL query with.
:param handler: (optional) the function that will be applied to the cursor (default: fetch_all_handler).
    :param split_statements: (optional) whether to split a single SQL string into statements.
        By default, defers to the default value in the ``run`` method of the configured hook.
    :param return_last: (optional) return only the result of the last statement (default: True).
:param show_return_value_in_logs: (optional) if true operator output will be printed to the task log.
Use with caution. It's not recommended to dump large datasets to the log. (default: False).
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SQLExecuteQueryOperator`
"""
template_fields: Sequence[str] = ("conn_id", "sql", "parameters")
template_ext: Sequence[str] = (".sql", ".json")
template_fields_renderers = {"sql": "sql", "parameters": "json"}
ui_color = "#cdaaed"
def __init__(
self,
*,
sql: str | list[str],
autocommit: bool = False,
parameters: Mapping | Iterable | None = None,
handler: Callable[[Any], Any] = fetch_all_handler,
split_statements: bool | None = None,
return_last: bool = True,
show_return_value_in_logs: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.autocommit = autocommit
self.parameters = parameters
self.handler = handler
self.split_statements = split_statements
self.return_last = return_last
self.show_return_value_in_logs = show_return_value_in_logs
def _process_output(self, results: list[Any], descriptions: list[Sequence[Sequence] | None]) -> list[Any]:
"""
Processes output before it is returned by the operator.
        It can be overridden by the subclass in case some extra processing is needed. Note that,
        unlike the DB API hook's return values, the results passed to and returned by
        ``_process_output`` are always lists of results, one element per SQL statement executed
        (each element is typically a list of rows). Make sure the value you return keeps that
        shape: one element for each statement executed by the hook.
        ``_process_output`` may augment or transform the output as needed. Whatever it returns
        becomes the ``execute`` return value and, if ``do_xcom_push`` is set to True, is pushed
        to XCom.
:param results: results in the form of list of rows.
:param descriptions: list of descriptions returned by ``cur.description`` in the Python DBAPI
"""
if self.show_return_value_in_logs:
self.log.info("Operator output is: %s", results)
return results
def _should_run_output_processing(self) -> bool:
return self.do_xcom_push
def execute(self, context):
self.log.info("Executing: %s", self.sql)
hook = self.get_db_hook()
if self.split_statements is not None:
extra_kwargs = {"split_statements": self.split_statements}
else:
extra_kwargs = {}
output = hook.run(
sql=self.sql,
autocommit=self.autocommit,
parameters=self.parameters,
handler=self.handler if self._should_run_output_processing() else None,
return_last=self.return_last,
**extra_kwargs,
)
if not self._should_run_output_processing():
return None
if return_single_query_results(self.sql, self.return_last, self.split_statements):
# For simplicity, we pass always list as input to _process_output, regardless if
# single query results are going to be returned, and we return the first element
# of the list in this case from the (always) list returned by _process_output
return self._process_output([output], hook.descriptions)[-1]
return self._process_output(output, hook.descriptions)
def prepare_template(self) -> None:
"""Parse template file for attribute parameters."""
if isinstance(self.parameters, str):
self.parameters = ast.literal_eval(self.parameters)
def get_openlineage_facets_on_start(self) -> OperatorLineage | None:
try:
from airflow.providers.openlineage.sqlparser import SQLParser
except ImportError:
return None
hook = self.get_db_hook()
connection = hook.get_connection(getattr(hook, hook.conn_name_attr))
try:
database_info = hook.get_openlineage_database_info(connection)
except AttributeError:
self.log.debug("%s has no database info provided", hook)
database_info = None
if database_info is None:
return None
try:
sql_parser = SQLParser(
dialect=hook.get_openlineage_database_dialect(connection),
default_schema=hook.get_openlineage_default_schema(),
)
except AttributeError:
self.log.debug("%s failed to get database dialect", hook)
return None
operator_lineage = sql_parser.generate_openlineage_metadata_from_sql(
sql=self.sql,
hook=hook,
database_info=database_info,
database=self.database,
sqlalchemy_engine=hook.get_sqlalchemy_engine(),
)
return operator_lineage
def get_openlineage_facets_on_complete(self, task_instance) -> OperatorLineage | None:
try:
from airflow.providers.openlineage.extractors import OperatorLineage
except ImportError:
return None
operator_lineage = self.get_openlineage_facets_on_start() or OperatorLineage()
hook = self.get_db_hook()
try:
database_specific_lineage = hook.get_openlineage_database_specific_lineage(task_instance)
except AttributeError:
database_specific_lineage = None
if database_specific_lineage is None:
return operator_lineage
return OperatorLineage(
inputs=operator_lineage.inputs + database_specific_lineage.inputs,
outputs=operator_lineage.outputs + database_specific_lineage.outputs,
run_facets=merge_dicts(operator_lineage.run_facets, database_specific_lineage.run_facets),
job_facets=merge_dicts(operator_lineage.job_facets, database_specific_lineage.job_facets),
)
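# Illustrative sketch: a minimal task built from the operator above. The connection id "my_db"
# and the "orders" table are assumptions used only for illustration; in a real project the task
# would be declared inside a DAG.
def _example_execute_query_task() -> SQLExecuteQueryOperator:
    return SQLExecuteQueryOperator(
        task_id="collect_order_stats",
        conn_id="my_db",  # hypothetical connection id
        sql="SELECT COUNT(*) FROM orders; SELECT MAX(created_at) FROM orders",
        split_statements=True,
        return_last=False,  # keep the result of every statement, not just the last one
        show_return_value_in_logs=True,
    )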
class SQLColumnCheckOperator(BaseSQLOperator):
"""
Performs one or more of the templated checks in the column_checks dictionary.
Checks are performed on a per-column basis specified by the column_mapping.
Each check can take one or more of the following options:
* ``equal_to``: an exact value to equal, cannot be used with other comparison options
* ``greater_than``: value that result should be strictly greater than
* ``less_than``: value that results should be strictly less than
* ``geq_to``: value that results should be greater than or equal to
* ``leq_to``: value that results should be less than or equal to
* ``tolerance``: the percentage that the result may be off from the expected value
* ``partition_clause``: an extra clause passed into a WHERE statement to partition data
:param table: the table to run checks on
:param column_mapping: the dictionary of columns and their associated checks, e.g.
.. code-block:: python
{
"col_name": {
"null_check": {
"equal_to": 0,
"partition_clause": "foreign_key IS NOT NULL",
},
"min": {
"greater_than": 5,
"leq_to": 10,
"tolerance": 0.2,
},
"max": {"less_than": 1000, "geq_to": 10, "tolerance": 0.01},
}
}
:param partition_clause: a partial SQL statement that is added to a WHERE clause in the query built by
the operator that creates partition_clauses for the checks to run on, e.g.
.. code-block:: python
"date = '1970-01-01'"
:param conn_id: the connection ID used to connect to the database
    :param database: name of database which overrides the one defined in the connection
:param accept_none: whether or not to accept None values returned by the query. If true, converts None
to 0.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SQLColumnCheckOperator`
"""
template_fields = ("partition_clause", "table", "sql")
template_fields_renderers = {"sql": "sql"}
sql_check_template = """
SELECT '{column}' AS col_name, '{check}' AS check_type, {column}_{check} AS check_result
FROM (SELECT {check_statement} AS {column}_{check} FROM {table} {partition_clause}) AS sq
"""
column_checks = {
"null_check": "SUM(CASE WHEN {column} IS NULL THEN 1 ELSE 0 END)",
"distinct_check": "COUNT(DISTINCT({column}))",
"unique_check": "COUNT({column}) - COUNT(DISTINCT({column}))",
"min": "MIN({column})",
"max": "MAX({column})",
}
def __init__(
self,
*,
table: str,
column_mapping: dict[str, dict[str, Any]],
partition_clause: str | None = None,
conn_id: str | None = None,
database: str | None = None,
accept_none: bool = True,
**kwargs,
):
super().__init__(conn_id=conn_id, database=database, **kwargs)
self.table = table
self.column_mapping = column_mapping
self.partition_clause = partition_clause
self.accept_none = accept_none
def _build_checks_sql():
for column, checks in self.column_mapping.items():
for check, check_values in checks.items():
self._column_mapping_validation(check, check_values)
yield self._generate_sql_query(column, checks)
checks_sql = "UNION ALL".join(_build_checks_sql())
self.sql = f"SELECT col_name, check_type, check_result FROM ({checks_sql}) AS check_columns"
def execute(self, context: Context):
hook = self.get_db_hook()
records = hook.get_records(self.sql)
if not records:
self._raise_exception(f"The following query returned zero rows: {self.sql}")
self.log.info("Record: %s", records)
for column, check, result in records:
tolerance = self.column_mapping[column][check].get("tolerance")
self.column_mapping[column][check]["result"] = result
self.column_mapping[column][check]["success"] = self._get_match(
self.column_mapping[column][check], result, tolerance
)
failed_tests = [
f"Column: {col}\n\tCheck: {check},\n\tCheck Values: {check_values}\n"
for col, checks in self.column_mapping.items()
for check, check_values in checks.items()
if not check_values["success"]
]
if failed_tests:
exception_string = (
f"Test failed.\nResults:\n{records!s}\n"
f"The following tests have failed:\n{''.join(failed_tests)}"
)
self._raise_exception(exception_string)
self.log.info("All tests have passed")
def _generate_sql_query(self, column, checks):
def _generate_partition_clause(check):
if self.partition_clause and "partition_clause" not in checks[check]:
return f"WHERE {self.partition_clause}"
elif not self.partition_clause and "partition_clause" in checks[check]:
return f"WHERE {checks[check]['partition_clause']}"
elif self.partition_clause and "partition_clause" in checks[check]:
return f"WHERE {self.partition_clause} AND {checks[check]['partition_clause']}"
else:
return ""
checks_sql = "UNION ALL".join(
self.sql_check_template.format(
check_statement=self.column_checks[check].format(column=column),
check=check,
table=self.table,
column=column,
partition_clause=_generate_partition_clause(check),
)
for check in checks
)
return checks_sql
def _get_match(self, check_values, record, tolerance=None) -> bool:
if record is None and self.accept_none:
record = 0
match_boolean = True
if "geq_to" in check_values:
if tolerance is not None:
match_boolean = record >= check_values["geq_to"] * (1 - tolerance)
else:
match_boolean = record >= check_values["geq_to"]
elif "greater_than" in check_values:
if tolerance is not None:
match_boolean = record > check_values["greater_than"] * (1 - tolerance)
else:
match_boolean = record > check_values["greater_than"]
if "leq_to" in check_values:
if tolerance is not None:
match_boolean = record <= check_values["leq_to"] * (1 + tolerance) and match_boolean
else:
match_boolean = record <= check_values["leq_to"] and match_boolean
elif "less_than" in check_values:
if tolerance is not None:
match_boolean = record < check_values["less_than"] * (1 + tolerance) and match_boolean
else:
match_boolean = record < check_values["less_than"] and match_boolean
if "equal_to" in check_values:
if tolerance is not None:
match_boolean = (
check_values["equal_to"] * (1 - tolerance)
<= record
<= check_values["equal_to"] * (1 + tolerance)
) and match_boolean
else:
match_boolean = record == check_values["equal_to"] and match_boolean
return match_boolean
def _column_mapping_validation(self, check, check_values):
if check not in self.column_checks:
raise AirflowException(f"Invalid column check: {check}.")
if (
"greater_than" not in check_values
and "geq_to" not in check_values
and "less_than" not in check_values
and "leq_to" not in check_values
and "equal_to" not in check_values
):
raise ValueError(
"Please provide one or more of: less_than, leq_to, "
"greater_than, geq_to, or equal_to in the check's dict."
)
if "greater_than" in check_values and "less_than" in check_values:
if check_values["greater_than"] >= check_values["less_than"]:
raise ValueError(
"greater_than should be strictly less than "
"less_than. Use geq_to or leq_to for "
"overlapping equality."
)
if "greater_than" in check_values and "leq_to" in check_values:
if check_values["greater_than"] >= check_values["leq_to"]:
raise ValueError(
"greater_than must be strictly less than leq_to. "
"Use geq_to with leq_to for overlapping equality."
)
if "geq_to" in check_values and "less_than" in check_values:
if check_values["geq_to"] >= check_values["less_than"]:
raise ValueError(
"geq_to should be strictly less than less_than. "
"Use leq_to with geq_to for overlapping equality."
)
if "geq_to" in check_values and "leq_to" in check_values:
if check_values["geq_to"] > check_values["leq_to"]:
raise ValueError("geq_to should be less than or equal to leq_to.")
if "greater_than" in check_values and "geq_to" in check_values:
raise ValueError("Only supply one of greater_than or geq_to.")
if "less_than" in check_values and "leq_to" in check_values:
raise ValueError("Only supply one of less_than or leq_to.")
if (
"greater_than" in check_values
or "geq_to" in check_values
or "less_than" in check_values
or "leq_to" in check_values
) and "equal_to" in check_values:
raise ValueError(
"equal_to cannot be passed with a greater or less than "
"function. To specify 'greater than or equal to' or "
"'less than or equal to', use geq_to or leq_to."
)
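# Illustrative sketch: instantiating the column check operator above against a hypothetical
# "orders" table. Connection id, table and column names are assumptions for illustration only.
def _example_column_check_task() -> SQLColumnCheckOperator:
    return SQLColumnCheckOperator(
        task_id="orders_column_checks",
        conn_id="my_db",  # hypothetical connection id
        table="orders",
        column_mapping={
            "order_id": {"null_check": {"equal_to": 0}, "unique_check": {"equal_to": 0}},
            "amount": {"min": {"geq_to": 0}},
        },
        partition_clause="order_date = '{{ ds }}'",  # applied to every check without its own clause
    )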
class SQLTableCheckOperator(BaseSQLOperator):
"""
Performs one or more of the checks provided in the checks dictionary.
Checks should be written to return a boolean result.
:param table: the table to run checks on
:param checks: the dictionary of checks, where check names are followed by a dictionary containing at
least a check statement, and optionally a partition clause, e.g.:
.. code-block:: python
{
"row_count_check": {"check_statement": "COUNT(*) = 1000"},
"column_sum_check": {"check_statement": "col_a + col_b < col_c"},
"third_check": {"check_statement": "MIN(col) = 1", "partition_clause": "col IS NOT NULL"},
}
:param partition_clause: a partial SQL statement that is added to a WHERE clause in the query built by
the operator that creates partition_clauses for the checks to run on, e.g.
.. code-block:: python
"date = '1970-01-01'"
:param conn_id: the connection ID used to connect to the database
:param database: name of database which overwrite the defined one in connection
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SQLTableCheckOperator`
"""
template_fields = ("partition_clause", "table", "sql", "conn_id")
template_fields_renderers = {"sql": "sql"}
sql_check_template = """
SELECT '{check_name}' AS check_name, MIN({check_name}) AS check_result
FROM (SELECT CASE WHEN {check_statement} THEN 1 ELSE 0 END AS {check_name}
FROM {table} {partition_clause}) AS sq
"""
def __init__(
self,
*,
table: str,
checks: dict[str, dict[str, Any]],
partition_clause: str | None = None,
conn_id: str | None = None,
database: str | None = None,
**kwargs,
):
super().__init__(conn_id=conn_id, database=database, **kwargs)
self.table = table
self.checks = checks
self.partition_clause = partition_clause
self.sql = f"SELECT check_name, check_result FROM ({self._generate_sql_query()}) AS check_table"
def execute(self, context: Context):
hook = self.get_db_hook()
records = hook.get_records(self.sql)
if not records:
self._raise_exception(f"The following query returned zero rows: {self.sql}")
self.log.info("Record:\n%s", records)
for row in records:
check, result = row
self.checks[check]["success"] = _parse_boolean(str(result))
failed_tests = [
f"\tCheck: {check},\n\tCheck Values: {check_values}\n"
for check, check_values in self.checks.items()
if not check_values["success"]
]
if failed_tests:
exception_string = (
f"Test failed.\nQuery:\n{self.sql}\nResults:\n{records!s}\n"
f"The following tests have failed:\n{', '.join(failed_tests)}"
)
self._raise_exception(exception_string)
self.log.info("All tests have passed")
def _generate_sql_query(self):
self.log.debug("Partition clause: %s", self.partition_clause)
def _generate_partition_clause(check_name):
if self.partition_clause and "partition_clause" not in self.checks[check_name]:
return f"WHERE {self.partition_clause}"
elif not self.partition_clause and "partition_clause" in self.checks[check_name]:
return f"WHERE {self.checks[check_name]['partition_clause']}"
elif self.partition_clause and "partition_clause" in self.checks[check_name]:
return f"WHERE {self.partition_clause} AND {self.checks[check_name]['partition_clause']}"
else:
return ""
return "UNION ALL".join(
self.sql_check_template.format(
check_statement=value["check_statement"],
check_name=check_name,
table=self.table,
partition_clause=_generate_partition_clause(check_name),
)
for check_name, value in self.checks.items()
)
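# Illustrative sketch: a table-level check using the operator above. Table, column and connection
# names are assumptions for illustration only.
def _example_table_check_task() -> SQLTableCheckOperator:
    return SQLTableCheckOperator(
        task_id="orders_table_checks",
        conn_id="my_db",  # hypothetical connection id
        table="orders",
        checks={
            "row_count_check": {"check_statement": "COUNT(*) > 0"},
            "amount_consistency_check": {
                "check_statement": "net_amount + tax_amount = gross_amount",
                "partition_clause": "gross_amount IS NOT NULL",
            },
        },
    )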
class SQLCheckOperator(BaseSQLOperator):
"""
Performs checks against a db.
The ``SQLCheckOperator`` expects a sql query that will return a single row.
Each value on that first row is evaluated using python ``bool`` casting.
    If any of the values is ``False``, the check fails and errors out.
Note that Python bool casting evals the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
    Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
    the count ``== 0``. You can craft a much more complex query that could,
    for instance, check that the table has the same number of rows as
    the upstream source table, or that the count of today's partition is
    greater than yesterday's partition, or that a set of metrics are less
    than 3 standard deviations from the 7-day average.
    This operator can be used as a data quality check in your pipeline.
    Depending on where you put it in your DAG, you can either stop the
    critical path and prevent dubious data from being published, or run it
    on the side and receive email alerts without stopping the progress of
    the DAG.
:param sql: the sql to be executed. (templated)
:param conn_id: the connection ID used to connect to the database.
    :param database: name of database which overrides the one defined in the connection
:param parameters: (optional) the parameters to render the SQL query with.
"""
template_fields: Sequence[str] = ("sql",)
template_ext: Sequence[str] = (
".hql",
".sql",
)
template_fields_renderers = {"sql": "sql"}
ui_color = "#fff7e6"
def __init__(
self,
*,
sql: str,
conn_id: str | None = None,
database: str | None = None,
parameters: Iterable | Mapping | None = None,
**kwargs,
) -> None:
super().__init__(conn_id=conn_id, database=database, **kwargs)
self.sql = sql
self.parameters = parameters
def execute(self, context: Context):
self.log.info("Executing SQL check: %s", self.sql)
records = self.get_db_hook().get_first(self.sql, self.parameters)
self.log.info("Record: %s", records)
if not records:
self._raise_exception(f"The following query returned zero rows: {self.sql}")
elif not all(bool(r) for r in records):
self._raise_exception(f"Test failed.\nQuery:\n{self.sql}\nResults:\n{records!s}")
self.log.info("Success.")
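# Illustrative sketch: a simple data-quality gate built on the operator above. The query relies on
# the bool-casting semantics described in the docstring; connection id and table name are
# assumptions for illustration only.
def _example_check_task() -> SQLCheckOperator:
    return SQLCheckOperator(
        task_id="todays_partition_not_empty",
        conn_id="my_db",  # hypothetical connection id
        sql="SELECT COUNT(*) FROM orders WHERE order_date = '{{ ds }}'",  # fails only when count == 0
    )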
class SQLValueCheckOperator(BaseSQLOperator):
"""
Performs a simple value check using sql code.
:param sql: the sql to be executed. (templated)
:param conn_id: the connection ID used to connect to the database.
    :param database: name of database which overrides the one defined in the connection
"""
__mapper_args__ = {"polymorphic_identity": "SQLValueCheckOperator"}
template_fields: Sequence[str] = (
"sql",
"pass_value",
)
template_ext: Sequence[str] = (
".hql",
".sql",
)
template_fields_renderers = {"sql": "sql"}
ui_color = "#fff7e6"
def __init__(
self,
*,
sql: str,
pass_value: Any,
tolerance: Any = None,
conn_id: str | None = None,
database: str | None = None,
**kwargs,
):
super().__init__(conn_id=conn_id, database=database, **kwargs)
self.sql = sql
self.pass_value = str(pass_value)
tol = _convert_to_float_if_possible(tolerance)
self.tol = tol if isinstance(tol, float) else None
self.has_tolerance = self.tol is not None
def execute(self, context: Context):
self.log.info("Executing SQL check: %s", self.sql)
records = self.get_db_hook().get_first(self.sql)
if not records:
self._raise_exception(f"The following query returned zero rows: {self.sql}")
pass_value_conv = _convert_to_float_if_possible(self.pass_value)
is_numeric_value_check = isinstance(pass_value_conv, float)
tolerance_pct_str = str(self.tol * 100) + "%" if self.tol is not None else None
error_msg = (
"Test failed.\nPass value:{pass_value_conv}\n"
"Tolerance:{tolerance_pct_str}\n"
"Query:\n{sql}\nResults:\n{records!s}"
).format(
pass_value_conv=pass_value_conv,
tolerance_pct_str=tolerance_pct_str,
sql=self.sql,
records=records,
)
if not is_numeric_value_check:
tests = self._get_string_matches(records, pass_value_conv)
elif is_numeric_value_check:
try:
numeric_records = self._to_float(records)
except (ValueError, TypeError):
raise AirflowException(f"Converting a result to float failed.\n{error_msg}")
tests = self._get_numeric_matches(numeric_records, pass_value_conv)
else:
tests = []
if not all(tests):
self._raise_exception(error_msg)
def _to_float(self, records):
return [float(record) for record in records]
def _get_string_matches(self, records, pass_value_conv):
return [str(record) == pass_value_conv for record in records]
def _get_numeric_matches(self, numeric_records, numeric_pass_value_conv):
if self.has_tolerance:
return [
numeric_pass_value_conv * (1 - self.tol) <= record <= numeric_pass_value_conv * (1 + self.tol)
for record in numeric_records
]
return [record == numeric_pass_value_conv for record in numeric_records]
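# Illustrative sketch: a value check with a 10% tolerance band around the expected row count.
# Connection id, table and the expected value are assumptions for illustration only.
def _example_value_check_task() -> SQLValueCheckOperator:
    return SQLValueCheckOperator(
        task_id="row_count_close_to_expected",
        conn_id="my_db",  # hypothetical connection id
        sql="SELECT COUNT(*) FROM orders WHERE order_date = '{{ ds }}'",
        pass_value=10_000,
        tolerance=0.1,  # accept anything within +/-10% of pass_value
    )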
class SQLIntervalCheckOperator(BaseSQLOperator):
"""
    Check that metrics given as SQL expressions are within tolerance of the values
    from ``days_back`` days before.
:param table: the table name
:param conn_id: the connection ID used to connect to the database.
    :param database: name of database which overrides the one defined in the connection
:param days_back: number of days between ds and the ds we want to check
against. Defaults to 7 days
:param date_filter_column: The column name for the dates to filter on. Defaults to 'ds'
:param ratio_formula: which formula to use to compute the ratio between
the two metrics. Assuming cur is the metric of today and ref is
the metric to today - days_back. Default: 'max_over_min'
* ``max_over_min``: computes max(cur, ref) / min(cur, ref)
* ``relative_diff``: computes abs(cur-ref) / ref
:param ignore_zero: whether we should ignore zero metrics
:param metrics_thresholds: a dictionary of ratios indexed by metrics
"""
__mapper_args__ = {"polymorphic_identity": "SQLIntervalCheckOperator"}
template_fields: Sequence[str] = ("sql1", "sql2")
template_ext: Sequence[str] = (
".hql",
".sql",
)
template_fields_renderers = {"sql1": "sql", "sql2": "sql"}
ui_color = "#fff7e6"
ratio_formulas = {
"max_over_min": lambda cur, ref: float(max(cur, ref)) / min(cur, ref),
"relative_diff": lambda cur, ref: float(abs(cur - ref)) / ref,
}
def __init__(
self,
*,
table: str,
metrics_thresholds: dict[str, int],
date_filter_column: str | None = "ds",
days_back: SupportsAbs[int] = -7,
ratio_formula: str | None = "max_over_min",
ignore_zero: bool = True,
conn_id: str | None = None,
database: str | None = None,
**kwargs,
):
super().__init__(conn_id=conn_id, database=database, **kwargs)
if ratio_formula not in self.ratio_formulas:
msg_template = "Invalid diff_method: {diff_method}. Supported diff methods are: {diff_methods}"
raise AirflowFailException(
msg_template.format(diff_method=ratio_formula, diff_methods=self.ratio_formulas)
)
self.ratio_formula = ratio_formula
self.ignore_zero = ignore_zero
self.table = table
self.metrics_thresholds = metrics_thresholds
self.metrics_sorted = sorted(metrics_thresholds.keys())
self.date_filter_column = date_filter_column
self.days_back = -abs(days_back)
sqlexp = ", ".join(self.metrics_sorted)
sqlt = f"SELECT {sqlexp} FROM {table} WHERE {date_filter_column}="
self.sql1 = sqlt + "'{{ ds }}'"
self.sql2 = sqlt + "'{{ macros.ds_add(ds, " + str(self.days_back) + ") }}'"
def execute(self, context: Context):
hook = self.get_db_hook()
self.log.info("Using ratio formula: %s", self.ratio_formula)
self.log.info("Executing SQL check: %s", self.sql2)
row2 = hook.get_first(self.sql2)
self.log.info("Executing SQL check: %s", self.sql1)
row1 = hook.get_first(self.sql1)
if not row2:
self._raise_exception(f"The following query returned zero rows: {self.sql2}")
if not row1:
self._raise_exception(f"The following query returned zero rows: {self.sql1}")
current = dict(zip(self.metrics_sorted, row1))
reference = dict(zip(self.metrics_sorted, row2))
ratios: dict[str, int | None] = {}
test_results = {}
for metric in self.metrics_sorted:
cur = current[metric]
ref = reference[metric]
threshold = self.metrics_thresholds[metric]
if cur == 0 or ref == 0:
ratios[metric] = None
test_results[metric] = self.ignore_zero
else:
ratio_metric = self.ratio_formulas[self.ratio_formula](current[metric], reference[metric])
ratios[metric] = ratio_metric
if ratio_metric is not None:
test_results[metric] = ratio_metric < threshold
else:
test_results[metric] = self.ignore_zero
self.log.info(
(
"Current metric for %s: %s\n"
"Past metric for %s: %s\n"
"Ratio for %s: %s\n"
"Threshold: %s\n"
),
metric,
cur,
metric,
ref,
metric,
ratios[metric],
threshold,
)
if not all(test_results.values()):
failed_tests = [it[0] for it in test_results.items() if not it[1]]
self.log.warning(
"The following %s tests out of %s failed:",
len(failed_tests),
len(self.metrics_sorted),
)
for k in failed_tests:
self.log.warning(
"'%s' check failed. %s is above %s",
k,
ratios[k],
self.metrics_thresholds[k],
)
self._raise_exception(f"The following tests have failed:\n {', '.join(sorted(failed_tests))}")
self.log.info("All tests have passed")
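# Illustrative sketch: comparing today's metrics with those from seven days earlier using the
# operator above. Table, column and threshold values are assumptions for illustration only.
def _example_interval_check_task() -> SQLIntervalCheckOperator:
    return SQLIntervalCheckOperator(
        task_id="orders_vs_last_week",
        conn_id="my_db",  # hypothetical connection id
        table="orders",
        days_back=7,  # normalised to -7 internally
        date_filter_column="order_date",
        metrics_thresholds={"COUNT(*)": 1.5, "SUM(amount)": 1.5},  # max_over_min ratio must stay below 1.5
        ratio_formula="max_over_min",
    )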
class SQLThresholdCheckOperator(BaseSQLOperator):
"""
Performs a value check using sql code against a minimum threshold and a maximum threshold.
    Thresholds can be in the form of a numeric value or a SQL statement that returns a numeric value.
:param sql: the sql to be executed. (templated)
:param conn_id: the connection ID used to connect to the database.
    :param database: name of database which overrides the one defined in the connection
:param min_threshold: numerical value or min threshold sql to be executed (templated)
:param max_threshold: numerical value or max threshold sql to be executed (templated)
"""
template_fields: Sequence[str] = ("sql", "min_threshold", "max_threshold")
template_ext: Sequence[str] = (
".hql",
".sql",
)
template_fields_renderers = {"sql": "sql"}
def __init__(
self,
*,
sql: str,
min_threshold: Any,
max_threshold: Any,
conn_id: str | None = None,
database: str | None = None,
**kwargs,
):
super().__init__(conn_id=conn_id, database=database, **kwargs)
self.sql = sql
self.min_threshold = min_threshold
self.max_threshold = max_threshold
def execute(self, context: Context):
hook = self.get_db_hook()
result = hook.get_first(self.sql)[0]
if not result:
self._raise_exception(f"The following query returned zero rows: {self.sql}")
min_threshold = _convert_to_float_if_possible(self.min_threshold)
max_threshold = _convert_to_float_if_possible(self.max_threshold)
if isinstance(min_threshold, float):
lower_bound = min_threshold
else:
lower_bound = hook.get_first(min_threshold)[0]
if isinstance(max_threshold, float):
upper_bound = max_threshold
else:
upper_bound = hook.get_first(max_threshold)[0]
meta_data = {
"result": result,
"task_id": self.task_id,
"min_threshold": lower_bound,
"max_threshold": upper_bound,
"within_threshold": lower_bound <= result <= upper_bound,
}
self.push(meta_data)
if not meta_data["within_threshold"]:
result = (
round(meta_data.get("result"), 2) # type: ignore[arg-type]
if meta_data.get("result") is not None
else "<None>"
)
error_msg = (
f'Threshold Check: "{meta_data.get("task_id")}" failed.\n'
f'DAG: {self.dag_id}\nTask_id: {meta_data.get("task_id")}\n'
f'Check description: {meta_data.get("description")}\n'
f"SQL: {self.sql}\n"
f"Result: {result} is not within thresholds "
f'{meta_data.get("min_threshold")} and {meta_data.get("max_threshold")}'
)
self._raise_exception(error_msg)
self.log.info("Test %s Successful.", self.task_id)
def push(self, meta_data):
"""
Optional: Send data check info and metadata to an external database.
Default functionality will log metadata.
"""
info = "\n".join(f"""{key}: {item}""" for key, item in meta_data.items())
self.log.info("Log from %s:\n%s", self.dag_id, info)
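# Illustrative sketch: a threshold check mixing a numeric lower bound with a SQL-derived upper
# bound, as the docstring above allows. All identifiers are assumptions for illustration only.
def _example_threshold_check_task() -> SQLThresholdCheckOperator:
    return SQLThresholdCheckOperator(
        task_id="daily_revenue_within_bounds",
        conn_id="my_db",  # hypothetical connection id
        sql="SELECT SUM(amount) FROM orders WHERE order_date = '{{ ds }}'",
        min_threshold=1_000,  # plain numeric bound
        max_threshold="SELECT 10 * AVG(daily_total) FROM revenue_history",  # bound computed by SQL
    )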
class BranchSQLOperator(BaseSQLOperator, SkipMixin):
"""
Allows a DAG to "branch" or follow a specified path based on the results of a SQL query.
:param sql: The SQL code to be executed, should return true or false (templated)
        Template references are recognized by strings ending in '.sql'.
        The SQL query is expected to return a boolean (True/False), an integer (0 = False,
        any other value = True) or a string (true/y/yes/1/on/false/n/no/0/off).
:param follow_task_ids_if_true: task id or task ids to follow if query returns true
:param follow_task_ids_if_false: task id or task ids to follow if query returns false
:param conn_id: the connection ID used to connect to the database.
    :param database: name of database which overrides the one defined in the connection
:param parameters: (optional) the parameters to render the SQL query with.
"""
template_fields: Sequence[str] = ("sql",)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
ui_color = "#a22034"
ui_fgcolor = "#F7F7F7"
def __init__(
self,
*,
sql: str,
follow_task_ids_if_true: list[str],
follow_task_ids_if_false: list[str],
conn_id: str = "default_conn_id",
database: str | None = None,
parameters: Iterable | Mapping | None = None,
**kwargs,
) -> None:
super().__init__(conn_id=conn_id, database=database, **kwargs)
self.sql = sql
self.parameters = parameters
self.follow_task_ids_if_true = follow_task_ids_if_true
self.follow_task_ids_if_false = follow_task_ids_if_false
def execute(self, context: Context):
self.log.info(
"Executing: %s (with parameters %s) with connection: %s",
self.sql,
self.parameters,
self.conn_id,
)
record = self.get_db_hook().get_first(self.sql, self.parameters)
if not record:
raise AirflowException(
"No rows returned from sql query. Operator expected True or False return value."
)
if isinstance(record, list):
if isinstance(record[0], list):
query_result = record[0][0]
else:
query_result = record[0]
elif isinstance(record, tuple):
query_result = record[0]
else:
query_result = record
self.log.info("Query returns %s, type '%s'", query_result, type(query_result))
follow_branch = None
try:
if isinstance(query_result, bool):
if query_result:
follow_branch = self.follow_task_ids_if_true
elif isinstance(query_result, str):
# return result is not Boolean, try to convert from String to Boolean
if _parse_boolean(query_result):
follow_branch = self.follow_task_ids_if_true
elif isinstance(query_result, int):
if bool(query_result):
follow_branch = self.follow_task_ids_if_true
else:
raise AirflowException(
f"Unexpected query return result '{query_result}' type '{type(query_result)}'"
)
if follow_branch is None:
follow_branch = self.follow_task_ids_if_false
except ValueError:
raise AirflowException(
f"Unexpected query return result '{query_result}' type '{type(query_result)}'"
)
self.skip_all_except(context["ti"], follow_branch)
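# Illustrative sketch: branching on whether a staging table received any rows. The boolean
# expression syntax is database specific (shown here in a PostgreSQL-like flavour); connection id,
# table and downstream task ids are assumptions for illustration only.
def _example_branch_task() -> BranchSQLOperator:
    return BranchSQLOperator(
        task_id="check_new_rows_present",
        conn_id="my_db",  # hypothetical connection id
        sql="SELECT COUNT(*) > 0 FROM staging_orders WHERE load_date = '{{ ds }}'",
        follow_task_ids_if_true=["load_orders"],
        follow_task_ids_if_false=["notify_no_data"],
    )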
| 46,888 | 38.369437 | 110 | py |
airflow | airflow-main/airflow/providers/common/sql/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/common/sql/hooks/sql.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from contextlib import closing
from datetime import datetime
from typing import TYPE_CHECKING, Any, Callable, Iterable, Mapping, Protocol, Sequence, cast
from urllib.parse import urlparse
import sqlparse
from packaging.version import Version
from sqlalchemy import create_engine
from airflow import AirflowException
from airflow.hooks.base import BaseHook
from airflow.version import version
if TYPE_CHECKING:
from airflow.providers.openlineage.extractors import OperatorLineage
from airflow.providers.openlineage.sqlparser import DatabaseInfo
def return_single_query_results(sql: str | Iterable[str], return_last: bool, split_statements: bool):
"""
    Determines whether only the result of a single query should be returned.
For compatibility reasons, the behaviour of the DBAPIHook is somewhat confusing.
In some cases, when multiple queries are run, the return value will be an iterable (list) of results
-- one for each query. However, in other cases, when single query is run, the return value will be just
the result of that single query without wrapping the results in a list.
The cases when single query results are returned without wrapping them in a list are as follows:
a) sql is string and ``return_last`` is True (regardless what ``split_statements`` value is)
b) sql is string and ``split_statements`` is False
In all other cases, the results are wrapped in a list, even if there is only one statement to process.
In particular, the return value will be a list of query results in the following circumstances:
a) when ``sql`` is an iterable of string statements (regardless what ``return_last`` value is)
b) when ``sql`` is string, ``split_statements`` is True and ``return_last`` is False
:param sql: sql to run (either string or list of strings)
:param return_last: whether last statement output should only be returned
:param split_statements: whether to split string statements.
:return: True if the hook should return single query results
"""
return isinstance(sql, str) and (return_last or not split_statements)
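# Illustrative sketch: the rules documented above, spelled out on a few concrete inputs. The SQL
# strings are placeholders; only their types and the two flags matter here.
def _example_return_single_query_results():
    single = "SELECT 1"
    many = ["SELECT 1", "SELECT 2"]
    assert return_single_query_results(single, return_last=True, split_statements=True) is True
    assert return_single_query_results(single, return_last=False, split_statements=False) is True
    assert return_single_query_results(single, return_last=False, split_statements=True) is False
    assert return_single_query_results(many, return_last=True, split_statements=False) is False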
def fetch_all_handler(cursor) -> list[tuple] | None:
"""Handler for DbApiHook.run() to return results."""
if not hasattr(cursor, "description"):
raise RuntimeError(
"The database we interact with does not support DBAPI 2.0. Use operator and "
"handlers that are specifically designed for your database."
)
if cursor.description is not None:
return cursor.fetchall()
else:
return None
def fetch_one_handler(cursor) -> list[tuple] | None:
"""Handler for DbApiHook.run() to return first result."""
if not hasattr(cursor, "description"):
raise RuntimeError(
"The database we interact with does not support DBAPI 2.0. Use operator and "
"handlers that are specifically designed for your database."
)
if cursor.description is not None:
return cursor.fetchone()
else:
return None
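# Illustrative sketch: ``run`` accepts any callable that receives the open cursor, so projects can
# define handlers of their own next to the two shipped above. The factory below is hypothetical
# and only demonstrates the expected shape of such a handler.
def _example_fetch_many_handler(size: int) -> Callable[[Any], list[tuple] | None]:
    """Build a handler that fetches at most ``size`` rows per statement."""
    def handler(cursor) -> list[tuple] | None:
        if getattr(cursor, "description", None) is not None:
            return cursor.fetchmany(size)
        return None
    return handler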
class ConnectorProtocol(Protocol):
"""A protocol where you can connect to a database."""
def connect(self, host: str, port: int, username: str, schema: str) -> Any:
"""
Connect to a database.
:param host: The database host to connect to.
:param port: The database port to connect to.
:param username: The database username used for the authentication.
:param schema: The database schema to connect to.
:return: the authorized connection object.
"""
# On Airflow 2.4+ we should use BaseHook, but on Airflow 2.3 and below we want the DbApiHook to
# derive from the original DbApiHook from airflow, because otherwise SqlSensor and BaseSqlOperator
# from "airflow.operators" and "airflow.sensors" will refuse to accept the new Hooks as not derived
# from the original DbApiHook.
if Version(version) < Version("2.4"):
try:
from airflow.hooks.dbapi import DbApiHook as BaseForDbApiHook
except ImportError:
# just in case we have a problem with circular import
BaseForDbApiHook: type[BaseHook] = BaseHook # type: ignore[no-redef]
else:
BaseForDbApiHook: type[BaseHook] = BaseHook # type: ignore[no-redef]
class DbApiHook(BaseForDbApiHook):
"""
Abstract base class for sql hooks.
:param schema: Optional DB schema that overrides the schema specified in the connection. Make sure that
if you change the schema parameter value in the constructor of the derived Hook, such change
should be done before calling the ``DBApiHook.__init__()``.
:param log_sql: Whether to log SQL query when it's executed. Defaults to *True*.
"""
# Override to provide the connection name.
conn_name_attr: str
# Override to have a default connection id for a particular dbHook
default_conn_name = "default_conn_id"
# Override if this db supports autocommit.
supports_autocommit = False
# Override with the object that exposes the connect method
connector: ConnectorProtocol | None = None
# Override with db-specific query to check connection
_test_connection_sql = "select 1"
# Override with the db-specific value used for placeholders
placeholder: str = "%s"
def __init__(self, *args, schema: str | None = None, log_sql: bool = True, **kwargs):
super().__init__()
if not self.conn_name_attr:
raise AirflowException("conn_name_attr is not defined")
elif len(args) == 1:
setattr(self, self.conn_name_attr, args[0])
elif self.conn_name_attr not in kwargs:
setattr(self, self.conn_name_attr, self.default_conn_name)
else:
setattr(self, self.conn_name_attr, kwargs[self.conn_name_attr])
        # We should not make schema available in deriving hooks for backwards compatibility.
        # If a hook deriving from DBApiHook has a need to access schema, then it should retrieve it
        # from kwargs and store it on its own. We do not "pop" it here, so that a Hook deriving
        # from DBApiHook still has access to the field in its constructor.
self.__schema = schema
self.log_sql = log_sql
self.descriptions: list[Sequence[Sequence] | None] = []
def get_conn(self):
"""Returns a connection object."""
db = self.get_connection(getattr(self, cast(str, self.conn_name_attr)))
return self.connector.connect(host=db.host, port=db.port, username=db.login, schema=db.schema)
def get_uri(self) -> str:
"""
Extract the URI from the connection.
:return: the extracted uri.
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
conn.schema = self.__schema or conn.schema
return conn.get_uri()
def get_sqlalchemy_engine(self, engine_kwargs=None):
"""
Get an sqlalchemy_engine object.
:param engine_kwargs: Kwargs used in :func:`~sqlalchemy.create_engine`.
:return: the created engine.
"""
if engine_kwargs is None:
engine_kwargs = {}
return create_engine(self.get_uri(), **engine_kwargs)
def get_pandas_df(self, sql, parameters=None, **kwargs):
"""
Executes the sql and returns a pandas dataframe.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with.
:param kwargs: (optional) passed into pandas.io.sql.read_sql method
"""
try:
from pandas.io import sql as psql
except ImportError:
raise Exception(
"pandas library not installed, run: pip install "
"'apache-airflow-providers-common-sql[pandas]'."
)
with closing(self.get_conn()) as conn:
return psql.read_sql(sql, con=conn, params=parameters, **kwargs)
def get_pandas_df_by_chunks(self, sql, parameters=None, *, chunksize, **kwargs):
"""
Executes the sql and returns a generator.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with
:param chunksize: number of rows to include in each chunk
:param kwargs: (optional) passed into pandas.io.sql.read_sql method
"""
try:
from pandas.io import sql as psql
except ImportError:
raise Exception(
"pandas library not installed, run: pip install "
"'apache-airflow-providers-common-sql[pandas]'."
)
with closing(self.get_conn()) as conn:
yield from psql.read_sql(sql, con=conn, params=parameters, chunksize=chunksize, **kwargs)
def get_records(
self,
sql: str | list[str],
parameters: Iterable | Mapping | None = None,
) -> Any:
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
return self.run(sql=sql, parameters=parameters, handler=fetch_all_handler)
def get_first(self, sql: str | list[str], parameters: Iterable | Mapping | None = None) -> Any:
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
return self.run(sql=sql, parameters=parameters, handler=fetch_one_handler)
@staticmethod
def strip_sql_string(sql: str) -> str:
return sql.strip().rstrip(";")
@staticmethod
def split_sql_string(sql: str) -> list[str]:
"""
Splits string into multiple SQL expressions.
:param sql: SQL string potentially consisting of multiple expressions
:return: list of individual expressions
"""
splits = sqlparse.split(sqlparse.format(sql, strip_comments=True))
return [s for s in splits if s]
@property
def last_description(self) -> Sequence[Sequence] | None:
if not self.descriptions:
return None
return self.descriptions[-1]
def run(
self,
sql: str | Iterable[str],
autocommit: bool = False,
parameters: Iterable | Mapping | None = None,
handler: Callable | None = None,
split_statements: bool = False,
return_last: bool = True,
) -> Any | list[Any] | None:
"""Run a command or a list of commands.
Pass a list of SQL statements to the sql parameter to get them to
execute sequentially.
        The method will return either single query results (typically a list of rows) or a list of
        those results, where each element is the result of one of the queries (typically a list of
        lists of rows).
For compatibility reasons, the behaviour of the DBAPIHook is somewhat confusing.
In some cases, when multiple queries are run, the return value will be an iterable (list) of results
-- one for each query. However, in other cases, when single query is run, the return value will
be the result of that single query without wrapping the results in a list.
The cases when single query results are returned without wrapping them in a list are as follows:
a) sql is string and ``return_last`` is True (regardless what ``split_statements`` value is)
b) sql is string and ``split_statements`` is False
In all other cases, the results are wrapped in a list, even if there is only one statement to process.
In particular, the return value will be a list of query results in the following circumstances:
a) when ``sql`` is an iterable of string statements (regardless what ``return_last`` value is)
b) when ``sql`` is string, ``split_statements`` is True and ``return_last`` is False
After ``run`` is called, you may access the following properties on the hook object:
* ``descriptions``: an array of cursor descriptions. If ``return_last`` is True, this will be
a one-element array containing the cursor ``description`` for the last statement.
Otherwise, it will contain the cursor description for each statement executed.
* ``last_description``: the description for the last statement executed
Note that query result will ONLY be actually returned when a handler is provided; if
``handler`` is None, this method will return None.
        A handler is a way to process the rows from the cursor (an iterator) into a value that is
        suitable to be returned to XCom and that generally fits in memory.
        You can use the pre-defined handlers (``fetch_all_handler``, ``fetch_one_handler``) or
        implement your own handler.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
:param parameters: The parameters to render the SQL query with.
:param handler: The result handler which is called with the result of each statement.
:param split_statements: Whether to split a single SQL string into statements and run separately
:param return_last: Whether to return result for only last statement or for all after split
:return: if handler provided, returns query results (may be list of results depending on params)
"""
self.descriptions = []
if isinstance(sql, str):
if split_statements:
sql_list: Iterable[str] = self.split_sql_string(sql)
else:
sql_list = [sql] if sql.strip() else []
else:
sql_list = sql
if sql_list:
self.log.debug("Executing following statements against DB: %s", sql_list)
else:
raise ValueError("List of SQL statements is empty")
_last_result = None
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
results = []
for sql_statement in sql_list:
self._run_command(cur, sql_statement, parameters)
if handler is not None:
result = handler(cur)
if return_single_query_results(sql, return_last, split_statements):
_last_result = result
_last_description = cur.description
else:
results.append(result)
self.descriptions.append(cur.description)
# If autocommit was set to False or db does not support autocommit, we do a manual commit.
if not self.get_autocommit(conn):
conn.commit()
if handler is None:
return None
if return_single_query_results(sql, return_last, split_statements):
self.descriptions = [_last_description]
return _last_result
else:
return results
def _run_command(self, cur, sql_statement, parameters):
"""Runs a statement using an already open cursor."""
if self.log_sql:
self.log.info("Running statement: %s, parameters: %s", sql_statement, parameters)
if parameters:
cur.execute(sql_statement, parameters)
else:
cur.execute(sql_statement)
# According to PEP 249, this is -1 when query result is not applicable.
if cur.rowcount >= 0:
self.log.info("Rows affected: %s", cur.rowcount)
def set_autocommit(self, conn, autocommit):
"""Sets the autocommit flag on the connection."""
if not self.supports_autocommit and autocommit:
self.log.warning(
"%s connection doesn't support autocommit but autocommit activated.",
getattr(self, self.conn_name_attr),
)
conn.autocommit = autocommit
def get_autocommit(self, conn) -> bool:
"""Get autocommit setting for the provided connection.
:param conn: Connection to get autocommit setting from.
:return: connection autocommit setting. True if ``autocommit`` is set
to True on the connection. False if it is either not set, set to
False, or the connection does not support auto-commit.
"""
return getattr(conn, "autocommit", False) and self.supports_autocommit
def get_cursor(self):
"""Returns a cursor."""
return self.get_conn().cursor()
@classmethod
def _generate_insert_sql(cls, table, values, target_fields, replace, **kwargs) -> str:
"""Helper class method that generates the INSERT SQL statement.
The REPLACE variant is specific to MySQL syntax.
:param table: Name of the target table
:param values: The row to insert into the table
:param target_fields: The names of the columns to fill in the table
:param replace: Whether to replace instead of insert
:return: The generated INSERT or REPLACE SQL statement
"""
placeholders = [
cls.placeholder,
] * len(values)
if target_fields:
target_fields = ", ".join(target_fields)
target_fields = f"({target_fields})"
else:
target_fields = ""
if not replace:
sql = "INSERT INTO "
else:
sql = "REPLACE INTO "
sql += f"{table} {target_fields} VALUES ({','.join(placeholders)})"
return sql
def insert_rows(self, table, rows, target_fields=None, commit_every=1000, replace=False, **kwargs):
"""Insert a collection of tuples into a table.
Rows are inserted in chunks, each chunk (of size ``commit_every``) is
done in a new transaction.
:param table: Name of the target table
:param rows: The rows to insert into the table
:param target_fields: The names of the columns to fill in the table
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:param replace: Whether to replace instead of insert
"""
i = 0
with closing(self.get_conn()) as conn:
if self.supports_autocommit:
self.set_autocommit(conn, False)
conn.commit()
with closing(conn.cursor()) as cur:
for i, row in enumerate(rows, 1):
lst = []
for cell in row:
lst.append(self._serialize_cell(cell, conn))
values = tuple(lst)
sql = self._generate_insert_sql(table, values, target_fields, replace, **kwargs)
self.log.debug("Generated sql: %s", sql)
cur.execute(sql, values)
if commit_every and i % commit_every == 0:
conn.commit()
self.log.info("Loaded %s rows into %s so far", i, table)
conn.commit()
self.log.info("Done loading. Loaded a total of %s rows into %s", i, table)
@staticmethod
def _serialize_cell(cell, conn=None) -> str | None:
"""
Returns the SQL literal of the cell as a string.
:param cell: The cell to insert into the table
:param conn: The database connection
:return: The serialized cell
"""
if cell is None:
return None
if isinstance(cell, datetime):
return cell.isoformat()
return str(cell)
def bulk_dump(self, table, tmp_file):
"""
Dumps a database table into a tab-delimited file.
:param table: The name of the source table
:param tmp_file: The path of the target file
"""
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
"""
Loads a tab-delimited file into a database table.
:param table: The name of the target table
:param tmp_file: The path of the file to load into the table
"""
raise NotImplementedError()
def test_connection(self):
"""Tests the connection using db-specific query."""
status, message = False, ""
try:
if self.get_first(self._test_connection_sql):
status = True
message = "Connection successfully tested"
except Exception as e:
status = False
message = str(e)
return status, message
def get_openlineage_database_info(self, connection) -> DatabaseInfo | None:
"""
Returns database specific information needed to generate and parse lineage metadata.
This includes information helpful for constructing information schema query
and creating correct namespace.
:param connection: Airflow connection to reduce calls of `get_connection` method
"""
def get_openlineage_database_dialect(self, connection) -> str:
"""
Returns database dialect used for SQL parsing.
For a list of supported dialects check: https://openlineage.io/docs/development/sql#sql-dialects
"""
return "generic"
def get_openlineage_default_schema(self) -> str | None:
"""
Returns default schema specific to database.
.. seealso::
- :class:`airflow.providers.openlineage.sqlparser.SQLParser`
"""
return self.__schema or "public"
def get_openlineage_database_specific_lineage(self, task_instance) -> OperatorLineage | None:
"""
Returns additional database specific lineage, e.g. query execution information.
This method is called only on completion of the task.
:param task_instance: this may be used to retrieve additional information
that is collected during runtime of the task
"""
@staticmethod
def get_openlineage_authority_part(connection) -> str:
"""
        This method serves as a common way for several hooks to get the authority part from an Airflow Connection.
        The authority represents the hostname and port of the connection
        and conforms to the OpenLineage naming convention for a number of databases (e.g. MySQL, Postgres, Trino).
"""
parsed = urlparse(connection.get_uri())
authority = f"{parsed.hostname}:{parsed.port}"
return authority
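# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the provider API): it shows how a
# concrete ``DbApiHook`` subclass is typically driven. The hook instance,
# connection, table and column names are hypothetical placeholders, and the
# placeholder style (``%s``) depends on the subclass' ``placeholder`` attribute.
# ---------------------------------------------------------------------------
def _example_dbapi_hook_usage(hook: DbApiHook) -> None:
    """Sketch of ``run`` with a handler and of ``insert_rows`` (never called at import time)."""
    # With a handler, ``run`` returns the processed rows; the module-level
    # ``fetch_all_handler`` simply fetches every row of each statement.
    rows = hook.run(
        sql="SELECT 1 AS answer; SELECT 2 AS answer",
        handler=fetch_all_handler,
        split_statements=True,
        return_last=True,  # only the result of the final statement is returned
    )
    hook.log.info("Query returned: %s", rows)
    # ``insert_rows`` builds statements via ``_generate_insert_sql``, e.g.
    #   INSERT INTO my_table (id, name) VALUES (%s,%s)
    # and commits after every ``commit_every`` rows.
    hook.insert_rows(
        table="my_table",
        rows=[(1, "a"), (2, "b")],
        target_fields=["id", "name"],
        commit_every=1000,
    )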
| 23,801 | 40.684764 | 110 | py |
airflow | airflow-main/airflow/providers/common/sql/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/common/sql/sensors/sql.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Sequence
from airflow import AirflowException
from airflow.hooks.base import BaseHook
from airflow.providers.common.sql.hooks.sql import DbApiHook
from airflow.sensors.base import BaseSensorOperator
class SqlSensor(BaseSensorOperator):
"""Run a sql statement repeatedly until a criteria is met.
This will keep trying until success or failure criteria are met, or if the
first cell is not either ``0``, ``'0'``, ``''``, or ``None``. Optional
success and failure callables are called with the first cell returned as the
argument.
If success callable is defined, the sensor will keep retrying until the
criteria is met. If failure callable is defined, and the criteria is met,
the sensor will raise AirflowException. Failure criteria is evaluated before
success criteria. A fail_on_empty boolean can also be passed to the sensor
in which case it will fail if no rows have been returned.
:param conn_id: The connection to run the sensor against
:param sql: The sql to run. To pass, it needs to return at least one cell
that contains a non-zero / empty string value.
:param parameters: The parameters to render the SQL query with (optional).
:param success: Success criteria for the sensor is a Callable that takes first_cell
as the only argument, and returns a boolean (optional).
:param failure: Failure criteria for the sensor is a Callable that takes first_cell
as the only argument and return a boolean (optional).
:param fail_on_empty: Explicitly fail on no rows returned.
:param hook_params: Extra config params to be passed to the underlying hook.
Should match the desired hook constructor params.
"""
template_fields: Sequence[str] = ("sql",)
template_ext: Sequence[str] = (
".hql",
".sql",
)
ui_color = "#7c7287"
def __init__(
self,
*,
conn_id,
sql,
parameters=None,
success=None,
failure=None,
fail_on_empty=False,
hook_params=None,
**kwargs,
):
self.conn_id = conn_id
self.sql = sql
self.parameters = parameters
self.success = success
self.failure = failure
self.fail_on_empty = fail_on_empty
self.hook_params = hook_params
super().__init__(**kwargs)
def _get_hook(self):
conn = BaseHook.get_connection(self.conn_id)
hook = conn.get_hook(hook_params=self.hook_params)
if not isinstance(hook, DbApiHook):
raise AirflowException(
f"The connection type is not supported by {self.__class__.__name__}. "
f"The associated hook should be a subclass of `DbApiHook`. Got {hook.__class__.__name__}"
)
return hook
def poke(self, context: Any):
hook = self._get_hook()
self.log.info("Poking: %s (with parameters %s)", self.sql, self.parameters)
records = hook.get_records(self.sql, self.parameters)
if not records:
if self.fail_on_empty:
raise AirflowException("No rows returned, raising as per fail_on_empty flag")
else:
return False
first_cell = records[0][0]
if self.failure is not None:
if callable(self.failure):
if self.failure(first_cell):
raise AirflowException(f"Failure criteria met. self.failure({first_cell}) returned True")
else:
raise AirflowException(f"self.failure is present, but not callable -> {self.failure}")
if self.success is not None:
if callable(self.success):
return self.success(first_cell)
else:
raise AirflowException(f"self.success is present, but not callable -> {self.success}")
return bool(first_cell)
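# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the provider API). The connection
# id, SQL and threshold below are hypothetical; the sketch only shows how the
# ``success`` / ``failure`` callables receive the first cell of the result.
# ---------------------------------------------------------------------------
def _example_sql_sensor() -> SqlSensor:
    """Build (but do not schedule) a sensor that waits for at least 100 rows."""
    return SqlSensor(
        task_id="wait_for_rows",
        conn_id="my_db",  # hypothetical connection id
        sql="SELECT COUNT(*) FROM my_table",  # first cell is the row count
        success=lambda first_cell: first_cell >= 100,  # done once 100 rows exist
        failure=lambda first_cell: first_cell is None,  # treat a NULL count as an error
        poke_interval=60,
        timeout=60 * 60,
    )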
| 4,726 | 40.104348 | 109 | py |
airflow | airflow-main/airflow/providers/common/sql/sensors/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/imap/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.2.2"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-imap:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,529 | 35.428571 | 113 | py |
airflow | airflow-main/airflow/providers/imap/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/imap/hooks/imap.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module provides everything to search mail for a specific attachment and download it.
It uses the imaplib library that is already integrated in python 3.
"""
from __future__ import annotations
import email
import imaplib
import os
import re
from typing import Any, Iterable
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models.connection import Connection
from airflow.utils.log.logging_mixin import LoggingMixin
class ImapHook(BaseHook):
"""
This hook connects to a mail server by using the imap protocol.
.. note:: Please call this Hook as context manager via `with`
to automatically open and close the connection to the mail server.
:param imap_conn_id: The :ref:`imap connection id <howto/connection:imap>`
that contains the information used to authenticate the client.
"""
conn_name_attr = "imap_conn_id"
default_conn_name = "imap_default"
conn_type = "imap"
hook_name = "IMAP"
def __init__(self, imap_conn_id: str = default_conn_name) -> None:
super().__init__()
self.imap_conn_id = imap_conn_id
self.mail_client: imaplib.IMAP4_SSL | imaplib.IMAP4 | None = None
def __enter__(self) -> ImapHook:
return self.get_conn()
def __exit__(self, exc_type, exc_val, exc_tb):
self.mail_client.logout()
def get_conn(self) -> ImapHook:
"""
Login to the mail server.
.. note:: Please call this Hook as context manager via `with`
to automatically open and close the connection to the mail server.
:return: an authorized ImapHook object.
"""
if not self.mail_client:
conn = self.get_connection(self.imap_conn_id)
self.mail_client = self._build_client(conn)
self.mail_client.login(conn.login, conn.password)
return self
def _build_client(self, conn: Connection) -> imaplib.IMAP4_SSL | imaplib.IMAP4:
IMAP: type[imaplib.IMAP4_SSL] | type[imaplib.IMAP4]
if conn.extra_dejson.get("use_ssl", True):
IMAP = imaplib.IMAP4_SSL
else:
IMAP = imaplib.IMAP4
if conn.port:
mail_client = IMAP(conn.host, conn.port)
else:
mail_client = IMAP(conn.host)
return mail_client
def has_mail_attachment(
self, name: str, *, check_regex: bool = False, mail_folder: str = "INBOX", mail_filter: str = "All"
) -> bool:
"""
Checks the mail folder for mails containing attachments with the given name.
:param name: The name of the attachment that will be searched for.
:param check_regex: Checks the name for a regular expression.
:param mail_folder: The mail folder where to look at.
:param mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:returns: True if there is an attachment with the given name and False if not.
"""
mail_attachments = self._retrieve_mails_attachments_by_name(
name, check_regex, True, mail_folder, mail_filter
)
return len(mail_attachments) > 0
def retrieve_mail_attachments(
self,
name: str,
*,
check_regex: bool = False,
latest_only: bool = False,
mail_folder: str = "INBOX",
mail_filter: str = "All",
not_found_mode: str = "raise",
) -> list[tuple]:
"""
Retrieves mail's attachments in the mail folder by its name.
:param name: The name of the attachment that will be downloaded.
:param check_regex: Checks the name for a regular expression.
:param latest_only: If set to True it will only retrieve the first matched attachment.
:param mail_folder: The mail folder where to look at.
:param mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
:returns: a list of tuple each containing the attachment filename and its payload.
"""
mail_attachments = self._retrieve_mails_attachments_by_name(
name, check_regex, latest_only, mail_folder, mail_filter
)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
return mail_attachments
def download_mail_attachments(
self,
name: str,
local_output_directory: str,
*,
check_regex: bool = False,
latest_only: bool = False,
mail_folder: str = "INBOX",
mail_filter: str = "All",
not_found_mode: str = "raise",
) -> None:
"""
Downloads mail's attachments in the mail folder by its name to the local directory.
:param name: The name of the attachment that will be downloaded.
:param local_output_directory: The output directory on the local machine
where the files will be downloaded to.
:param check_regex: Checks the name for a regular expression.
:param latest_only: If set to True it will only download the first matched attachment.
:param mail_folder: The mail folder where to look at.
:param mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
:param not_found_mode: Specify what should happen if no attachment has been found.
Supported values are 'raise', 'warn' and 'ignore'.
If it is set to 'raise' it will raise an exception,
if set to 'warn' it will only print a warning and
if set to 'ignore' it won't notify you at all.
"""
mail_attachments = self._retrieve_mails_attachments_by_name(
name, check_regex, latest_only, mail_folder, mail_filter
)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
self._create_files(mail_attachments, local_output_directory)
def _handle_not_found_mode(self, not_found_mode: str) -> None:
if not_found_mode not in ("raise", "warn", "ignore"):
self.log.error('Invalid "not_found_mode" %s', not_found_mode)
elif not_found_mode == "raise":
raise AirflowException("No mail attachments found!")
elif not_found_mode == "warn":
self.log.warning("No mail attachments found!")
def _retrieve_mails_attachments_by_name(
self, name: str, check_regex: bool, latest_only: bool, mail_folder: str, mail_filter: str
) -> list:
if not self.mail_client:
raise Exception("The 'mail_client' should be initialized before!")
all_matching_attachments = []
self.mail_client.select(mail_folder)
for mail_id in self._list_mail_ids_desc(mail_filter):
response_mail_body = self._fetch_mail_body(mail_id)
matching_attachments = self._check_mail_body(response_mail_body, name, check_regex, latest_only)
if matching_attachments:
all_matching_attachments.extend(matching_attachments)
if latest_only:
break
self.mail_client.close()
return all_matching_attachments
def _list_mail_ids_desc(self, mail_filter: str) -> Iterable[str]:
if not self.mail_client:
raise Exception("The 'mail_client' should be initialized before!")
_, data = self.mail_client.search(None, mail_filter)
mail_ids = data[0].split()
return reversed(mail_ids)
def _fetch_mail_body(self, mail_id: str) -> str:
if not self.mail_client:
raise Exception("The 'mail_client' should be initialized before!")
_, data = self.mail_client.fetch(mail_id, "(RFC822)")
mail_body = data[0][1] # type: ignore # The mail body is always in this specific location
mail_body_str = mail_body.decode("utf-8") # type: ignore
return mail_body_str
def _check_mail_body(
self, response_mail_body: str, name: str, check_regex: bool, latest_only: bool
) -> list[tuple[Any, Any]]:
mail = Mail(response_mail_body)
if mail.has_attachments():
return mail.get_attachments_by_name(name, check_regex, find_first=latest_only)
return []
def _create_files(self, mail_attachments: list, local_output_directory: str) -> None:
for name, payload in mail_attachments:
if self._is_symlink(name):
self.log.error("Can not create file because it is a symlink!")
elif self._is_escaping_current_directory(name):
self.log.error("Can not create file because it is escaping the current directory!")
else:
self._create_file(name, payload, local_output_directory)
def _is_symlink(self, name: str) -> bool:
# IMPORTANT NOTE: os.path.islink is not working for windows symlinks
# See: https://stackoverflow.com/a/11068434
return os.path.islink(name)
def _is_escaping_current_directory(self, name: str) -> bool:
return "../" in name
def _correct_path(self, name: str, local_output_directory: str) -> str:
return (
local_output_directory + name
if local_output_directory.endswith("/")
else local_output_directory + "/" + name
)
def _create_file(self, name: str, payload: Any, local_output_directory: str) -> None:
file_path = self._correct_path(name, local_output_directory)
with open(file_path, "wb") as file:
file.write(payload)
class Mail(LoggingMixin):
"""
This class simplifies working with mails returned by the imaplib client.
:param mail_body: The mail body of a mail received from imaplib client.
"""
def __init__(self, mail_body: str) -> None:
super().__init__()
self.mail = email.message_from_string(mail_body)
def has_attachments(self) -> bool:
"""
Checks the mail for a attachments.
:returns: True if it has attachments and False if not.
"""
return self.mail.get_content_maintype() == "multipart"
def get_attachments_by_name(
self, name: str, check_regex: bool, find_first: bool = False
) -> list[tuple[Any, Any]]:
"""
Gets all attachments by name for the mail.
:param name: The name of the attachment to look for.
:param check_regex: Checks the name for a regular expression.
:param find_first: If set to True it will only find the first match and then quit.
:returns: a list of tuples each containing name and payload
where the attachments name matches the given name.
"""
attachments = []
for attachment in self._iterate_attachments():
found_attachment = (
attachment.has_matching_name(name) if check_regex else attachment.has_equal_name(name)
)
if found_attachment:
file_name, file_payload = attachment.get_file()
self.log.info("Found attachment: %s", file_name)
attachments.append((file_name, file_payload))
if find_first:
break
return attachments
def _iterate_attachments(self) -> Iterable[MailPart]:
for part in self.mail.walk():
mail_part = MailPart(part)
if mail_part.is_attachment():
yield mail_part
class MailPart:
"""
This class is a wrapper for a Mail object's part and gives it more features.
:param part: The mail part in a Mail object.
"""
def __init__(self, part: Any) -> None:
self.part = part
def is_attachment(self) -> bool:
"""
Checks if the part is a valid mail attachment.
:returns: True if it is an attachment and False if not.
"""
return self.part.get_content_maintype() != "multipart" and self.part.get("Content-Disposition")
    def has_matching_name(self, name: str) -> re.Match | None:
        """
        Checks if the part's name matches the given name as a regular expression.
        :param name: The pattern to look for.
        :returns: the match object if the part's name matches the pattern, otherwise None.
        """
        return re.match(name, self.part.get_filename())  # type: ignore
def has_equal_name(self, name: str) -> bool:
"""
Checks if the given name is equal to the part's name.
:param name: The name to look for.
:returns: True if it is equal to the given name.
"""
return self.part.get_filename() == name
def get_file(self) -> tuple:
"""
Gets the file including name and payload.
:returns: the part's name and payload.
"""
return self.part.get_filename(), self.part.get_payload(decode=True)
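# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the provider API). The connection
# id, attachment pattern and output directory are hypothetical; it shows the
# context-manager usage the ``ImapHook`` docstring asks for.
# ---------------------------------------------------------------------------
def _example_download_attachments() -> None:
    """Download csv attachments matching a regex from the INBOX (never called at import time)."""
    with ImapHook(imap_conn_id="imap_default") as imap_hook:
        if imap_hook.has_mail_attachment(name=r"invoice_.*\.csv", check_regex=True):
            imap_hook.download_mail_attachments(
                name=r"invoice_.*\.csv",
                local_output_directory="/tmp/invoices",
                check_regex=True,
                latest_only=True,
            )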
| 14,068 | 37.545205 | 108 | py |
airflow | airflow-main/airflow/providers/imap/sensors/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/imap/sensors/imap_attachment.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module allows you to poke for attachments on a mail server."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.providers.imap.hooks.imap import ImapHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class ImapAttachmentSensor(BaseSensorOperator):
"""
Waits for a specific attachment on a mail server.
:param attachment_name: The name of the attachment that will be checked.
:param check_regex: If set to True the attachment's name will be parsed as regular expression.
Through this you can get a broader set of attachments
that it will look for than just only the equality of the attachment name.
:param mail_folder: The mail folder in where to search for the attachment.
:param mail_filter: If set other than 'All' only specific mails will be checked.
See :py:meth:`imaplib.IMAP4.search` for details.
    :param conn_id: The :ref:`imap connection id <howto/connection:imap>` to run the sensor against.
"""
template_fields: Sequence[str] = ("attachment_name", "mail_filter")
def __init__(
self,
*,
attachment_name,
check_regex=False,
mail_folder="INBOX",
mail_filter="All",
conn_id="imap_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.attachment_name = attachment_name
self.check_regex = check_regex
self.mail_folder = mail_folder
self.mail_filter = mail_filter
self.conn_id = conn_id
def poke(self, context: Context) -> bool:
"""
Pokes for a mail attachment on the mail server.
:param context: The context that is being provided when poking.
:return: True if attachment with the given name is present and False if not.
"""
self.log.info("Poking for %s", self.attachment_name)
with ImapHook(imap_conn_id=self.conn_id) as imap_hook:
return imap_hook.has_mail_attachment(
name=self.attachment_name,
check_regex=self.check_regex,
mail_folder=self.mail_folder,
mail_filter=self.mail_filter,
)
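# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the provider API). Task id,
# attachment pattern and poke interval are hypothetical placeholders.
# ---------------------------------------------------------------------------
def _example_imap_attachment_sensor() -> ImapAttachmentSensor:
    """Build (but do not schedule) a sensor that waits for a daily csv report."""
    return ImapAttachmentSensor(
        task_id="wait_for_report",
        conn_id="imap_default",
        attachment_name=r"report_\d{8}\.csv",  # interpreted as a regex because of check_regex
        check_regex=True,
        mail_folder="INBOX",
        poke_interval=300,
    )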
| 3,054 | 37.1875 | 105 | py |
airflow | airflow-main/airflow/providers/datadog/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.3.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-datadog:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,532 | 35.5 | 116 | py |
airflow | airflow-main/airflow/providers/datadog/hooks/datadog.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
from typing import Any
from datadog import api, initialize # type: ignore[attr-defined]
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class DatadogHook(BaseHook, LoggingMixin):
"""
Uses datadog API to send metrics of practically anything measurable.
It's possible to track # of db records inserted/deleted, records read
from file and many other useful metrics.
Depends on the datadog API, which has to be deployed on the same server where
Airflow runs.
:param datadog_conn_id: The connection to datadog, containing metadata for api keys.
"""
conn_name_attr = "datadog_conn_id"
default_conn_name = "datadog_default"
conn_type = "datadog"
hook_name = "Datadog"
def __init__(self, datadog_conn_id: str = "datadog_default") -> None:
super().__init__()
conn = self.get_connection(datadog_conn_id)
self.api_key = conn.extra_dejson.get("api_key", None)
self.app_key = conn.extra_dejson.get("app_key", None)
self.api_host = conn.extra_dejson.get("api_host", None)
self.source_type_name = conn.extra_dejson.get("source_type_name", None)
# If the host is populated, it will use that hostname instead.
# for all metric submissions.
self.host = conn.host
if self.api_key is None:
raise AirflowException("api_key must be specified in the Datadog connection details")
self.log.info("Setting up api keys for Datadog")
initialize(api_key=self.api_key, app_key=self.app_key, api_host=self.api_host)
def validate_response(self, response: dict[str, Any]) -> None:
"""Validate Datadog response."""
if response["status"] != "ok":
self.log.error("Datadog returned: %s", response)
raise AirflowException("Error status received from Datadog")
def send_metric(
self,
metric_name: str,
datapoint: float | int,
tags: list[str] | None = None,
type_: str | None = None,
interval: int | None = None,
) -> dict[str, Any]:
"""
Sends a single datapoint metric to Datadog.
:param metric_name: The name of the metric
:param datapoint: A single integer or float related to the metric
:param tags: A list of tags associated with the metric
:param type_: Type of your metric: gauge, rate, or count
:param interval: If the type of the metric is rate or count, define the corresponding interval
"""
response = api.Metric.send(
metric=metric_name, points=datapoint, host=self.host, tags=tags, type=type_, interval=interval
)
self.validate_response(response)
return response
def query_metric(self, query: str, from_seconds_ago: int, to_seconds_ago: int) -> dict[str, Any]:
"""
Query datadog for a metric, potentially with some function applied to it and return the result.
:param query: The datadog query to execute (see datadog docs)
:param from_seconds_ago: How many seconds ago to start querying for.
:param to_seconds_ago: Up to how many seconds ago to query for.
"""
now = int(time.time())
response = api.Metric.query(start=now - from_seconds_ago, end=now - to_seconds_ago, query=query)
self.validate_response(response)
return response
def post_event(
self,
title: str,
text: str,
aggregation_key: str | None = None,
alert_type: str | None = None,
date_happened: int | None = None,
handle: str | None = None,
priority: str | None = None,
related_event_id: int | None = None,
tags: list[str] | None = None,
device_name: list[str] | None = None,
) -> dict[str, Any]:
"""
Posts an event to datadog (processing finished, potentially alerts, other issues).
Think about this as a means to maintain persistence of alerts, rather than alerting itself.
:param title: The title of the event
:param text: The body of the event (more information)
:param aggregation_key: Key that can be used to aggregate this event in a stream
:param alert_type: The alert type for the event, one of
["error", "warning", "info", "success"]
:param date_happened: POSIX timestamp of the event; defaults to now
        :param handle: User to post the event as; defaults to owner of the application key used
            to submit.
:param priority: Priority to post the event as. ("normal" or "low", defaults to "normal")
:param related_event_id: Post event as a child of the given event
:param tags: List of tags to apply to the event
:param device_name: device_name to post the event with
"""
response = api.Event.create(
title=title,
text=text,
aggregation_key=aggregation_key,
alert_type=alert_type,
date_happened=date_happened,
handle=handle,
priority=priority,
related_event_id=related_event_id,
tags=tags,
host=self.host,
device_name=device_name,
source_type_name=self.source_type_name,
)
self.validate_response(response)
return response
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"api_host": StringField(lazy_gettext("API endpoint"), widget=BS3TextFieldWidget()),
"api_key": StringField(lazy_gettext("API key"), widget=BS3TextFieldWidget()),
"app_key": StringField(lazy_gettext("Application key"), widget=BS3TextFieldWidget()),
"source_type_name": StringField(lazy_gettext("Source type name"), widget=BS3TextFieldWidget()),
}
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema", "login", "password", "port", "extra"],
"relabeling": {"host": "Events host name"},
}
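# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the provider API). Connection id,
# metric name, tags and event text are hypothetical placeholders.
# ---------------------------------------------------------------------------
def _example_datadog_hook_usage() -> None:
    """Send a gauge metric and post an event (never called at import time)."""
    hook = DatadogHook(datadog_conn_id="datadog_default")
    hook.send_metric(
        metric_name="airflow.example.rows_loaded",
        datapoint=42,
        tags=["env:dev", "dag:example"],
        type_="gauge",
    )
    hook.post_event(
        title="example pipeline finished",
        text="Loaded 42 rows.",
        alert_type="success",
        tags=["env:dev"],
    )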
| 7,292 | 39.071429 | 107 | py |
airflow | airflow-main/airflow/providers/datadog/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/datadog/sensors/datadog.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable
from datadog import api
from airflow.exceptions import AirflowException
from airflow.providers.datadog.hooks.datadog import DatadogHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DatadogSensor(BaseSensorOperator):
"""
A sensor to listen, with a filter, to datadog event streams and determine if some event was emitted.
Depends on the datadog API, which has to be deployed on the same server where Airflow runs.
:param datadog_conn_id: The connection to datadog, containing metadata for api keys.
:param from_seconds_ago: POSIX timestamp start (default 3600).
:param up_to_seconds_from_now: POSIX timestamp end (default 0).
:param priority: Priority of your events, either low or normal.
    :param sources: A comma-separated list of event sources used to filter the returned events.
    :param tags: A list of tags to filter the event stream by.
    :param response_check: A check against the Datadog API response. The callable takes
        the response object as its only positional argument and should return True for
        'pass' and False otherwise.
"""
ui_color = "#66c3dd"
def __init__(
self,
*,
datadog_conn_id: str = "datadog_default",
from_seconds_ago: int = 3600,
up_to_seconds_from_now: int = 0,
priority: str | None = None,
sources: str | None = None,
tags: list[str] | None = None,
response_check: Callable[[dict[str, Any]], bool] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.datadog_conn_id = datadog_conn_id
self.from_seconds_ago = from_seconds_ago
self.up_to_seconds_from_now = up_to_seconds_from_now
self.priority = priority
self.sources = sources
self.tags = tags
self.response_check = response_check
def poke(self, context: Context) -> bool:
# This instantiates the hook, but doesn't need it further,
# because the API authenticates globally (unfortunately),
# but for airflow this shouldn't matter too much, because each
# task instance runs in its own process anyway.
DatadogHook(datadog_conn_id=self.datadog_conn_id)
response = api.Event.query(
start=self.from_seconds_ago,
end=self.up_to_seconds_from_now,
priority=self.priority,
sources=self.sources,
tags=self.tags,
)
if isinstance(response, dict) and response.get("status", "ok") != "ok":
self.log.error("Unexpected Datadog result: %s", response)
raise AirflowException("Datadog returned unexpected result")
if self.response_check:
# run content check on response
return self.response_check(response)
# If no check was inserted, assume any event that matched yields true.
return len(response) > 0
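# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the provider API). The tag filter
# and the ``response_check`` are hypothetical; the check receives the raw
# Datadog response (assumed here to carry an ``"events"`` list) and must
# return a boolean.
# ---------------------------------------------------------------------------
def _example_datadog_sensor() -> DatadogSensor:
    """Build (but do not schedule) a sensor that waits for a deployment event."""
    return DatadogSensor(
        task_id="wait_for_deploy_event",
        datadog_conn_id="datadog_default",
        from_seconds_ago=3600,
        up_to_seconds_from_now=0,
        tags=["deployment:my-service"],
        response_check=lambda response: len(response.get("events", [])) > 0,
    )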
| 4,052 | 39.53 | 104 | py |
airflow | airflow-main/airflow/providers/datadog/sensors/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apprise/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "1.0.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apprise:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,532 | 35.5 | 116 | py |
airflow | airflow-main/airflow/providers/apprise/notifications/apprise.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import Iterable
from airflow.exceptions import AirflowOptionalProviderFeatureException
try:
from airflow.notifications.basenotifier import BaseNotifier
except ImportError:
raise AirflowOptionalProviderFeatureException(
"Failed to import BaseNotifier. This feature is only available in Airflow versions >= 2.6.0"
)
from airflow.providers.apprise.hooks.apprise import AppriseHook
from apprise import AppriseConfig, NotifyFormat, NotifyType
class AppriseNotifier(BaseNotifier):
"""
Apprise BaseNotifier.
:param body: Specify the message body
    :param title: Specify the message title. This field is completely optional.
:param notify_type: Specify the message type (default=info). Possible values are "info",
"success", "failure", and "warning"
:param body_format: Specify the input message format (default=text). Possible values are "text",
"html", and "markdown".
:param tag: Specify one or more tags to filter which services to notify
:param attach: Specify one or more file attachment locations
:param interpret_escapes: Enable interpretation of backslash escapes. For example, this would convert
        sequences such as \\n and \\r to their respective ascii new-line and carriage return characters
    :param config: Specify one or more configurations
:param apprise_conn_id: connection that has Apprise configs setup
""" # noqa: D301
template_fields = ("body", "title", "tag", "attach")
def __init__(
self,
*,
body: str,
title: str | None = None,
notify_type: NotifyType | None = None,
body_format: NotifyFormat | None = None,
tag: str | Iterable[str] | None = None,
attach: str | None = None,
interpret_escapes: bool | None = None,
config: AppriseConfig | None = None,
apprise_conn_id: str = AppriseHook.default_conn_name,
):
super().__init__()
self.apprise_conn_id = apprise_conn_id
self.body = body
self.title = title
self.notify_type = notify_type
self.body_format = body_format
self.tag = tag
self.attach = attach
self.interpret_escapes = interpret_escapes
self.config = config
@cached_property
def hook(self) -> AppriseHook:
"""Apprise Hook."""
return AppriseHook(apprise_conn_id=self.apprise_conn_id)
def notify(self, context):
"""Send a alert to a apprise configured service."""
self.hook.notify(
body=self.body,
title=self.title,
notify_type=self.notify_type,
body_format=self.body_format,
tag=self.tag,
attach=self.attach,
interpret_escapes=self.interpret_escapes,
config=self.config,
)
send_apprise_notification = AppriseNotifier
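# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the provider API). It shows the
# typical callback wiring; the message body and tag are hypothetical and the
# notifier relies on the default ``apprise_default`` connection.
# ---------------------------------------------------------------------------
def _example_failure_notifier() -> AppriseNotifier:
    """Return a notifier suitable for ``on_failure_callback`` (never called at import time)."""
    return send_apprise_notification(
        body="The dag {{ dag.dag_id }} failed",
        notify_type=NotifyType.FAILURE,
        tag="alerts",
    )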
| 3,714 | 36.15 | 105 | py |
airflow | airflow-main/airflow/providers/apprise/notifications/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
| 1,053 | 41.16 | 82 | py |
airflow | airflow-main/airflow/providers/apprise/hooks/apprise.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from typing import Any, Iterable
import apprise
from airflow.hooks.base import BaseHook
from apprise import AppriseConfig, NotifyFormat, NotifyType
class AppriseHook(BaseHook):
"""
    Use Apprise (https://github.com/caronc/apprise) to interact with notification services.
The complete list of notification services supported by Apprise can be found at:
https://github.com/caronc/apprise/wiki#notification-services.
:param apprise_conn_id: :ref:`Apprise connection id <howto/connection:apprise>`
that has services configured in the `config` field.
"""
conn_name_attr = "apprise_conn_id"
default_conn_name = "apprise_default"
conn_type = "apprise"
hook_name = "Apprise"
def __init__(self, apprise_conn_id: str = default_conn_name) -> None:
super().__init__()
self.apprise_conn_id = apprise_conn_id
def get_config_from_conn(self):
conn = self.get_connection(self.apprise_conn_id)
return json.loads(conn.extra_dejson["config"])
def set_config_from_conn(self, apprise_obj: apprise.Apprise):
"""Set config from connection to apprise object."""
config_object = self.get_config_from_conn()
if isinstance(config_object, list):
for config in config_object:
apprise_obj.add(config["path"], tag=config.get("tag", None))
elif isinstance(config_object, dict):
apprise_obj.add(config_object["path"], tag=config_object.get("tag", None))
else:
raise ValueError(
f"Only types of dict or list[dict] are expected in Apprise connections,"
f" got {type(config_object)}"
)
def notify(
self,
body: str,
title: str | None = None,
notify_type: NotifyType = NotifyType.INFO,
body_format: NotifyFormat = NotifyFormat.TEXT,
tag: str | Iterable[str] | None = None,
attach: str | Iterable[str] | None = None,
interpret_escapes: bool | None = None,
config: AppriseConfig | None = None,
):
"""
Send message to plugged-in services.
:param body: Specify the message body
:param title: Specify the message title. (optional)
:param notify_type: Specify the message type (default=info). Possible values are "info",
"success", "failure", and "warning"
:param body_format: Specify the input message format (default=text). Possible values are "text",
"html", and "markdown".
:param tag: Specify one or more tags to filter which services to notify
:param attach: Specify one or more file attachment locations
:param interpret_escapes: Enable interpretation of backslash escapes. For example, this would convert
sequences such as \\n and \\r to their respective ascii new-line and carriage return characters
        :param config: Specify one or more configurations
""" # noqa: D301
title = title or ""
apprise_obj = apprise.Apprise()
if config:
apprise_obj.add(config)
else:
self.set_config_from_conn(apprise_obj)
apprise_obj.notify(
body=body,
title=title,
notify_type=notify_type,
body_format=body_format,
tag=tag,
attach=attach,
interpret_escapes=interpret_escapes,
)
def get_conn(self) -> None:
raise NotImplementedError()
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget
from flask_babel import lazy_gettext
from wtforms import PasswordField
return {
"config": PasswordField(
lazy_gettext("config"),
widget=BS3PasswordFieldWidget(),
description='format example - {"path": "service url", "tag": "alerts"} or '
'[{"path": "service url", "tag": "alerts"},'
' {"path": "service url", "tag": "alerts"}]',
),
}
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
return {
"hidden_fields": ["host", "schema", "login", "password", "port", "extra"],
"relabeling": {},
}
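# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the provider API). It assumes the
# ``apprise_default`` connection carries a ``config`` extra shaped like the
# example given in ``get_connection_form_widgets`` above, e.g.
# {"path": "mailto://user:password@example.com", "tag": "alerts"}.
# ---------------------------------------------------------------------------
def _example_apprise_notify() -> None:
    """Send a notification through the services configured on the connection (never called at import time)."""
    hook = AppriseHook(apprise_conn_id=AppriseHook.default_conn_name)
    hook.notify(
        body="Task finished",
        title="Airflow",
        notify_type=NotifyType.SUCCESS,
        tag="alerts",
    )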
| 5,224 | 37.703704 | 109 | py |
airflow | airflow-main/airflow/providers/apprise/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
| 1,053 | 41.16 | 82 | py |
airflow | airflow-main/airflow/providers/pagerduty/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.3.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-pagerduty:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,534 | 35.547619 | 118 | py |
airflow | airflow-main/airflow/providers/pagerduty/notifications/pagerduty.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import Any
from airflow.exceptions import AirflowOptionalProviderFeatureException
try:
from airflow.notifications.basenotifier import BaseNotifier
except ImportError:
raise AirflowOptionalProviderFeatureException(
"Failed to import BaseNotifier. This feature is only available in Airflow versions >= 2.6.0"
)
from airflow.providers.pagerduty.hooks.pagerduty_events import PagerdutyEventsHook
class PagerdutyNotifier(BaseNotifier):
"""
Pagerduty BaseNotifier.
:param summary: Summary for the event
:param severity: Severity for the event, needs to be one of: info, warning, error, critical
:param source: Specific human-readable unique identifier, such as a
hostname, for the system having the problem.
:param action: Event action, needs to be one of: trigger, acknowledge,
resolve. Default to trigger if not specified.
:param dedup_key: A string which identifies the alert triggered for the given event.
Required for the actions acknowledge and resolve.
:param custom_details: Free-form details from the event. Can be a dictionary or a string.
If a dictionary is passed it will show up in PagerDuty as a table.
:param group: A cluster or grouping of sources. For example, sources
"prod-datapipe-02" and "prod-datapipe-03" might both be part of "prod-datapipe"
:param component: The part or component of the affected system that is broken.
:param class_type: The class/type of the event.
:param images: List of images to include. Each dictionary in the list accepts the following keys:
`src`: The source (URL) of the image being attached to the incident. This image must be served via
HTTPS.
`href`: [Optional] URL to make the image a clickable link.
`alt`: [Optional] Alternative text for the image.
:param links: List of links to include. Each dictionary in the list accepts the following keys:
`href`: URL of the link to be attached.
`text`: [Optional] Plain text that describes the purpose of the link, and can be used as the
link's text.
:param integration_key: PagerDuty Events API token
:param pagerduty_events_conn_id: connection that has PagerDuty integration key in the Pagerduty
API token field
"""
template_fields = (
"summary",
"severity",
"source",
"action",
"dedup_key",
"custom_details",
"group",
"component",
"class_type",
"images",
"links",
)
def __init__(
self,
*,
summary: str,
severity: str,
source: str = "airflow",
action: str = "trigger",
dedup_key: str | None = None,
custom_details: Any | None = None,
group: str | None = None,
component: str | None = None,
class_type: str | None = None,
images: list[Any] | None = None,
links: list[Any] | None = None,
pagerduty_events_conn_id: str = "pagerduty_events_default",
integration_key: str | None = None,
):
super().__init__()
self.pagerduty_events_conn_id = pagerduty_events_conn_id
self.integration_key = integration_key
self.summary = summary
self.severity = severity
self.source = source
self.action = action
self.dedup_key = dedup_key
        self.custom_details = custom_details
        self.group = group
        self.component = component
        self.class_type = class_type
self.images = images
self.links = links
@cached_property
def hook(self) -> PagerdutyEventsHook:
"""Pagerduty Events Hook."""
return PagerdutyEventsHook(
pagerduty_events_conn_id=self.pagerduty_events_conn_id, integration_key=self.integration_key
)
def notify(self, context):
"""Send a alert to a pagerduty event v2 API."""
self.hook.create_event(
summary=self.summary,
severity=self.severity,
source=self.source,
action=self.action,
dedup_key=self.dedup_key,
custom_details=self.custom_details,
group=self.group,
component=self.component,
class_type=self.class_type,
images=self.images,
links=self.links,
)
send_pagerduty_notification = PagerdutyNotifier
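# Hypothetical usage sketch: one way the ``send_pagerduty_notification`` alias could be wired
# into a DAG as an ``on_failure_callback``. The helper, DAG id, schedule, summary text and task
# id below are assumptions for illustration only and are not part of this module.
def _example_pagerduty_notifier_usage():  # pragma: no cover - illustrative sketch only
    from datetime import datetime
    from airflow import DAG
    from airflow.operators.empty import EmptyOperator
    with DAG(
        dag_id="example_pagerduty_notifier",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        on_failure_callback=send_pagerduty_notification(
            summary="The DAG {{ dag.dag_id }} failed",
            severity="critical",
            source="airflow",
        ),
    ) as dag:
        EmptyOperator(task_id="noop")
    return dag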
| 5,368 | 37.35 | 106 | py |
airflow | airflow-main/airflow/providers/pagerduty/notifications/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/pagerduty/hooks/pagerduty_events.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for sending or receiving data from PagerDuty as well as creating PagerDuty incidents."""
from __future__ import annotations
import warnings
from datetime import datetime
from typing import Any
import pdpyras
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.hooks.base import BaseHook
class PagerdutyEventsHook(BaseHook):
"""
This class can be used to interact with the Pagerduty Events API.
It takes both an Events API token and a PagerDuty connection with the Events API token
    (i.e. Integration key) as the password/Pagerduty API token. If both are supplied, the token will be used.
:param integration_key: PagerDuty Events API token
:param pagerduty_events_conn_id: connection that has PagerDuty integration key in the Pagerduty
API token field
"""
conn_name_attr = "pagerduty_events_conn_id"
default_conn_name = "pagerduty_events_default"
conn_type = "pagerduty_events"
hook_name = "Pagerduty Events"
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["port", "login", "schema", "host", "extra"],
"relabeling": {
"password": "Pagerduty Integration key",
},
}
def __init__(
self, integration_key: str | None = None, pagerduty_events_conn_id: str | None = None
) -> None:
super().__init__()
self.integration_key = None
self._session = None
if pagerduty_events_conn_id is not None:
conn = self.get_connection(pagerduty_events_conn_id)
self.integration_key = conn.get_password()
if integration_key is not None: # token takes higher priority
self.integration_key = integration_key
if self.integration_key is None:
raise AirflowException(
"Cannot get token: No valid integration key nor pagerduty_events_conn_id supplied."
)
def create_event(
self,
summary: str,
severity: str,
source: str = "airflow",
action: str = "trigger",
dedup_key: str | None = None,
custom_details: Any | None = None,
group: str | None = None,
component: str | None = None,
class_type: str | None = None,
images: list[Any] | None = None,
links: list[Any] | None = None,
) -> dict:
"""
Create event for service integration.
:param summary: Summary for the event
:param severity: Severity for the event, needs to be one of: info, warning, error, critical
:param source: Specific human-readable unique identifier, such as a
hostname, for the system having the problem.
:param action: Event action, needs to be one of: trigger, acknowledge,
resolve. Default to trigger if not specified.
:param dedup_key: A string which identifies the alert triggered for the given event.
Required for the actions acknowledge and resolve.
:param custom_details: Free-form details from the event. Can be a dictionary or a string.
If a dictionary is passed it will show up in PagerDuty as a table.
:param group: A cluster or grouping of sources. For example, sources
"prod-datapipe-02" and "prod-datapipe-03" might both be part of "prod-datapipe"
:param component: The part or component of the affected system that is broken.
:param class_type: The class/type of the event.
:param images: List of images to include. Each dictionary in the list accepts the following keys:
`src`: The source (URL) of the image being attached to the incident. This image must be served via
HTTPS.
`href`: [Optional] URL to make the image a clickable link.
`alt`: [Optional] Alternative text for the image.
:param links: List of links to include. Each dictionary in the list accepts the following keys:
`href`: URL of the link to be attached.
`text`: [Optional] Plain text that describes the purpose of the link, and can be used as the
link's text.
:return: PagerDuty Events API v2 response.
"""
warnings.warn(
"This method will be deprecated. Please use the "
"`PagerdutyEventsHook.send_event` to interact with the Events API",
AirflowProviderDeprecationWarning,
stacklevel=1,
)
data = PagerdutyEventsHook.prepare_event_data(
summary=summary,
severity=severity,
source=source,
custom_details=custom_details,
component=component,
group=group,
class_type=class_type,
action=action,
dedup_key=dedup_key,
images=images,
links=links,
action_key_name="event_action",
)
session = pdpyras.EventsAPISession(self.integration_key)
resp = session.post("/v2/enqueue", json=data)
resp.raise_for_status()
return resp.json()
def send_event(
self,
summary: str,
severity: str,
source: str = "airflow",
action: str = "trigger",
dedup_key: str | None = None,
custom_details: Any | None = None,
group: str | None = None,
component: str | None = None,
class_type: str | None = None,
images: list[Any] | None = None,
links: list[Any] | None = None,
) -> dict:
"""
Create event for service integration.
:param summary: Summary for the event
:param severity: Severity for the event, needs to be one of: info, warning, error, critical
:param source: Specific human-readable unique identifier, such as a
hostname, for the system having the problem.
:param action: Event action, needs to be one of: trigger, acknowledge,
resolve. Default to trigger if not specified.
:param dedup_key: A string which identifies the alert triggered for the given event.
Required for the actions acknowledge and resolve.
:param custom_details: Free-form details from the event. Can be a dictionary or a string.
If a dictionary is passed it will show up in PagerDuty as a table.
:param group: A cluster or grouping of sources. For example, sources
"prod-datapipe-02" and "prod-datapipe-03" might both be part of "prod-datapipe"
:param component: The part or component of the affected system that is broken.
:param class_type: The class/type of the event.
:param images: List of images to include. Each dictionary in the list accepts the following keys:
`src`: The source (URL) of the image being attached to the incident. This image must be served via
HTTPS.
`href`: [Optional] URL to make the image a clickable link.
`alt`: [Optional] Alternative text for the image.
:param links: List of links to include. Each dictionary in the list accepts the following keys:
`href`: URL of the link to be attached.
`text`: [Optional] Plain text that describes the purpose of the link, and can be used as the
link's text.
:return: PagerDuty Events API v2 response.
"""
data = PagerdutyEventsHook.prepare_event_data(
summary=summary,
severity=severity,
source=source,
custom_details=custom_details,
component=component,
group=group,
class_type=class_type,
action=action,
dedup_key=dedup_key,
images=images,
links=links,
)
session = pdpyras.EventsAPISession(self.integration_key)
return session.send_event(**data)
@staticmethod
def prepare_event_data(
summary,
severity,
source,
custom_details,
component,
group,
class_type,
action,
dedup_key,
images,
links,
action_key_name: str = "action",
) -> dict:
"""Prepare event data for send_event / post('/v2/enqueue') method."""
payload = {
"summary": summary,
"severity": severity,
"source": source,
}
if custom_details is not None:
payload["custom_details"] = custom_details
if component:
payload["component"] = component
if group:
payload["group"] = group
if class_type:
payload["class"] = class_type
actions = ("trigger", "acknowledge", "resolve")
if action not in actions:
raise ValueError(f"Event action must be one of: {', '.join(actions)}")
data = {
action_key_name: action,
"payload": payload,
}
if dedup_key:
data["dedup_key"] = dedup_key
elif action != "trigger":
raise ValueError(
f"The dedup_key property is required for {action_key_name}={action} events,"
f" and it must be a string."
)
if images is not None:
data["images"] = images
if links is not None:
data["links"] = links
return data
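    # For illustration (values are hypothetical): with only the required fields set, e.g.
    # prepare_event_data(summary="Example", severity="error", source="airflow", custom_details=None,
    # component=None, group=None, class_type=None, action="trigger", dedup_key=None, images=None,
    # links=None), the returned dict is:
    #   {"action": "trigger", "payload": {"summary": "Example", "severity": "error", "source": "airflow"}}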
def create_change_event(
self,
summary: str,
source: str = "airflow",
custom_details: Any | None = None,
timestamp: datetime | None = None,
links: list[Any] | None = None,
) -> dict:
"""
Create change event for service integration.
:param summary: Summary for the event
:param source: Specific human-readable unique identifier, such as a
hostname, for the system having the problem.
:param custom_details: Free-form details from the event. Can be a dictionary or a string.
If a dictionary is passed it will show up in PagerDuty as a table.
:param timestamp: The time at which the emitting tool detected or generated the event.
:param links: List of links to include. Each dictionary in the list accepts the following keys:
`href`: URL of the link to be attached.
`text`: [Optional] Plain text that describes the purpose of the link, and can be used as the
link's text.
:return: PagerDuty Change Events API v2 response.
"""
payload = {
"summary": summary,
}
if custom_details is not None:
payload["custom_details"] = custom_details
if timestamp is not None:
payload["timestamp"] = timestamp.isoformat()
if source is not None:
payload["source"] = source
data: dict[str, Any] = {"payload": payload}
if links is not None:
data["links"] = links
session = pdpyras.ChangeEventsAPISession(self.integration_key)
return session.send_change_event(payload=payload, links=links)
def test_connection(self):
try:
session = pdpyras.EventsAPISession(self.integration_key)
session.resolve("some_dedup_key_that_dont_exist")
except Exception:
return False, "connection test failed, invalid routing key"
return True, "connection tested successfully"
| 12,253 | 39.177049 | 110 | py |
airflow | airflow-main/airflow/providers/pagerduty/hooks/pagerduty.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for sending or receiving data from PagerDuty as well as creating PagerDuty incidents."""
from __future__ import annotations
import warnings
from typing import Any
import pdpyras
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.hooks.base import BaseHook
from airflow.providers.pagerduty.hooks.pagerduty_events import PagerdutyEventsHook
class PagerdutyHook(BaseHook):
"""
The PagerdutyHook can be used to interact with both the PagerDuty API and the PagerDuty Events API.
    Takes both a PagerDuty API token directly and a connection that has a PagerDuty API token.
    If both are supplied, the PagerDuty API token will be used.
In these cases, the PagerDuty API token refers to an account token:
https://support.pagerduty.com/docs/generating-api-keys#generating-a-general-access-rest-api-key
https://support.pagerduty.com/docs/generating-api-keys#generating-a-personal-rest-api-key
In order to send events (with the Pagerduty Events API), you will also need to specify the
routing_key (or Integration key) in the ``extra`` field
:param token: PagerDuty API token
:param pagerduty_conn_id: connection that has PagerDuty API token in the password field
"""
conn_name_attr = "pagerduty_conn_id"
default_conn_name = "pagerduty_default"
conn_type = "pagerduty"
hook_name = "Pagerduty"
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["port", "login", "schema", "host"],
"relabeling": {
"password": "Pagerduty API token",
},
}
def __init__(self, token: str | None = None, pagerduty_conn_id: str | None = None) -> None:
super().__init__()
self.routing_key = None
self._session = None
if pagerduty_conn_id is not None:
conn = self.get_connection(pagerduty_conn_id)
self.token = conn.get_password()
routing_key = conn.extra_dejson.get("routing_key")
if routing_key:
self.routing_key = routing_key
if token is not None: # token takes higher priority
self.token = token
if self.token is None:
raise AirflowException("Cannot get token: No valid api token nor pagerduty_conn_id supplied.")
def get_session(self) -> pdpyras.APISession:
"""
Returns `pdpyras.APISession` for use with sending or receiving data through the PagerDuty REST API.
The `pdpyras` library supplies a class `pdpyras.APISession` extending `requests.Session` from the
Requests HTTP library.
Documentation on how to use the `APISession` class can be found at:
https://pagerduty.github.io/pdpyras/#data-access-abstraction
"""
self._session = pdpyras.APISession(self.token)
return self._session
def create_event(
self,
summary: str,
severity: str,
source: str = "airflow",
action: str = "trigger",
routing_key: str | None = None,
dedup_key: str | None = None,
custom_details: Any | None = None,
group: str | None = None,
component: str | None = None,
class_type: str | None = None,
images: list[Any] | None = None,
links: list[Any] | None = None,
) -> dict:
"""
Create event for service integration.
:param summary: Summary for the event
:param severity: Severity for the event, needs to be one of: info, warning, error, critical
:param source: Specific human-readable unique identifier, such as a
hostname, for the system having the problem.
:param action: Event action, needs to be one of: trigger, acknowledge,
resolve. Default to trigger if not specified.
:param routing_key: Integration key. If not specified, will try to read
from connection's extra json blob.
:param dedup_key: A string which identifies the alert triggered for the given event.
Required for the actions acknowledge and resolve.
:param custom_details: Free-form details from the event. Can be a dictionary or a string.
If a dictionary is passed it will show up in PagerDuty as a table.
:param group: A cluster or grouping of sources. For example, sources
"prod-datapipe-02" and "prod-datapipe-03" might both be part of "prod-datapipe"
:param component: The part or component of the affected system that is broken.
:param class_type: The class/type of the event.
:param images: List of images to include. Each dictionary in the list accepts the following keys:
`src`: The source (URL) of the image being attached to the incident. This image must be served via
HTTPS.
`href`: [Optional] URL to make the image a clickable link.
`alt`: [Optional] Alternative text for the image.
:param links: List of links to include. Each dictionary in the list accepts the following keys:
`href`: URL of the link to be attached.
`text`: [Optional] Plain text that describes the purpose of the link, and can be used as the
link's text.
:return: PagerDuty Events API v2 response.
"""
warnings.warn(
"This method will be deprecated. Please use the "
"`airflow.providers.pagerduty.hooks.PagerdutyEventsHook` to interact with the Events API",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
routing_key = routing_key or self.routing_key
return PagerdutyEventsHook(integration_key=routing_key).create_event(
summary=summary,
severity=severity,
source=source,
action=action,
dedup_key=dedup_key,
custom_details=custom_details,
group=group,
component=component,
class_type=class_type,
images=images,
links=links,
)
def test_connection(self):
try:
session = pdpyras.APISession(self.token)
session.list_all("services", params={"query": "some_non_existing_service"})
except Exception:
return False, "connection test failed, invalid token"
return True, "connection tested successfully"
| 7,254 | 41.676471 | 110 | py |
airflow | airflow-main/airflow/providers/pagerduty/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/databricks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.3.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-databricks:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,535 | 35.571429 | 119 | py |
airflow | airflow-main/airflow/providers/databricks/operators/databricks.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Databricks operators."""
from __future__ import annotations
import time
import warnings
from functools import cached_property
from logging import Logger
from typing import TYPE_CHECKING, Any, Sequence
from airflow.configuration import conf
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator, BaseOperatorLink, XCom
from airflow.providers.databricks.hooks.databricks import DatabricksHook, RunState
from airflow.providers.databricks.triggers.databricks import DatabricksExecutionTrigger
from airflow.providers.databricks.utils.databricks import normalise_json_content, validate_trigger_event
if TYPE_CHECKING:
from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
DEFER_METHOD_NAME = "execute_complete"
XCOM_RUN_ID_KEY = "run_id"
XCOM_JOB_ID_KEY = "job_id"
XCOM_RUN_PAGE_URL_KEY = "run_page_url"
def _handle_databricks_operator_execution(operator, hook, log, context) -> None:
"""
Handles the Airflow + Databricks lifecycle logic for a Databricks operator.
:param operator: Databricks operator being handled
:param context: Airflow context
"""
if operator.do_xcom_push and context is not None:
context["ti"].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id)
log.info("Run submitted with run_id: %s", operator.run_id)
run_page_url = hook.get_run_page_url(operator.run_id)
if operator.do_xcom_push and context is not None:
context["ti"].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url)
if operator.wait_for_termination:
while True:
run_info = hook.get_run(operator.run_id)
run_state = RunState(**run_info["state"])
if run_state.is_terminal:
if run_state.is_successful:
log.info("%s completed successfully.", operator.task_id)
log.info("View run status, Spark UI, and logs at %s", run_page_url)
return
else:
if run_state.result_state == "FAILED":
task_run_id = None
if "tasks" in run_info:
for task in run_info["tasks"]:
if task.get("state", {}).get("result_state", "") == "FAILED":
task_run_id = task["run_id"]
if task_run_id is not None:
run_output = hook.get_run_output(task_run_id)
if "error" in run_output:
notebook_error = run_output["error"]
else:
notebook_error = run_state.state_message
else:
notebook_error = run_state.state_message
error_message = (
f"{operator.task_id} failed with terminal state: {run_state} "
f"and with the error {notebook_error}"
)
else:
error_message = (
f"{operator.task_id} failed with terminal state: {run_state} "
f"and with the error {run_state.state_message}"
)
raise AirflowException(error_message)
else:
log.info("%s in run state: %s", operator.task_id, run_state)
log.info("View run status, Spark UI, and logs at %s", run_page_url)
log.info("Sleeping for %s seconds.", operator.polling_period_seconds)
time.sleep(operator.polling_period_seconds)
else:
log.info("View run status, Spark UI, and logs at %s", run_page_url)
def _handle_deferrable_databricks_operator_execution(operator, hook, log, context) -> None:
"""
Handles the Airflow + Databricks lifecycle logic for deferrable Databricks operators.
:param operator: Databricks async operator being handled
:param context: Airflow context
"""
job_id = hook.get_job_id(operator.run_id)
if operator.do_xcom_push and context is not None:
context["ti"].xcom_push(key=XCOM_JOB_ID_KEY, value=job_id)
if operator.do_xcom_push and context is not None:
context["ti"].xcom_push(key=XCOM_RUN_ID_KEY, value=operator.run_id)
log.info("Run submitted with run_id: %s", operator.run_id)
run_page_url = hook.get_run_page_url(operator.run_id)
if operator.do_xcom_push and context is not None:
context["ti"].xcom_push(key=XCOM_RUN_PAGE_URL_KEY, value=run_page_url)
log.info("View run status, Spark UI, and logs at %s", run_page_url)
if operator.wait_for_termination:
operator.defer(
trigger=DatabricksExecutionTrigger(
run_id=operator.run_id,
databricks_conn_id=operator.databricks_conn_id,
polling_period_seconds=operator.polling_period_seconds,
retry_limit=operator.databricks_retry_limit,
retry_delay=operator.databricks_retry_delay,
retry_args=operator.databricks_retry_args,
run_page_url=run_page_url,
),
method_name=DEFER_METHOD_NAME,
)
def _handle_deferrable_databricks_operator_completion(event: dict, log: Logger) -> None:
validate_trigger_event(event)
run_state = RunState.from_json(event["run_state"])
run_page_url = event["run_page_url"]
log.info("View run status, Spark UI, and logs at %s", run_page_url)
if run_state.is_successful:
log.info("Job run completed successfully.")
return
else:
error_message = f"Job run failed with terminal state: {run_state}"
raise AirflowException(error_message)
class DatabricksJobRunLink(BaseOperatorLink):
"""Constructs a link to monitor a Databricks Job Run."""
name = "See Databricks Job Run"
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
return XCom.get_value(key=XCOM_RUN_PAGE_URL_KEY, ti_key=ti_key)
class DatabricksSubmitRunOperator(BaseOperator):
"""
Submits a Spark job run to Databricks using the api/2.1/jobs/runs/submit API endpoint.
See: https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit
There are three ways to instantiate this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DatabricksSubmitRunOperator`
:param tasks: Array of Objects(RunSubmitTaskSettings) <= 100 items.
.. seealso::
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit
:param json: A JSON object containing API parameters which will be passed
directly to the ``api/2.1/jobs/runs/submit`` endpoint. The other named parameters
(i.e. ``spark_jar_task``, ``notebook_task``..) to this operator will
be merged with this json dictionary if they are provided.
If there are conflicts during the merge, the named parameters will
take precedence and override the top level json keys. (templated)
.. seealso::
For more information about templating see :ref:`concepts:jinja-templating`.
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit
:param spark_jar_task: The main class and parameters for the JAR task. Note that
the actual JAR is specified in the ``libraries``.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobssparkjartask
:param notebook_task: The notebook path and parameters for the notebook task.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobsnotebooktask
:param spark_python_task: The python file path and parameters to run the python file with.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobssparkpythontask
:param spark_submit_task: Parameters needed to run a spark-submit command.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobssparksubmittask
:param pipeline_task: Parameters needed to execute a Delta Live Tables pipeline task.
The provided dictionary must contain at least ``pipeline_id`` field!
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobspipelinetask
:param dbt_task: Parameters needed to execute a dbt task.
The provided dictionary must contain at least the ``commands`` field and the
``git_source`` parameter also needs to be set.
*EITHER* ``spark_jar_task`` *OR* ``notebook_task`` *OR* ``spark_python_task``
*OR* ``spark_submit_task`` *OR* ``pipeline_task`` *OR* ``dbt_task`` should be specified.
This field will be templated.
:param new_cluster: Specs for a new cluster on which this task will be run.
*EITHER* ``new_cluster`` *OR* ``existing_cluster_id`` should be specified
(except when ``pipeline_task`` is used).
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#jobsclusterspecnewcluster
:param existing_cluster_id: ID for existing cluster on which to run this task.
*EITHER* ``new_cluster`` *OR* ``existing_cluster_id`` should be specified
(except when ``pipeline_task`` is used).
This field will be templated.
:param libraries: Libraries which this run will use.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/2.0/jobs.html#managedlibrarieslibrary
:param run_name: The run name used for this task.
By default this will be set to the Airflow ``task_id``. This ``task_id`` is a
required parameter of the superclass ``BaseOperator``.
This field will be templated.
:param idempotency_token: an optional token that can be used to guarantee the idempotency of job run
requests. If a run with the provided token already exists, the request does not create a new run but
returns the ID of the existing run instead. This token must have at most 64 characters.
:param access_control_list: optional list of dictionaries representing Access Control List (ACL) for
a given job run. Each dictionary consists of following field - specific subject (``user_name`` for
users, or ``group_name`` for groups), and ``permission_level`` for that subject. See Jobs API
documentation for more details.
:param wait_for_termination: if we should wait for termination of the job run. ``True`` by default.
:param timeout_seconds: The timeout for this run. By default a value of 0 is used
which means to have no timeout.
This field will be templated.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
By default and in the common case this will be ``databricks_default``. To use
token based authentication, provide the key ``token`` in the extra field for the
connection and create the key ``host`` and leave the ``host`` field empty. (templated)
:param polling_period_seconds: Controls the rate which we poll for the result of
this run. By default the operator will poll every 30 seconds.
    :param databricks_retry_limit: Amount of times to retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
:param databricks_retry_delay: Number of seconds to wait between retries (it
might be a floating point number).
:param databricks_retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
:param do_xcom_push: Whether we should push run_id and run_page_url to xcom.
:param git_source: Optional specification of a remote git repository from which
supported task types are retrieved.
:param deferrable: Run operator in the deferrable mode.
.. seealso::
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit
"""
# Used in airflow.models.BaseOperator
template_fields: Sequence[str] = ("json", "databricks_conn_id")
template_ext: Sequence[str] = (".json-tpl",)
# Databricks brand color (blue) under white text
ui_color = "#1CB1C2"
ui_fgcolor = "#fff"
operator_extra_links = (DatabricksJobRunLink(),)
def __init__(
self,
*,
json: Any | None = None,
tasks: list[object] | None = None,
spark_jar_task: dict[str, str] | None = None,
notebook_task: dict[str, str] | None = None,
spark_python_task: dict[str, str | list[str]] | None = None,
spark_submit_task: dict[str, list[str]] | None = None,
pipeline_task: dict[str, str] | None = None,
dbt_task: dict[str, str | list[str]] | None = None,
new_cluster: dict[str, object] | None = None,
existing_cluster_id: str | None = None,
libraries: list[dict[str, Any]] | None = None,
run_name: str | None = None,
timeout_seconds: int | None = None,
databricks_conn_id: str = "databricks_default",
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: int = 1,
databricks_retry_args: dict[Any, Any] | None = None,
do_xcom_push: bool = True,
idempotency_token: str | None = None,
access_control_list: list[dict[str, str]] | None = None,
wait_for_termination: bool = True,
git_source: dict[str, str] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
"""Creates a new ``DatabricksSubmitRunOperator``."""
super().__init__(**kwargs)
self.json = json or {}
self.databricks_conn_id = databricks_conn_id
self.polling_period_seconds = polling_period_seconds
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
self.databricks_retry_args = databricks_retry_args
self.wait_for_termination = wait_for_termination
self.deferrable = deferrable
if tasks is not None:
self.json["tasks"] = tasks
if spark_jar_task is not None:
self.json["spark_jar_task"] = spark_jar_task
if notebook_task is not None:
self.json["notebook_task"] = notebook_task
if spark_python_task is not None:
self.json["spark_python_task"] = spark_python_task
if spark_submit_task is not None:
self.json["spark_submit_task"] = spark_submit_task
if pipeline_task is not None:
self.json["pipeline_task"] = pipeline_task
if dbt_task is not None:
self.json["dbt_task"] = dbt_task
if new_cluster is not None:
self.json["new_cluster"] = new_cluster
if existing_cluster_id is not None:
self.json["existing_cluster_id"] = existing_cluster_id
if libraries is not None:
self.json["libraries"] = libraries
if run_name is not None:
self.json["run_name"] = run_name
if timeout_seconds is not None:
self.json["timeout_seconds"] = timeout_seconds
if "run_name" not in self.json:
self.json["run_name"] = run_name or kwargs["task_id"]
if idempotency_token is not None:
self.json["idempotency_token"] = idempotency_token
if access_control_list is not None:
self.json["access_control_list"] = access_control_list
if git_source is not None:
self.json["git_source"] = git_source
if "dbt_task" in self.json and "git_source" not in self.json:
raise AirflowException("git_source is required for dbt_task")
# This variable will be used in case our task gets killed.
self.run_id: int | None = None
self.do_xcom_push = do_xcom_push
@cached_property
def _hook(self):
return self._get_hook(caller="DatabricksSubmitRunOperator")
def _get_hook(self, caller: str) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
retry_args=self.databricks_retry_args,
caller=caller,
)
def execute(self, context: Context):
json_normalised = normalise_json_content(self.json)
self.run_id = self._hook.submit_run(json_normalised)
if self.deferrable:
_handle_deferrable_databricks_operator_execution(self, self._hook, self.log, context)
else:
_handle_databricks_operator_execution(self, self._hook, self.log, context)
def on_kill(self):
if self.run_id:
self._hook.cancel_run(self.run_id)
self.log.info(
"Task: %s with run_id: %s was requested to be cancelled.", self.task_id, self.run_id
)
else:
self.log.error("Error: Task: %s with invalid run_id was requested to be cancelled.", self.task_id)
def execute_complete(self, context: dict | None, event: dict):
_handle_deferrable_databricks_operator_completion(event, self.log)
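# Hypothetical usage sketch: instantiating ``DatabricksSubmitRunOperator`` through its named
# parameters, which are merged into the ``runs/submit`` payload as described in the docstring.
# The helper name, cluster spec and notebook path are assumptions for illustration only.
def _example_submit_run_operator():  # pragma: no cover - illustrative sketch only
    new_cluster = {
        "spark_version": "10.4.x-scala2.12",
        "node_type_id": "r3.xlarge",
        "num_workers": 2,
    }
    return DatabricksSubmitRunOperator(
        task_id="notebook_run",
        new_cluster=new_cluster,
        notebook_task={"notebook_path": "/Users/airflow@example.com/PrepareData"},
    )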
class DatabricksSubmitRunDeferrableOperator(DatabricksSubmitRunOperator):
"""Deferrable version of ``DatabricksSubmitRunOperator``."""
def __init__(self, *args, **kwargs):
warnings.warn(
"`DatabricksSubmitRunDeferrableOperator` has been deprecated. "
"Please use `airflow.providers.databricks.operators.DatabricksSubmitRunOperator` with "
"`deferrable=True` instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(deferrable=True, *args, **kwargs)
def execute(self, context):
hook = self._get_hook(caller="DatabricksSubmitRunDeferrableOperator")
json_normalised = normalise_json_content(self.json)
self.run_id = hook.submit_run(json_normalised)
_handle_deferrable_databricks_operator_execution(self, hook, self.log, context)
def execute_complete(self, context: dict | None, event: dict):
_handle_deferrable_databricks_operator_completion(event, self.log)
class DatabricksRunNowOperator(BaseOperator):
"""
Runs an existing Spark job run to Databricks using the api/2.1/jobs/run-now API endpoint.
See: https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunNow
There are two ways to instantiate this operator.
In the first way, you can take the JSON payload that you typically use
to call the ``api/2.1/jobs/run-now`` endpoint and pass it directly
to our ``DatabricksRunNowOperator`` through the ``json`` parameter.
For example ::
json = {
"job_id": 42,
"notebook_params": {
"dry-run": "true",
"oldest-time-to-consider": "1457570074236"
}
}
notebook_run = DatabricksRunNowOperator(task_id='notebook_run', json=json)
Another way to accomplish the same thing is to use the named parameters
of the ``DatabricksRunNowOperator`` directly. Note that there is exactly
one named parameter for each top level parameter in the ``run-now``
endpoint. In this method, your code would look like this: ::
job_id=42
notebook_params = {
"dry-run": "true",
"oldest-time-to-consider": "1457570074236"
}
python_params = ["douglas adams", "42"]
jar_params = ["douglas adams", "42"]
spark_submit_params = ["--class", "org.apache.spark.examples.SparkPi"]
notebook_run = DatabricksRunNowOperator(
job_id=job_id,
notebook_params=notebook_params,
python_params=python_params,
jar_params=jar_params,
spark_submit_params=spark_submit_params
)
In the case where both the json parameter **AND** the named parameters
are provided, they will be merged together. If there are conflicts during the merge,
the named parameters will take precedence and override the top level ``json`` keys.
Currently the named parameters that ``DatabricksRunNowOperator`` supports are
- ``job_id``
- ``job_name``
- ``json``
- ``notebook_params``
- ``python_params``
- ``python_named_parameters``
- ``jar_params``
- ``spark_submit_params``
- ``idempotency_token``
:param job_id: the job_id of the existing Databricks job.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunNow
:param job_name: the name of the existing Databricks job.
        Exactly one job with the specified name must exist.
``job_id`` and ``job_name`` are mutually exclusive.
This field will be templated.
:param json: A JSON object containing API parameters which will be passed
directly to the ``api/2.1/jobs/run-now`` endpoint. The other named parameters
(i.e. ``notebook_params``, ``spark_submit_params``..) to this operator will
be merged with this json dictionary if they are provided.
If there are conflicts during the merge, the named parameters will
take precedence and override the top level json keys. (templated)
.. seealso::
For more information about templating see :ref:`concepts:jinja-templating`.
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunNow
:param notebook_params: A dict from keys to values for jobs with notebook task,
e.g. "notebook_params": {"name": "john doe", "age": "35"}.
The map is passed to the notebook and will be accessible through the
dbutils.widgets.get function. See Widgets for more information.
If not specified upon run-now, the triggered run will use the
job's base parameters. notebook_params cannot be
specified in conjunction with jar_params. The json representation
of this field (i.e. {"notebook_params":{"name":"john doe","age":"35"}})
cannot exceed 10,000 bytes.
This field will be templated.
.. seealso::
https://docs.databricks.com/user-guide/notebooks/widgets.html
:param python_params: A list of parameters for jobs with python tasks,
e.g. "python_params": ["john doe", "35"].
The parameters will be passed to python file as command line parameters.
If specified upon run-now, it would overwrite the parameters specified in job setting.
The json representation of this field (i.e. {"python_params":["john doe","35"]})
cannot exceed 10,000 bytes.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunNow
:param python_named_params: A list of named parameters for jobs with python wheel tasks,
e.g. "python_named_params": {"name": "john doe", "age": "35"}.
If specified upon run-now, it would overwrite the parameters specified in job setting.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunNow
:param jar_params: A list of parameters for jobs with JAR tasks,
e.g. "jar_params": ["john doe", "35"].
The parameters will be passed to JAR file as command line parameters.
If specified upon run-now, it would overwrite the parameters specified in
job setting.
The json representation of this field (i.e. {"jar_params":["john doe","35"]})
cannot exceed 10,000 bytes.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunNow
:param spark_submit_params: A list of parameters for jobs with spark submit task,
e.g. "spark_submit_params": ["--class", "org.apache.spark.examples.SparkPi"].
The parameters will be passed to spark-submit script as command line parameters.
If specified upon run-now, it would overwrite the parameters specified
in job setting.
The json representation of this field cannot exceed 10,000 bytes.
This field will be templated.
.. seealso::
https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunNow
:param idempotency_token: an optional token that can be used to guarantee the idempotency of job run
requests. If a run with the provided token already exists, the request does not create a new run but
returns the ID of the existing run instead. This token must have at most 64 characters.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
By default and in the common case this will be ``databricks_default``. To use
token based authentication, provide the key ``token`` in the extra field for the
connection and create the key ``host`` and leave the ``host`` field empty. (templated)
:param polling_period_seconds: Controls the rate which we poll for the result of
this run. By default, the operator will poll every 30 seconds.
    :param databricks_retry_limit: Amount of times to retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
:param databricks_retry_delay: Number of seconds to wait between retries (it
might be a floating point number).
:param databricks_retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
:param do_xcom_push: Whether we should push run_id and run_page_url to xcom.
:param wait_for_termination: if we should wait for termination of the job run. ``True`` by default.
:param deferrable: Run operator in the deferrable mode.
"""
# Used in airflow.models.BaseOperator
template_fields: Sequence[str] = ("json", "databricks_conn_id")
template_ext: Sequence[str] = (".json-tpl",)
# Databricks brand color (blue) under white text
ui_color = "#1CB1C2"
ui_fgcolor = "#fff"
operator_extra_links = (DatabricksJobRunLink(),)
def __init__(
self,
*,
job_id: str | None = None,
job_name: str | None = None,
json: Any | None = None,
notebook_params: dict[str, str] | None = None,
python_params: list[str] | None = None,
jar_params: list[str] | None = None,
spark_submit_params: list[str] | None = None,
python_named_params: dict[str, str] | None = None,
idempotency_token: str | None = None,
databricks_conn_id: str = "databricks_default",
polling_period_seconds: int = 30,
databricks_retry_limit: int = 3,
databricks_retry_delay: int = 1,
databricks_retry_args: dict[Any, Any] | None = None,
do_xcom_push: bool = True,
wait_for_termination: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
"""Creates a new ``DatabricksRunNowOperator``."""
super().__init__(**kwargs)
self.json = json or {}
self.databricks_conn_id = databricks_conn_id
self.polling_period_seconds = polling_period_seconds
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
self.databricks_retry_args = databricks_retry_args
self.wait_for_termination = wait_for_termination
self.deferrable = deferrable
if job_id is not None:
self.json["job_id"] = job_id
if job_name is not None:
self.json["job_name"] = job_name
if "job_id" in self.json and "job_name" in self.json:
raise AirflowException("Argument 'job_name' is not allowed with argument 'job_id'")
if notebook_params is not None:
self.json["notebook_params"] = notebook_params
if python_params is not None:
self.json["python_params"] = python_params
if python_named_params is not None:
self.json["python_named_params"] = python_named_params
if jar_params is not None:
self.json["jar_params"] = jar_params
if spark_submit_params is not None:
self.json["spark_submit_params"] = spark_submit_params
if idempotency_token is not None:
self.json["idempotency_token"] = idempotency_token
self.json = normalise_json_content(self.json)
# This variable will be used in case our task gets killed.
self.run_id: int | None = None
self.do_xcom_push = do_xcom_push
@cached_property
def _hook(self):
return self._get_hook(caller="DatabricksRunNowOperator")
def _get_hook(self, caller: str) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
retry_args=self.databricks_retry_args,
caller=caller,
)
def execute(self, context: Context):
hook = self._hook
if "job_name" in self.json:
job_id = hook.find_job_id_by_name(self.json["job_name"])
if job_id is None:
raise AirflowException(f"Job ID for job name {self.json['job_name']} can not be found")
self.json["job_id"] = job_id
del self.json["job_name"]
self.run_id = hook.run_now(self.json)
if self.deferrable:
_handle_deferrable_databricks_operator_execution(self, hook, self.log, context)
else:
_handle_databricks_operator_execution(self, hook, self.log, context)
def on_kill(self):
if self.run_id:
self._hook.cancel_run(self.run_id)
self.log.info(
"Task: %s with run_id: %s was requested to be cancelled.", self.task_id, self.run_id
)
else:
self.log.error("Error: Task: %s with invalid run_id was requested to be cancelled.", self.task_id)
class DatabricksRunNowDeferrableOperator(DatabricksRunNowOperator):
"""Deferrable version of ``DatabricksRunNowOperator``."""
def __init__(self, *args, **kwargs):
warnings.warn(
"`DatabricksRunNowDeferrableOperator` has been deprecated. "
"Please use `airflow.providers.databricks.operators.DatabricksRunNowOperator` with "
"`deferrable=True` instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(deferrable=True, *args, **kwargs)
def execute(self, context):
hook = self._get_hook(caller="DatabricksRunNowDeferrableOperator")
self.run_id = hook.run_now(self.json)
_handle_deferrable_databricks_operator_execution(self, hook, self.log, context)
def execute_complete(self, context: dict | None, event: dict):
_handle_deferrable_databricks_operator_completion(event, self.log)
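# Hypothetical usage sketch: the deprecation warnings above recommend the non-deferrable classes
# with ``deferrable=True``. The helper name is illustrative; the job id and notebook parameters
# reuse the values from the ``DatabricksRunNowOperator`` docstring.
def _example_run_now_deferrable():  # pragma: no cover - illustrative sketch only
    return DatabricksRunNowOperator(
        task_id="notebook_run",
        job_id=42,
        notebook_params={"dry-run": "true", "oldest-time-to-consider": "1457570074236"},
        deferrable=True,
    )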
| 33,530 | 46.629261 | 110 | py |
airflow | airflow-main/airflow/providers/databricks/operators/databricks_sql.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Databricks operators."""
from __future__ import annotations
import csv
import json
from typing import TYPE_CHECKING, Any, Sequence
from databricks.sql.types import Row
from databricks.sql.utils import ParamEscaper
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.common.sql.operators.sql import SQLExecuteQueryOperator
from airflow.providers.databricks.hooks.databricks_sql import DatabricksSqlHook
if TYPE_CHECKING:
from airflow.utils.context import Context
def make_serializable(val: Row):
return tuple(val)
class DatabricksSqlOperator(SQLExecuteQueryOperator):
"""
Executes SQL code in a Databricks SQL endpoint or a Databricks cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DatabricksSqlOperator`
:param databricks_conn_id: Reference to
:ref:`Databricks connection id<howto/connection:databricks>` (templated)
:param http_path: Optional string specifying HTTP path of Databricks SQL Endpoint or cluster.
If not specified, it should be either specified in the Databricks connection's extra parameters,
or ``sql_endpoint_name`` must be specified.
:param sql_endpoint_name: Optional name of Databricks SQL Endpoint. If not specified, ``http_path`` must
be provided as described above.
:param sql: the SQL code to be executed as a single string, or
a list of str (sql statements), or a reference to a template file. (templated)
Template references are recognized by str ending in '.sql'
:param parameters: (optional) the parameters to render the SQL query with.
:param session_configuration: An optional dictionary of Spark session parameters. Defaults to None.
If not specified, it could be specified in the Databricks connection's extra parameters.
:param client_parameters: Additional parameters internal to Databricks SQL Connector parameters
:param http_headers: An optional list of (k, v) pairs that will be set as HTTP headers on every request.
(templated)
:param catalog: An optional initial catalog to use. Requires DBR version 9.0+ (templated)
:param schema: An optional initial schema to use. Requires DBR version 9.0+ (templated)
:param output_path: optional string specifying the file to which write selected data. (templated)
    :param output_format: format of output data if ``output_path`` is specified.
Possible values are ``csv``, ``json``, ``jsonl``. Default is ``csv``.
:param csv_params: parameters that will be passed to the ``csv.DictWriter`` class used to write CSV data.
"""
template_fields: Sequence[str] = tuple(
{"_output_path", "schema", "catalog", "http_headers", "databricks_conn_id"}
| set(SQLExecuteQueryOperator.template_fields)
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
def __init__(
self,
*,
databricks_conn_id: str = DatabricksSqlHook.default_conn_name,
http_path: str | None = None,
sql_endpoint_name: str | None = None,
session_configuration=None,
http_headers: list[tuple[str, str]] | None = None,
catalog: str | None = None,
schema: str | None = None,
output_path: str | None = None,
output_format: str = "csv",
csv_params: dict[str, Any] | None = None,
client_parameters: dict[str, Any] | None = None,
**kwargs,
) -> None:
super().__init__(conn_id=databricks_conn_id, **kwargs)
self.databricks_conn_id = databricks_conn_id
self._output_path = output_path
self._output_format = output_format
self._csv_params = csv_params
self.http_path = http_path
self.sql_endpoint_name = sql_endpoint_name
self.session_configuration = session_configuration
self.client_parameters = {} if client_parameters is None else client_parameters
self.hook_params = kwargs.pop("hook_params", {})
self.http_headers = http_headers
self.catalog = catalog
self.schema = schema
def get_db_hook(self) -> DatabricksSqlHook:
hook_params = {
"http_path": self.http_path,
"session_configuration": self.session_configuration,
"sql_endpoint_name": self.sql_endpoint_name,
"http_headers": self.http_headers,
"catalog": self.catalog,
"schema": self.schema,
"caller": "DatabricksSqlOperator",
**self.client_parameters,
**self.hook_params,
}
return DatabricksSqlHook(self.databricks_conn_id, **hook_params)
def _should_run_output_processing(self) -> bool:
return self.do_xcom_push or bool(self._output_path)
def _process_output(self, results: list[Any], descriptions: list[Sequence[Sequence] | None]) -> list[Any]:
if not self._output_path:
return list(zip(descriptions, [[make_serializable(row) for row in res] for res in results]))
if not self._output_format:
raise AirflowException("Output format should be specified!")
# Output to a file only the result of last query
last_description = descriptions[-1]
last_results = results[-1]
if last_description is None:
raise AirflowException("There is missing description present for the output file. .")
field_names = [field[0] for field in last_description]
if self._output_format.lower() == "csv":
with open(self._output_path, "w", newline="") as file:
if self._csv_params:
csv_params = self._csv_params
else:
csv_params = {}
write_header = csv_params.get("header", True)
if "header" in csv_params:
del csv_params["header"]
writer = csv.DictWriter(file, fieldnames=field_names, **csv_params)
if write_header:
writer.writeheader()
for row in last_results:
writer.writerow(row.asDict())
elif self._output_format.lower() == "json":
with open(self._output_path, "w") as file:
file.write(json.dumps([row.asDict() for row in last_results]))
elif self._output_format.lower() == "jsonl":
with open(self._output_path, "w") as file:
for row in last_results:
file.write(json.dumps(row.asDict()))
file.write("\n")
else:
raise AirflowException(f"Unsupported output format: '{self._output_format}'")
return list(zip(descriptions, [[make_serializable(row) for row in res] for res in results]))
COPY_INTO_APPROVED_FORMATS = ["CSV", "JSON", "AVRO", "ORC", "PARQUET", "TEXT", "BINARYFILE"]
class DatabricksCopyIntoOperator(BaseOperator):
"""
Executes COPY INTO command in a Databricks SQL endpoint or a Databricks cluster.
    COPY INTO command is constructed from individual pieces that are described in
`documentation <https://docs.databricks.com/sql/language-manual/delta-copy-into.html>`_.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DatabricksSqlCopyIntoOperator`
:param table_name: Required name of the table. (templated)
:param file_location: Required location of files to import. (templated)
:param file_format: Required file format. Supported formats are
``CSV``, ``JSON``, ``AVRO``, ``ORC``, ``PARQUET``, ``TEXT``, ``BINARYFILE``.
:param databricks_conn_id: Reference to
:ref:`Databricks connection id<howto/connection:databricks>` (templated)
:param http_path: Optional string specifying HTTP path of Databricks SQL Endpoint or cluster.
If not specified, it should be either specified in the Databricks connection's extra parameters,
or ``sql_endpoint_name`` must be specified.
:param sql_endpoint_name: Optional name of Databricks SQL Endpoint.
If not specified, ``http_path`` must be provided as described above.
:param session_configuration: An optional dictionary of Spark session parameters. Defaults to None.
If not specified, it could be specified in the Databricks connection's extra parameters.
:param http_headers: An optional list of (k, v) pairs that will be set as HTTP headers on every request
:param catalog: An optional initial catalog to use. Requires DBR version 9.0+
:param schema: An optional initial schema to use. Requires DBR version 9.0+
:param client_parameters: Additional parameters internal to Databricks SQL Connector parameters
:param files: optional list of files to import. Can't be specified together with ``pattern``. (templated)
:param pattern: optional regex string to match file names to import.
Can't be specified together with ``files``.
:param expression_list: optional string that will be used in the ``SELECT`` expression.
:param credential: optional credential configuration for authentication against a source location.
:param storage_credential: optional Unity Catalog storage credential for destination.
:param encryption: optional encryption configuration for a specified location.
:param format_options: optional dictionary with options specific for a given file format.
:param force_copy: optional bool to control forcing of data import
(could be also specified in ``copy_options``).
    :param validate: optional configuration for schema & data validation. ``True`` forces validation
        of all rows; an integer number validates only the first N rows.
    :param copy_options: optional dictionary of copy options. Right now only the ``force`` option is supported.
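    A minimal usage sketch (illustrative only; the task id, connection id, endpoint name,
    table and file location below are placeholder assumptions, not values defined by this module)::
        # Hypothetical example: load CSV files from cloud storage into a Delta table.
        copy_csv = DatabricksCopyIntoOperator(
            task_id="copy_csv_into_table",
            databricks_conn_id="databricks_default",
            sql_endpoint_name="my_endpoint",
            table_name="main.default.my_table",
            file_location="s3://my-bucket/incoming/",
            file_format="CSV",
            format_options={"header": "true", "inferSchema": "true"},
            force_copy=True,
        )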
"""
template_fields: Sequence[str] = (
"_file_location",
"_files",
"_table_name",
"databricks_conn_id",
)
def __init__(
self,
*,
table_name: str,
file_location: str,
file_format: str,
databricks_conn_id: str = DatabricksSqlHook.default_conn_name,
http_path: str | None = None,
sql_endpoint_name: str | None = None,
session_configuration=None,
http_headers: list[tuple[str, str]] | None = None,
client_parameters: dict[str, Any] | None = None,
catalog: str | None = None,
schema: str | None = None,
files: list[str] | None = None,
pattern: str | None = None,
expression_list: str | None = None,
credential: dict[str, str] | None = None,
storage_credential: str | None = None,
encryption: dict[str, str] | None = None,
format_options: dict[str, str] | None = None,
force_copy: bool | None = None,
copy_options: dict[str, str] | None = None,
validate: bool | int | None = None,
**kwargs,
) -> None:
"""Creates a new ``DatabricksSqlOperator``."""
super().__init__(**kwargs)
if files is not None and pattern is not None:
raise AirflowException("Only one of 'pattern' or 'files' should be specified")
if table_name == "":
raise AirflowException("table_name shouldn't be empty")
if file_location == "":
raise AirflowException("file_location shouldn't be empty")
if file_format not in COPY_INTO_APPROVED_FORMATS:
raise AirflowException(f"file_format '{file_format}' isn't supported")
self._files = files
self._pattern = pattern
self._file_format = file_format
self.databricks_conn_id = databricks_conn_id
self._http_path = http_path
self._sql_endpoint_name = sql_endpoint_name
self.session_config = session_configuration
self._table_name = table_name
self._catalog = catalog
self._schema = schema
self._file_location = file_location
self._expression_list = expression_list
self._credential = credential
self._storage_credential = storage_credential
self._encryption = encryption
self._format_options = format_options
self._copy_options = copy_options or {}
self._validate = validate
self._http_headers = http_headers
self._client_parameters = client_parameters or {}
if force_copy is not None:
self._copy_options["force"] = "true" if force_copy else "false"
def _get_hook(self) -> DatabricksSqlHook:
return DatabricksSqlHook(
self.databricks_conn_id,
http_path=self._http_path,
session_configuration=self.session_config,
sql_endpoint_name=self._sql_endpoint_name,
http_headers=self._http_headers,
catalog=self._catalog,
schema=self._schema,
caller="DatabricksCopyIntoOperator",
**self._client_parameters,
)
@staticmethod
def _generate_options(
name: str,
escaper: ParamEscaper,
opts: dict[str, str] | None = None,
escape_key: bool = True,
) -> str:
formatted_opts = ""
if opts is not None and len(opts) > 0:
pairs = [
f"{escaper.escape_item(k) if escape_key else k} = {escaper.escape_item(v)}"
for k, v in opts.items()
]
formatted_opts = f"{name} ({', '.join(pairs)})"
return formatted_opts
def _create_sql_query(self) -> str:
escaper = ParamEscaper()
maybe_with = ""
if self._encryption is not None or self._credential is not None:
maybe_encryption = ""
if self._encryption is not None:
maybe_encryption = self._generate_options("ENCRYPTION", escaper, self._encryption, False)
maybe_credential = ""
if self._credential is not None:
maybe_credential = self._generate_options("CREDENTIAL", escaper, self._credential, False)
maybe_with = f" WITH ({maybe_credential} {maybe_encryption})"
location = escaper.escape_item(self._file_location) + maybe_with
if self._expression_list is not None:
location = f"(SELECT {self._expression_list} FROM {location})"
files_or_pattern = ""
if self._pattern is not None:
files_or_pattern = f"PATTERN = {escaper.escape_item(self._pattern)}\n"
elif self._files is not None:
files_or_pattern = f"FILES = {escaper.escape_item(self._files)}\n"
format_options = self._generate_options("FORMAT_OPTIONS", escaper, self._format_options) + "\n"
copy_options = self._generate_options("COPY_OPTIONS", escaper, self._copy_options) + "\n"
storage_cred = ""
if self._storage_credential:
storage_cred = f" WITH (CREDENTIAL {self._storage_credential})"
validation = ""
if self._validate is not None:
if isinstance(self._validate, bool):
if self._validate:
validation = "VALIDATE ALL\n"
elif isinstance(self._validate, int):
if self._validate < 0:
raise AirflowException(
"Number of rows for validation should be positive, got: " + str(self._validate)
)
validation = f"VALIDATE {self._validate} ROWS\n"
else:
raise AirflowException(
"Incorrect data type for validate parameter: " + str(type(self._validate))
)
# TODO: think on how to make sure that table_name and expression_list aren't used for SQL injection
sql = f"""COPY INTO {self._table_name}{storage_cred}
FROM {location}
FILEFORMAT = {self._file_format}
{validation}{files_or_pattern}{format_options}{copy_options}
"""
return sql.strip()
def execute(self, context: Context) -> Any:
sql = self._create_sql_query()
self.log.info("Executing: %s", sql)
hook = self._get_hook()
hook.run(sql)
| 17,010 | 46.252778 | 110 | py |
airflow | airflow-main/airflow/providers/databricks/operators/databricks_repos.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Databricks operators."""
from __future__ import annotations
import re
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from urllib.parse import urlsplit
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.databricks.hooks.databricks import DatabricksHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class DatabricksReposCreateOperator(BaseOperator):
"""
Creates, and optionally checks out, a Databricks Repo using the POST api/2.0/repos API endpoint.
See: https://docs.databricks.com/dev-tools/api/latest/repos.html#operation/create-repo
:param git_url: Required HTTPS URL of a Git repository
:param git_provider: Optional name of Git provider. Must be provided if we can't guess its name from URL.
:param repo_path: optional path for a repository. Must be in the format ``/Repos/{folder}/{repo-name}``.
If not specified, it will be created in the user's directory.
:param branch: optional name of branch to check out.
:param tag: optional name of tag to checkout.
:param ignore_existing_repo: don't throw exception if repository with given path already exists.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
By default and in the common case this will be ``databricks_default``. To use
token based authentication, provide the key ``token`` in the extra field for the
connection and create the key ``host`` and leave the ``host`` field empty. (templated)
    :param databricks_retry_limit: Amount of times to retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
:param databricks_retry_delay: Number of seconds to wait between retries (it
might be a floating point number).
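    A minimal usage sketch (illustrative only; the task id, Git URL and repo path below are
    placeholder assumptions, not values defined by this module)::
        # Hypothetical example: clone a GitHub repository into Databricks Repos
        # and check out the "main" branch.
        create_repo = DatabricksReposCreateOperator(
            task_id="create_repo",
            git_url="https://github.com/example-org/example-repo.git",
            repo_path="/Repos/user@example.com/example-repo",
            branch="main",
        )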
"""
# Used in airflow.models.BaseOperator
template_fields: Sequence[str] = ("repo_path", "tag", "branch", "databricks_conn_id")
__git_providers__ = {
"github.com": "gitHub",
"dev.azure.com": "azureDevOpsServices",
"gitlab.com": "gitLab",
"bitbucket.org": "bitbucketCloud",
}
__aws_code_commit_regexp__ = re.compile(r"^git-codecommit\.[^.]+\.amazonaws.com$")
__repos_path_regexp__ = re.compile(r"/Repos/[^/]+/[^/]+/?$")
def __init__(
self,
*,
git_url: str,
git_provider: str | None = None,
branch: str | None = None,
tag: str | None = None,
repo_path: str | None = None,
ignore_existing_repo: bool = False,
databricks_conn_id: str = "databricks_default",
databricks_retry_limit: int = 3,
databricks_retry_delay: int = 1,
**kwargs,
) -> None:
"""Creates a new ``DatabricksReposCreateOperator``."""
super().__init__(**kwargs)
self.databricks_conn_id = databricks_conn_id
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
self.git_url = git_url
self.ignore_existing_repo = ignore_existing_repo
if git_provider is None:
self.git_provider = self.__detect_repo_provider__(git_url)
if self.git_provider is None:
raise AirflowException(
f"git_provider isn't specified and couldn't be guessed for URL {git_url}"
)
else:
self.git_provider = git_provider
self.repo_path = repo_path
if branch is not None and tag is not None:
raise AirflowException("Only one of branch or tag should be provided, but not both")
self.branch = branch
self.tag = tag
@staticmethod
def __detect_repo_provider__(url):
provider = None
try:
netloc = urlsplit(url).netloc
idx = netloc.rfind("@")
if idx != -1:
netloc = netloc[(idx + 1) :]
netloc = netloc.lower()
provider = DatabricksReposCreateOperator.__git_providers__.get(netloc)
if provider is None and DatabricksReposCreateOperator.__aws_code_commit_regexp__.match(netloc):
provider = "awsCodeCommit"
except ValueError:
pass
return provider
@cached_property
def _hook(self) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
caller="DatabricksReposCreateOperator",
)
def execute(self, context: Context):
"""
Creates a Databricks Repo.
:param context: context
:return: Repo ID
"""
payload = {
"url": self.git_url,
"provider": self.git_provider,
}
if self.repo_path is not None:
if not self.__repos_path_regexp__.match(self.repo_path):
raise AirflowException(
f"repo_path should have form of /Repos/{{folder}}/{{repo-name}}, got '{self.repo_path}'"
)
payload["path"] = self.repo_path
existing_repo_id = None
if self.repo_path is not None:
existing_repo_id = self._hook.get_repo_by_path(self.repo_path)
if existing_repo_id is not None and not self.ignore_existing_repo:
raise AirflowException(f"Repo with path '{self.repo_path}' already exists")
if existing_repo_id is None:
result = self._hook.create_repo(payload)
repo_id = result["id"]
else:
repo_id = existing_repo_id
# update repo if necessary
if self.branch is not None:
self._hook.update_repo(str(repo_id), {"branch": str(self.branch)})
elif self.tag is not None:
self._hook.update_repo(str(repo_id), {"tag": str(self.tag)})
return repo_id
class DatabricksReposUpdateOperator(BaseOperator):
"""
Updates specified repository to a given branch or tag using the PATCH api/2.0/repos API endpoint.
See: https://docs.databricks.com/dev-tools/api/latest/repos.html#operation/update-repo
:param branch: optional name of branch to update to. Should be specified if ``tag`` is omitted
:param tag: optional name of tag to update to. Should be specified if ``branch`` is omitted
:param repo_id: optional ID of existing repository. Should be specified if ``repo_path`` is omitted
:param repo_path: optional path of existing repository. Should be specified if ``repo_id`` is omitted
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
By default and in the common case this will be ``databricks_default``. To use
token based authentication, provide the key ``token`` in the extra field for the
connection and create the key ``host`` and leave the ``host`` field empty. (templated)
    :param databricks_retry_limit: Amount of times to retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
:param databricks_retry_delay: Number of seconds to wait between retries (it
might be a floating point number).
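    A minimal usage sketch (illustrative only; the task id, repo path and branch below are
    placeholder assumptions, not values defined by this module)::
        # Hypothetical example: point an existing repo checkout at the "releases" branch.
        update_repo = DatabricksReposUpdateOperator(
            task_id="update_repo",
            repo_path="/Repos/user@example.com/example-repo",
            branch="releases",
        )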
"""
# Used in airflow.models.BaseOperator
template_fields: Sequence[str] = ("repo_path", "tag", "branch", "databricks_conn_id")
def __init__(
self,
*,
branch: str | None = None,
tag: str | None = None,
repo_id: str | None = None,
repo_path: str | None = None,
databricks_conn_id: str = "databricks_default",
databricks_retry_limit: int = 3,
databricks_retry_delay: int = 1,
**kwargs,
) -> None:
"""Creates a new ``DatabricksReposUpdateOperator``."""
super().__init__(**kwargs)
self.databricks_conn_id = databricks_conn_id
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
if branch is not None and tag is not None:
raise AirflowException("Only one of branch or tag should be provided, but not both")
if branch is None and tag is None:
raise AirflowException("One of branch or tag should be provided")
if repo_id is not None and repo_path is not None:
raise AirflowException("Only one of repo_id or repo_path should be provided, but not both")
if repo_id is None and repo_path is None:
raise AirflowException("One of repo_id or repo_path should be provided")
self.repo_path = repo_path
self.repo_id = repo_id
self.branch = branch
self.tag = tag
@cached_property
def _hook(self) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
caller="DatabricksReposUpdateOperator",
)
def execute(self, context: Context):
if self.repo_path is not None:
self.repo_id = self._hook.get_repo_by_path(self.repo_path)
if self.repo_id is None:
raise AirflowException(f"Can't find Repo ID for path '{self.repo_path}'")
if self.branch is not None:
payload = {"branch": str(self.branch)}
else:
payload = {"tag": str(self.tag)}
result = self._hook.update_repo(str(self.repo_id), payload)
return result["head_commit_id"]
class DatabricksReposDeleteOperator(BaseOperator):
"""
Deletes specified repository using the DELETE api/2.0/repos API endpoint.
See: https://docs.databricks.com/dev-tools/api/latest/repos.html#operation/delete-repo
:param repo_id: optional ID of existing repository. Should be specified if ``repo_path`` is omitted
:param repo_path: optional path of existing repository. Should be specified if ``repo_id`` is omitted
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
By default and in the common case this will be ``databricks_default``. To use
token based authentication, provide the key ``token`` in the extra field for the
connection and create the key ``host`` and leave the ``host`` field empty. (templated)
    :param databricks_retry_limit: Amount of times to retry if the Databricks backend is
unreachable. Its value must be greater than or equal to 1.
:param databricks_retry_delay: Number of seconds to wait between retries (it
might be a floating point number).
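    A minimal usage sketch (illustrative only; the task id and repo path below are
    placeholder assumptions, not values defined by this module)::
        # Hypothetical example: delete a repo checkout identified by its workspace path.
        delete_repo = DatabricksReposDeleteOperator(
            task_id="delete_repo",
            repo_path="/Repos/user@example.com/example-repo",
        )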
"""
# Used in airflow.models.BaseOperator
template_fields: Sequence[str] = ("repo_path", "databricks_conn_id")
def __init__(
self,
*,
repo_id: str | None = None,
repo_path: str | None = None,
databricks_conn_id: str = "databricks_default",
databricks_retry_limit: int = 3,
databricks_retry_delay: int = 1,
**kwargs,
) -> None:
"""Creates a new ``DatabricksReposDeleteOperator``."""
super().__init__(**kwargs)
self.databricks_conn_id = databricks_conn_id
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
if repo_id is not None and repo_path is not None:
raise AirflowException("Only one of repo_id or repo_path should be provided, but not both")
if repo_id is None and repo_path is None:
raise AirflowException("One of repo_id repo_path tag should be provided")
self.repo_path = repo_path
self.repo_id = repo_id
@cached_property
def _hook(self) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
caller="DatabricksReposDeleteOperator",
)
def execute(self, context: Context):
if self.repo_path is not None:
self.repo_id = self._hook.get_repo_by_path(self.repo_path)
if self.repo_id is None:
raise AirflowException(f"Can't find Repo ID for path '{self.repo_path}'")
self._hook.delete_repo(str(self.repo_id))
| 13,181 | 42.94 | 109 | py |
airflow | airflow-main/airflow/providers/databricks/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/databricks/triggers/databricks.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any
from airflow.providers.databricks.hooks.databricks import DatabricksHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class DatabricksExecutionTrigger(BaseTrigger):
"""
    The trigger handles the logic of async communication with the Databricks API.
:param run_id: id of the run
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
:param polling_period_seconds: Controls the rate of the poll for the result of this run.
By default, the trigger will poll every 30 seconds.
:param retry_limit: The number of times to retry the connection in case of service outages.
:param retry_delay: The number of seconds to wait between retries.
:param retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
:param run_page_url: The run page url.
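    A minimal deferral sketch (illustrative only; shown as it might be written inside a
    deferrable operator's ``execute`` method, where ``run_id`` and the connection id are
    placeholder assumptions)::
        # Hypothetical example: defer the task until the Databricks run reaches a
        # terminal state, then resume in the operator's ``execute_complete`` method.
        self.defer(
            trigger=DatabricksExecutionTrigger(
                run_id=run_id,
                databricks_conn_id="databricks_default",
                polling_period_seconds=30,
            ),
            method_name="execute_complete",
        )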
"""
def __init__(
self,
run_id: int,
databricks_conn_id: str,
polling_period_seconds: int = 30,
retry_limit: int = 3,
retry_delay: int = 10,
retry_args: dict[Any, Any] | None = None,
run_page_url: str | None = None,
) -> None:
super().__init__()
self.run_id = run_id
self.databricks_conn_id = databricks_conn_id
self.polling_period_seconds = polling_period_seconds
self.retry_limit = retry_limit
self.retry_delay = retry_delay
self.retry_args = retry_args
self.run_page_url = run_page_url
self.hook = DatabricksHook(
databricks_conn_id,
retry_limit=self.retry_limit,
retry_delay=self.retry_delay,
retry_args=retry_args,
)
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
"airflow.providers.databricks.triggers.databricks.DatabricksExecutionTrigger",
{
"run_id": self.run_id,
"databricks_conn_id": self.databricks_conn_id,
"polling_period_seconds": self.polling_period_seconds,
"retry_limit": self.retry_limit,
"retry_delay": self.retry_delay,
"retry_args": self.retry_args,
"run_page_url": self.run_page_url,
},
)
async def run(self):
async with self.hook:
while True:
run_state = await self.hook.a_get_run_state(self.run_id)
if run_state.is_terminal:
yield TriggerEvent(
{
"run_id": self.run_id,
"run_page_url": self.run_page_url,
"run_state": run_state.to_json(),
}
)
return
else:
self.log.info(
"run-id %s in run state %s. sleeping for %s seconds",
self.run_id,
run_state,
self.polling_period_seconds,
)
await asyncio.sleep(self.polling_period_seconds)
| 3,997 | 38.584158 | 107 | py |
airflow | airflow-main/airflow/providers/databricks/triggers/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/databricks/hooks/databricks.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Databricks hook.
This hook enables the submitting and running of jobs to the Databricks platform. Internally the
operators talk to the
``api/2.1/jobs/run-now``
`endpoint <https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunNow>`_
or the ``api/2.1/jobs/runs/submit``
`endpoint <https://docs.databricks.com/dev-tools/api/latest/jobs.html#operation/JobsRunsSubmit>`_.
"""
from __future__ import annotations
import json
from typing import Any
from requests import exceptions as requests_exceptions
from airflow.exceptions import AirflowException
from airflow.providers.databricks.hooks.databricks_base import BaseDatabricksHook
RESTART_CLUSTER_ENDPOINT = ("POST", "api/2.0/clusters/restart")
START_CLUSTER_ENDPOINT = ("POST", "api/2.0/clusters/start")
TERMINATE_CLUSTER_ENDPOINT = ("POST", "api/2.0/clusters/delete")
RUN_NOW_ENDPOINT = ("POST", "api/2.1/jobs/run-now")
SUBMIT_RUN_ENDPOINT = ("POST", "api/2.1/jobs/runs/submit")
GET_RUN_ENDPOINT = ("GET", "api/2.1/jobs/runs/get")
CANCEL_RUN_ENDPOINT = ("POST", "api/2.1/jobs/runs/cancel")
DELETE_RUN_ENDPOINT = ("POST", "api/2.1/jobs/runs/delete")
REPAIR_RUN_ENDPOINT = ("POST", "api/2.1/jobs/runs/repair")
OUTPUT_RUNS_JOB_ENDPOINT = ("GET", "api/2.1/jobs/runs/get-output")
CANCEL_ALL_RUNS_ENDPOINT = ("POST", "api/2.1/jobs/runs/cancel-all")
INSTALL_LIBS_ENDPOINT = ("POST", "api/2.0/libraries/install")
UNINSTALL_LIBS_ENDPOINT = ("POST", "api/2.0/libraries/uninstall")
LIST_JOBS_ENDPOINT = ("GET", "api/2.1/jobs/list")
WORKSPACE_GET_STATUS_ENDPOINT = ("GET", "api/2.0/workspace/get-status")
RUN_LIFE_CYCLE_STATES = ["PENDING", "RUNNING", "TERMINATING", "TERMINATED", "SKIPPED", "INTERNAL_ERROR"]
SPARK_VERSIONS_ENDPOINT = ("GET", "api/2.0/clusters/spark-versions")
class RunState:
"""Utility class for the run state concept of Databricks runs."""
def __init__(
self, life_cycle_state: str, result_state: str = "", state_message: str = "", *args, **kwargs
) -> None:
self.life_cycle_state = life_cycle_state
self.result_state = result_state
self.state_message = state_message
@property
def is_terminal(self) -> bool:
"""True if the current state is a terminal state."""
if self.life_cycle_state not in RUN_LIFE_CYCLE_STATES:
raise AirflowException(
(
"Unexpected life cycle state: {}: If the state has "
"been introduced recently, please check the Databricks user "
"guide for troubleshooting information"
).format(self.life_cycle_state)
)
return self.life_cycle_state in ("TERMINATED", "SKIPPED", "INTERNAL_ERROR")
@property
def is_successful(self) -> bool:
"""True if the result state is SUCCESS."""
return self.result_state == "SUCCESS"
def __eq__(self, other: object) -> bool:
if not isinstance(other, RunState):
return NotImplemented
return (
self.life_cycle_state == other.life_cycle_state
and self.result_state == other.result_state
and self.state_message == other.state_message
)
def __repr__(self) -> str:
return str(self.__dict__)
def to_json(self) -> str:
return json.dumps(self.__dict__)
@classmethod
def from_json(cls, data: str) -> RunState:
return RunState(**json.loads(data))
class DatabricksHook(BaseDatabricksHook):
"""
Interact with Databricks.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
:param timeout_seconds: The amount of time in seconds the requests library
will wait before timing-out.
:param retry_limit: The number of times to retry the connection in case of
service outages.
:param retry_delay: The number of seconds to wait between retries (it
might be a floating point number).
:param retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
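    A minimal usage sketch (illustrative only; the connection id, cluster id, notebook path
    and run payload below are placeholder assumptions, not values defined by this module)::
        # Hypothetical example: submit a one-time notebook run and inspect its state.
        hook = DatabricksHook(databricks_conn_id="databricks_default")
        run_id = hook.submit_run(
            {
                "run_name": "example-run",
                "tasks": [
                    {
                        "task_key": "example_task",
                        "existing_cluster_id": "1234-567890-abcde123",
                        "notebook_task": {"notebook_path": "/Users/user@example.com/my-notebook"},
                    }
                ],
            }
        )
        run_state = hook.get_run_state(run_id)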
"""
hook_name = "Databricks"
def __init__(
self,
databricks_conn_id: str = BaseDatabricksHook.default_conn_name,
timeout_seconds: int = 180,
retry_limit: int = 3,
retry_delay: float = 1.0,
retry_args: dict[Any, Any] | None = None,
caller: str = "DatabricksHook",
) -> None:
super().__init__(databricks_conn_id, timeout_seconds, retry_limit, retry_delay, retry_args, caller)
def run_now(self, json: dict) -> int:
"""
        Utility function to call the ``api/2.1/jobs/run-now`` endpoint.
:param json: The data used in the body of the request to the ``run-now`` endpoint.
:return: the run_id as an int
"""
response = self._do_api_call(RUN_NOW_ENDPOINT, json)
return response["run_id"]
def submit_run(self, json: dict) -> int:
"""
        Utility function to call the ``api/2.1/jobs/runs/submit`` endpoint.
:param json: The data used in the body of the request to the ``submit`` endpoint.
:return: the run_id as an int
"""
response = self._do_api_call(SUBMIT_RUN_ENDPOINT, json)
return response["run_id"]
def list_jobs(
self, limit: int = 25, offset: int = 0, expand_tasks: bool = False, job_name: str | None = None
) -> list[dict[str, Any]]:
"""
Lists the jobs in the Databricks Job Service.
:param limit: The limit/batch size used to retrieve jobs.
:param offset: The offset of the first job to return, relative to the most recently created job.
:param expand_tasks: Whether to include task and cluster details in the response.
:param job_name: Optional name of a job to search.
:return: A list of jobs.
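        A short sketch (illustrative only; ``hook`` is assumed to be an instance of this class
        and the job name is a placeholder)::
            # Hypothetical example: collect the ids of all jobs with an exact name match.
            jobs = hook.list_jobs(limit=50, job_name="nightly_etl")
            job_ids = [j["job_id"] for j in jobs]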
"""
has_more = True
all_jobs = []
while has_more:
payload: dict[str, Any] = {
"limit": limit,
"expand_tasks": expand_tasks,
"offset": offset,
}
if job_name:
payload["name"] = job_name
response = self._do_api_call(LIST_JOBS_ENDPOINT, payload)
jobs = response.get("jobs", [])
if job_name:
all_jobs += [j for j in jobs if j["settings"]["name"] == job_name]
else:
all_jobs += jobs
has_more = response.get("has_more", False)
if has_more:
offset += len(jobs)
return all_jobs
def find_job_id_by_name(self, job_name: str) -> int | None:
"""
Finds job id by its name. If there are multiple jobs with the same name, raises AirflowException.
:param job_name: The name of the job to look up.
:return: The job_id as an int or None if no job was found.
"""
matching_jobs = self.list_jobs(job_name=job_name)
if len(matching_jobs) > 1:
raise AirflowException(
f"There are more than one job with name {job_name}. Please delete duplicated jobs first"
)
if not matching_jobs:
return None
else:
return matching_jobs[0]["job_id"]
def get_run_page_url(self, run_id: int) -> str:
"""
Retrieves run_page_url.
:param run_id: id of the run
:return: URL of the run page
"""
json = {"run_id": run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
return response["run_page_url"]
async def a_get_run_page_url(self, run_id: int) -> str:
"""
Async version of `get_run_page_url()`.
:param run_id: id of the run
:return: URL of the run page
"""
json = {"run_id": run_id}
response = await self._a_do_api_call(GET_RUN_ENDPOINT, json)
return response["run_page_url"]
def get_job_id(self, run_id: int) -> int:
"""
Retrieves job_id from run_id.
:param run_id: id of the run
:return: Job id for given Databricks run
"""
json = {"run_id": run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
return response["job_id"]
def get_run_state(self, run_id: int) -> RunState:
"""
Retrieves run state of the run.
Please note that any Airflow tasks that call the ``get_run_state`` method will result in
failure unless you have enabled xcom pickling. This can be done using the following
environment variable: ``AIRFLOW__CORE__ENABLE_XCOM_PICKLING``
If you do not want to enable xcom pickling, use the ``get_run_state_str`` method to get
a string describing state, or ``get_run_state_lifecycle``, ``get_run_state_result``, or
``get_run_state_message`` to get individual components of the run state.
:param run_id: id of the run
:return: state of the run
"""
json = {"run_id": run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
state = response["state"]
return RunState(**state)
async def a_get_run_state(self, run_id: int) -> RunState:
"""
Async version of `get_run_state()`.
:param run_id: id of the run
:return: state of the run
"""
json = {"run_id": run_id}
response = await self._a_do_api_call(GET_RUN_ENDPOINT, json)
state = response["state"]
return RunState(**state)
def get_run(self, run_id: int) -> dict[str, Any]:
"""
Retrieve run information.
:param run_id: id of the run
:return: state of the run
"""
json = {"run_id": run_id}
response = self._do_api_call(GET_RUN_ENDPOINT, json)
return response
async def a_get_run(self, run_id: int) -> dict[str, Any]:
"""
Async version of `get_run`.
:param run_id: id of the run
:return: state of the run
"""
json = {"run_id": run_id}
response = await self._a_do_api_call(GET_RUN_ENDPOINT, json)
return response
def get_run_state_str(self, run_id: int) -> str:
"""
Return the string representation of RunState.
:param run_id: id of the run
:return: string describing run state
"""
state = self.get_run_state(run_id)
run_state_str = (
f"State: {state.life_cycle_state}. Result: {state.result_state}. {state.state_message}"
)
return run_state_str
def get_run_state_lifecycle(self, run_id: int) -> str:
"""
Returns the lifecycle state of the run.
:param run_id: id of the run
:return: string with lifecycle state
"""
return self.get_run_state(run_id).life_cycle_state
def get_run_state_result(self, run_id: int) -> str:
"""
Returns the resulting state of the run.
:param run_id: id of the run
:return: string with resulting state
"""
return self.get_run_state(run_id).result_state
def get_run_state_message(self, run_id: int) -> str:
"""
Returns the state message for the run.
:param run_id: id of the run
:return: string with state message
"""
return self.get_run_state(run_id).state_message
def get_run_output(self, run_id: int) -> dict:
"""
Retrieves run output of the run.
:param run_id: id of the run
:return: output of the run
"""
json = {"run_id": run_id}
run_output = self._do_api_call(OUTPUT_RUNS_JOB_ENDPOINT, json)
return run_output
def cancel_run(self, run_id: int) -> None:
"""
Cancels the run.
:param run_id: id of the run
"""
json = {"run_id": run_id}
self._do_api_call(CANCEL_RUN_ENDPOINT, json)
def cancel_all_runs(self, job_id: int) -> None:
"""
Cancels all active runs of a job. The runs are canceled asynchronously.
:param job_id: The canonical identifier of the job to cancel all runs of
"""
json = {"job_id": job_id}
self._do_api_call(CANCEL_ALL_RUNS_ENDPOINT, json)
def delete_run(self, run_id: int) -> None:
"""
Deletes a non-active run.
:param run_id: id of the run
"""
json = {"run_id": run_id}
self._do_api_call(DELETE_RUN_ENDPOINT, json)
def repair_run(self, json: dict) -> None:
"""
Re-run one or more tasks.
:param json: repair a job run.
"""
self._do_api_call(REPAIR_RUN_ENDPOINT, json)
def restart_cluster(self, json: dict) -> None:
"""
Restarts the cluster.
:param json: json dictionary containing cluster specification.
"""
self._do_api_call(RESTART_CLUSTER_ENDPOINT, json)
def start_cluster(self, json: dict) -> None:
"""
Starts the cluster.
:param json: json dictionary containing cluster specification.
"""
self._do_api_call(START_CLUSTER_ENDPOINT, json)
def terminate_cluster(self, json: dict) -> None:
"""
Terminates the cluster.
:param json: json dictionary containing cluster specification.
"""
self._do_api_call(TERMINATE_CLUSTER_ENDPOINT, json)
def install(self, json: dict) -> None:
"""
Install libraries on the cluster.
Utility function to call the ``2.0/libraries/install`` endpoint.
:param json: json dictionary containing cluster_id and an array of library
"""
self._do_api_call(INSTALL_LIBS_ENDPOINT, json)
def uninstall(self, json: dict) -> None:
"""
Uninstall libraries on the cluster.
Utility function to call the ``2.0/libraries/uninstall`` endpoint.
:param json: json dictionary containing cluster_id and an array of library
"""
self._do_api_call(UNINSTALL_LIBS_ENDPOINT, json)
def update_repo(self, repo_id: str, json: dict[str, Any]) -> dict:
"""
Updates given Databricks Repos.
:param repo_id: ID of Databricks Repos
:param json: payload
:return: metadata from update
"""
repos_endpoint = ("PATCH", f"api/2.0/repos/{repo_id}")
return self._do_api_call(repos_endpoint, json)
def delete_repo(self, repo_id: str):
"""
Deletes given Databricks Repos.
:param repo_id: ID of Databricks Repos
:return:
"""
repos_endpoint = ("DELETE", f"api/2.0/repos/{repo_id}")
self._do_api_call(repos_endpoint)
def create_repo(self, json: dict[str, Any]) -> dict:
"""
Creates a Databricks Repos.
:param json: payload
:return:
"""
repos_endpoint = ("POST", "api/2.0/repos")
return self._do_api_call(repos_endpoint, json)
def get_repo_by_path(self, path: str) -> str | None:
"""
Obtains Repos ID by path.
:param path: path to a repository
:return: Repos ID if it exists, None if doesn't.
"""
try:
result = self._do_api_call(WORKSPACE_GET_STATUS_ENDPOINT, {"path": path}, wrap_http_errors=False)
if result.get("object_type", "") == "REPO":
return str(result["object_id"])
except requests_exceptions.HTTPError as e:
if e.response.status_code != 404:
raise e
return None
def test_connection(self) -> tuple[bool, str]:
"""Test the Databricks connectivity from UI."""
hook = DatabricksHook(databricks_conn_id=self.databricks_conn_id)
try:
hook._do_api_call(endpoint_info=SPARK_VERSIONS_ENDPOINT).get("versions")
status = True
message = "Connection successfully tested"
except Exception as e:
status = False
message = str(e)
return status, message
| 16,773 | 33.302658 | 109 | py |
airflow | airflow-main/airflow/providers/databricks/hooks/databricks_sql.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from contextlib import closing
from copy import copy
from typing import Any, Callable, Iterable, Mapping
from databricks import sql # type: ignore[attr-defined]
from databricks.sql.client import Connection # type: ignore[attr-defined]
from airflow.exceptions import AirflowException
from airflow.providers.common.sql.hooks.sql import DbApiHook, return_single_query_results
from airflow.providers.databricks.hooks.databricks_base import BaseDatabricksHook
LIST_SQL_ENDPOINTS_ENDPOINT = ("GET", "api/2.0/sql/endpoints")
class DatabricksSqlHook(BaseDatabricksHook, DbApiHook):
"""Hook to interact with Databricks SQL.
:param databricks_conn_id: Reference to the
:ref:`Databricks connection <howto/connection:databricks>`.
:param http_path: Optional string specifying HTTP path of Databricks SQL Endpoint or cluster.
If not specified, it should be either specified in the Databricks connection's extra parameters,
or ``sql_endpoint_name`` must be specified.
:param sql_endpoint_name: Optional name of Databricks SQL Endpoint. If not specified, ``http_path``
must be provided as described above.
:param session_configuration: An optional dictionary of Spark session parameters. Defaults to None.
If not specified, it could be specified in the Databricks connection's extra parameters.
:param http_headers: An optional list of (k, v) pairs that will be set as HTTP headers
on every request
:param catalog: An optional initial catalog to use. Requires DBR version 9.0+
:param schema: An optional initial schema to use. Requires DBR version 9.0+
:param kwargs: Additional parameters internal to Databricks SQL Connector parameters
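    A minimal usage sketch (illustrative only; the connection id, endpoint name and query
    below are placeholder assumptions, not values defined by this module)::
        # Hypothetical example: run a query against a SQL endpoint and fetch all rows.
        hook = DatabricksSqlHook(
            databricks_conn_id="databricks_default",
            sql_endpoint_name="my_endpoint",
        )
        rows = hook.run(
            "SELECT id, name FROM my_schema.my_table",
            handler=lambda cursor: cursor.fetchall(),
        )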
"""
hook_name = "Databricks SQL"
_test_connection_sql = "select 42"
def __init__(
self,
databricks_conn_id: str = BaseDatabricksHook.default_conn_name,
http_path: str | None = None,
sql_endpoint_name: str | None = None,
session_configuration: dict[str, str] | None = None,
http_headers: list[tuple[str, str]] | None = None,
catalog: str | None = None,
schema: str | None = None,
caller: str = "DatabricksSqlHook",
**kwargs,
) -> None:
super().__init__(databricks_conn_id, caller=caller)
self._sql_conn = None
self._token: str | None = None
self._http_path = http_path
self._sql_endpoint_name = sql_endpoint_name
self.supports_autocommit = True
self.session_config = session_configuration
self.http_headers = http_headers
self.catalog = catalog
self.schema = schema
self.additional_params = kwargs
def _get_extra_config(self) -> dict[str, Any | None]:
extra_params = copy(self.databricks_conn.extra_dejson)
for arg in ["http_path", "session_configuration"] + self.extra_parameters:
if arg in extra_params:
del extra_params[arg]
return extra_params
def _get_sql_endpoint_by_name(self, endpoint_name) -> dict[str, Any]:
result = self._do_api_call(LIST_SQL_ENDPOINTS_ENDPOINT)
if "endpoints" not in result:
raise AirflowException("Can't list Databricks SQL endpoints")
lst = [endpoint for endpoint in result["endpoints"] if endpoint["name"] == endpoint_name]
if len(lst) == 0:
raise AirflowException(f"Can't f Databricks SQL endpoint with name '{endpoint_name}'")
return lst[0]
def get_conn(self) -> Connection:
"""Returns a Databricks SQL connection object."""
if not self._http_path:
if self._sql_endpoint_name:
endpoint = self._get_sql_endpoint_by_name(self._sql_endpoint_name)
self._http_path = endpoint["odbc_params"]["path"]
elif "http_path" in self.databricks_conn.extra_dejson:
self._http_path = self.databricks_conn.extra_dejson["http_path"]
else:
raise AirflowException(
"http_path should be provided either explicitly, "
"or in extra parameter of Databricks connection, "
"or sql_endpoint_name should be specified"
)
requires_init = True
if not self._token:
self._token = self._get_token(raise_error=True)
else:
new_token = self._get_token(raise_error=True)
if new_token != self._token:
self._token = new_token
else:
requires_init = False
if not self.session_config:
self.session_config = self.databricks_conn.extra_dejson.get("session_configuration")
if not self._sql_conn or requires_init:
if self._sql_conn: # close already existing connection
self._sql_conn.close()
self._sql_conn = sql.connect(
self.host,
self._http_path,
self._token,
schema=self.schema,
catalog=self.catalog,
session_configuration=self.session_config,
http_headers=self.http_headers,
_user_agent_entry=self.user_agent_value,
**self._get_extra_config(),
**self.additional_params,
)
return self._sql_conn
def run(
self,
sql: str | Iterable[str],
autocommit: bool = False,
parameters: Iterable | Mapping | None = None,
handler: Callable | None = None,
split_statements: bool = True,
return_last: bool = True,
) -> Any | list[Any] | None:
"""Runs a command or a list of commands.
Pass a list of SQL statements to the SQL parameter to get them to
execute sequentially.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param autocommit: What to set the connection's autocommit setting to
before executing the query. Note that currently there is no commit functionality
in Databricks SQL so this flag has no effect.
:param parameters: The parameters to render the SQL query with.
:param handler: The result handler which is called with the result of each statement.
:param split_statements: Whether to split a single SQL string into statements and run separately
:param return_last: Whether to return result for only last statement or for all after split
:return: return only result of the LAST SQL expression if handler was provided unless return_last
is set to False.
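        A short sketch (illustrative only; ``hook`` is assumed to be an instance of this class
        and the statements are placeholders)::
            # Hypothetical example: execute two statements and keep the result of each one.
            results = hook.run(
                sql=["SELECT 1", "SELECT 2"],
                handler=lambda cursor: cursor.fetchall(),
                return_last=False,
            )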
"""
self.descriptions = []
if isinstance(sql, str):
if split_statements:
sql_list = [self.strip_sql_string(s) for s in self.split_sql_string(sql)]
else:
sql_list = [self.strip_sql_string(sql)]
else:
sql_list = [self.strip_sql_string(s) for s in sql]
if sql_list:
self.log.debug("Executing following statements against Databricks DB: %s", sql_list)
else:
raise ValueError("List of SQL statements is empty")
conn = None
results = []
for sql_statement in sql_list:
# when using AAD tokens, it could expire if previous query run longer than token lifetime
conn = self.get_conn()
with closing(conn.cursor()) as cur:
self.set_autocommit(conn, autocommit)
with closing(conn.cursor()) as cur:
self._run_command(cur, sql_statement, parameters)
if handler is not None:
result = handler(cur)
if return_single_query_results(sql, return_last, split_statements):
results = [result]
self.descriptions = [cur.description]
else:
results.append(result)
self.descriptions.append(cur.description)
if conn:
conn.close()
self._sql_conn = None
if handler is None:
return None
if return_single_query_results(sql, return_last, split_statements):
return results[-1]
else:
return results
def bulk_dump(self, table, tmp_file):
raise NotImplementedError()
def bulk_load(self, table, tmp_file):
raise NotImplementedError()
| 9,359 | 42.333333 | 105 | py |
airflow | airflow-main/airflow/providers/databricks/hooks/databricks_base.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Databricks hook.
This hook enables the submitting and running of jobs to the Databricks platform. Internally the
operators talk to the ``api/2.0/jobs/runs/submit``
`endpoint <https://docs.databricks.com/api/latest/jobs.html#runs-submit>`_.
"""
from __future__ import annotations
import copy
import platform
import time
from functools import cached_property
from typing import Any
from urllib.parse import urlsplit
import aiohttp
import requests
from requests import PreparedRequest, exceptions as requests_exceptions
from requests.auth import AuthBase, HTTPBasicAuth
from requests.exceptions import JSONDecodeError
from tenacity import (
AsyncRetrying,
RetryError,
Retrying,
retry_if_exception,
stop_after_attempt,
wait_exponential,
)
from airflow import __version__
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection
from airflow.providers_manager import ProvidersManager
# https://docs.microsoft.com/en-us/azure/databricks/dev-tools/api/latest/aad/service-prin-aad-token#--get-an-azure-active-directory-access-token
# https://docs.microsoft.com/en-us/graph/deployments#app-registration-and-token-service-root-endpoints
AZURE_DEFAULT_AD_ENDPOINT = "https://login.microsoftonline.com"
AZURE_TOKEN_SERVICE_URL = "{}/{}/oauth2/token"
# https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/how-to-use-vm-token
AZURE_METADATA_SERVICE_TOKEN_URL = "http://169.254.169.254/metadata/identity/oauth2/token"
AZURE_METADATA_SERVICE_INSTANCE_URL = "http://169.254.169.254/metadata/instance"
TOKEN_REFRESH_LEAD_TIME = 120
AZURE_MANAGEMENT_ENDPOINT = "https://management.core.windows.net/"
DEFAULT_DATABRICKS_SCOPE = "2ff814a6-3304-4ab8-85cb-cd0e6f879c1d"
class BaseDatabricksHook(BaseHook):
"""
Base for interaction with Databricks.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
:param timeout_seconds: The amount of time in seconds the requests library
will wait before timing-out.
:param retry_limit: The number of times to retry the connection in case of
service outages.
:param retry_delay: The number of seconds to wait between retries (it
might be a floating point number).
:param retry_args: An optional dictionary with arguments passed to ``tenacity.Retrying`` class.
"""
conn_name_attr: str = "databricks_conn_id"
default_conn_name = "databricks_default"
conn_type = "databricks"
extra_parameters = [
"token",
"host",
"use_azure_managed_identity",
"azure_ad_endpoint",
"azure_resource_id",
"azure_tenant_id",
]
def __init__(
self,
databricks_conn_id: str = default_conn_name,
timeout_seconds: int = 180,
retry_limit: int = 3,
retry_delay: float = 1.0,
retry_args: dict[Any, Any] | None = None,
caller: str = "Unknown",
) -> None:
super().__init__()
self.databricks_conn_id = databricks_conn_id
self.timeout_seconds = timeout_seconds
if retry_limit < 1:
raise ValueError("Retry limit must be greater than or equal to 1")
self.retry_limit = retry_limit
self.retry_delay = retry_delay
self.aad_tokens: dict[str, dict] = {}
self.aad_timeout_seconds = 10
self.caller = caller
def my_after_func(retry_state):
self._log_request_error(retry_state.attempt_number, retry_state.outcome)
if retry_args:
self.retry_args = copy.copy(retry_args)
self.retry_args["retry"] = retry_if_exception(self._retryable_error)
self.retry_args["after"] = my_after_func
else:
self.retry_args = dict(
stop=stop_after_attempt(self.retry_limit),
wait=wait_exponential(min=self.retry_delay, max=(2**retry_limit)),
retry=retry_if_exception(self._retryable_error),
after=my_after_func,
)
@cached_property
def databricks_conn(self) -> Connection:
return self.get_connection(self.databricks_conn_id)
def get_conn(self) -> Connection:
return self.databricks_conn
@cached_property
def user_agent_header(self) -> dict[str, str]:
return {"user-agent": self.user_agent_value}
@cached_property
def user_agent_value(self) -> str:
manager = ProvidersManager()
package_name = manager.hooks[BaseDatabricksHook.conn_type].package_name # type: ignore[union-attr]
provider = manager.providers[package_name]
version = provider.version
python_version = platform.python_version()
system = platform.system().lower()
ua_string = (
f"databricks-airflow/{version} _/0.0.0 python/{python_version} os/{system} "
f"airflow/{__version__} operator/{self.caller}"
)
return ua_string
@cached_property
def host(self) -> str:
if "host" in self.databricks_conn.extra_dejson:
host = self._parse_host(self.databricks_conn.extra_dejson["host"])
else:
host = self._parse_host(self.databricks_conn.host)
return host
async def __aenter__(self):
self._session = aiohttp.ClientSession()
return self
async def __aexit__(self, *err):
await self._session.close()
self._session = None
@staticmethod
def _parse_host(host: str) -> str:
"""
        This function is resistant to incorrect connection settings provided by users in the host field.
For example -- when users supply ``https://xx.cloud.databricks.com`` as the
host, we must strip out the protocol to get the host.::
h = DatabricksHook()
assert h._parse_host('https://xx.cloud.databricks.com') == \
'xx.cloud.databricks.com'
In the case where users supply the correct ``xx.cloud.databricks.com`` as the
host, this function is a no-op.::
assert h._parse_host('xx.cloud.databricks.com') == 'xx.cloud.databricks.com'
"""
urlparse_host = urlsplit(host).hostname
if urlparse_host:
# In this case, host = https://xx.cloud.databricks.com
return urlparse_host
else:
# In this case, host = xx.cloud.databricks.com
return host
def _get_retry_object(self) -> Retrying:
"""
Instantiate a retry object.
:return: instance of Retrying class
"""
return Retrying(**self.retry_args)
def _a_get_retry_object(self) -> AsyncRetrying:
"""
Instantiate an async retry object.
:return: instance of AsyncRetrying class
"""
return AsyncRetrying(**self.retry_args)
def _get_aad_token(self, resource: str) -> str:
"""
Function to get AAD token for given resource.
Supports managed identity or service principal auth.
:param resource: resource to issue token to
:return: AAD token, or raise an exception
"""
aad_token = self.aad_tokens.get(resource)
if aad_token and self._is_aad_token_valid(aad_token):
return aad_token["token"]
self.log.info("Existing AAD token is expired, or going to expire soon. Refreshing...")
try:
for attempt in self._get_retry_object():
with attempt:
if self.databricks_conn.extra_dejson.get("use_azure_managed_identity", False):
params = {
"api-version": "2018-02-01",
"resource": resource,
}
resp = requests.get(
AZURE_METADATA_SERVICE_TOKEN_URL,
params=params,
headers={**self.user_agent_header, "Metadata": "true"},
timeout=self.aad_timeout_seconds,
)
else:
tenant_id = self.databricks_conn.extra_dejson["azure_tenant_id"]
data = {
"grant_type": "client_credentials",
"client_id": self.databricks_conn.login,
"resource": resource,
"client_secret": self.databricks_conn.password,
}
azure_ad_endpoint = self.databricks_conn.extra_dejson.get(
"azure_ad_endpoint", AZURE_DEFAULT_AD_ENDPOINT
)
resp = requests.post(
AZURE_TOKEN_SERVICE_URL.format(azure_ad_endpoint, tenant_id),
data=data,
headers={
**self.user_agent_header,
"Content-Type": "application/x-www-form-urlencoded",
},
timeout=self.aad_timeout_seconds,
)
resp.raise_for_status()
jsn = resp.json()
if (
"access_token" not in jsn
or jsn.get("token_type") != "Bearer"
or "expires_on" not in jsn
):
raise AirflowException(f"Can't get necessary data from AAD token: {jsn}")
token = jsn["access_token"]
self.aad_tokens[resource] = {"token": token, "expires_on": int(jsn["expires_on"])}
break
except RetryError:
raise AirflowException(f"API requests to Azure failed {self.retry_limit} times. Giving up.")
except requests_exceptions.HTTPError as e:
raise AirflowException(f"Response: {e.response.content}, Status Code: {e.response.status_code}")
return token
async def _a_get_aad_token(self, resource: str) -> str:
"""
Async version of `_get_aad_token()`.
:param resource: resource to issue token to
:return: AAD token, or raise an exception
"""
aad_token = self.aad_tokens.get(resource)
if aad_token and self._is_aad_token_valid(aad_token):
return aad_token["token"]
self.log.info("Existing AAD token is expired, or going to expire soon. Refreshing...")
try:
async for attempt in self._a_get_retry_object():
with attempt:
if self.databricks_conn.extra_dejson.get("use_azure_managed_identity", False):
params = {
"api-version": "2018-02-01",
"resource": resource,
}
async with self._session.get(
url=AZURE_METADATA_SERVICE_TOKEN_URL,
params=params,
headers={**self.user_agent_header, "Metadata": "true"},
timeout=self.aad_timeout_seconds,
) as resp:
resp.raise_for_status()
jsn = await resp.json()
else:
tenant_id = self.databricks_conn.extra_dejson["azure_tenant_id"]
data = {
"grant_type": "client_credentials",
"client_id": self.databricks_conn.login,
"resource": resource,
"client_secret": self.databricks_conn.password,
}
azure_ad_endpoint = self.databricks_conn.extra_dejson.get(
"azure_ad_endpoint", AZURE_DEFAULT_AD_ENDPOINT
)
async with self._session.post(
url=AZURE_TOKEN_SERVICE_URL.format(azure_ad_endpoint, tenant_id),
data=data,
headers={
**self.user_agent_header,
"Content-Type": "application/x-www-form-urlencoded",
},
timeout=self.aad_timeout_seconds,
) as resp:
resp.raise_for_status()
jsn = await resp.json()
if (
"access_token" not in jsn
or jsn.get("token_type") != "Bearer"
or "expires_on" not in jsn
):
raise AirflowException(f"Can't get necessary data from AAD token: {jsn}")
token = jsn["access_token"]
self.aad_tokens[resource] = {"token": token, "expires_on": int(jsn["expires_on"])}
break
except RetryError:
raise AirflowException(f"API requests to Azure failed {self.retry_limit} times. Giving up.")
except aiohttp.ClientResponseError as err:
raise AirflowException(f"Response: {err.message}, Status Code: {err.status}")
return token
def _get_aad_headers(self) -> dict:
"""
Fill AAD headers if necessary (SPN is outside of the workspace).
:return: dictionary with filled AAD headers
"""
headers = {}
if "azure_resource_id" in self.databricks_conn.extra_dejson:
mgmt_token = self._get_aad_token(AZURE_MANAGEMENT_ENDPOINT)
headers["X-Databricks-Azure-Workspace-Resource-Id"] = self.databricks_conn.extra_dejson[
"azure_resource_id"
]
headers["X-Databricks-Azure-SP-Management-Token"] = mgmt_token
return headers
async def _a_get_aad_headers(self) -> dict:
"""
Async version of `_get_aad_headers()`.
:return: dictionary with filled AAD headers
"""
headers = {}
if "azure_resource_id" in self.databricks_conn.extra_dejson:
mgmt_token = await self._a_get_aad_token(AZURE_MANAGEMENT_ENDPOINT)
headers["X-Databricks-Azure-Workspace-Resource-Id"] = self.databricks_conn.extra_dejson[
"azure_resource_id"
]
headers["X-Databricks-Azure-SP-Management-Token"] = mgmt_token
return headers
@staticmethod
def _is_aad_token_valid(aad_token: dict) -> bool:
"""
        Utility function to check that the AAD token hasn't expired yet.
:param aad_token: dict with properties of AAD token
:return: true if token is valid, false otherwise
"""
now = int(time.time())
if aad_token["expires_on"] > (now + TOKEN_REFRESH_LEAD_TIME):
return True
return False
@staticmethod
def _check_azure_metadata_service() -> None:
"""
Check for Azure Metadata Service.
https://docs.microsoft.com/en-us/azure/virtual-machines/linux/instance-metadata-service
"""
try:
jsn = requests.get(
AZURE_METADATA_SERVICE_INSTANCE_URL,
params={"api-version": "2021-02-01"},
headers={"Metadata": "true"},
timeout=2,
).json()
if "compute" not in jsn or "azEnvironment" not in jsn["compute"]:
raise AirflowException(
f"Was able to fetch some metadata, but it doesn't look like Azure Metadata: {jsn}"
)
except (requests_exceptions.RequestException, ValueError) as e:
raise AirflowException(f"Can't reach Azure Metadata Service: {e}")
async def _a_check_azure_metadata_service(self):
"""Async version of `_check_azure_metadata_service()`."""
try:
async with self._session.get(
url=AZURE_METADATA_SERVICE_INSTANCE_URL,
params={"api-version": "2021-02-01"},
headers={"Metadata": "true"},
timeout=2,
) as resp:
jsn = await resp.json()
if "compute" not in jsn or "azEnvironment" not in jsn["compute"]:
raise AirflowException(
f"Was able to fetch some metadata, but it doesn't look like Azure Metadata: {jsn}"
)
except (requests_exceptions.RequestException, ValueError) as e:
raise AirflowException(f"Can't reach Azure Metadata Service: {e}")
def _get_token(self, raise_error: bool = False) -> str | None:
if "token" in self.databricks_conn.extra_dejson:
self.log.info(
"Using token auth. For security reasons, please set token in Password field instead of extra"
)
return self.databricks_conn.extra_dejson["token"]
elif not self.databricks_conn.login and self.databricks_conn.password:
self.log.info("Using token auth.")
return self.databricks_conn.password
elif "azure_tenant_id" in self.databricks_conn.extra_dejson:
if self.databricks_conn.login == "" or self.databricks_conn.password == "":
raise AirflowException("Azure SPN credentials aren't provided")
self.log.info("Using AAD Token for SPN.")
return self._get_aad_token(DEFAULT_DATABRICKS_SCOPE)
elif self.databricks_conn.extra_dejson.get("use_azure_managed_identity", False):
self.log.info("Using AAD Token for managed identity.")
self._check_azure_metadata_service()
return self._get_aad_token(DEFAULT_DATABRICKS_SCOPE)
elif raise_error:
raise AirflowException("Token authentication isn't configured")
return None
async def _a_get_token(self, raise_error: bool = False) -> str | None:
if "token" in self.databricks_conn.extra_dejson:
self.log.info(
"Using token auth. For security reasons, please set token in Password field instead of extra"
)
return self.databricks_conn.extra_dejson["token"]
elif not self.databricks_conn.login and self.databricks_conn.password:
self.log.info("Using token auth.")
return self.databricks_conn.password
elif "azure_tenant_id" in self.databricks_conn.extra_dejson:
if self.databricks_conn.login == "" or self.databricks_conn.password == "":
raise AirflowException("Azure SPN credentials aren't provided")
self.log.info("Using AAD Token for SPN.")
return await self._a_get_aad_token(DEFAULT_DATABRICKS_SCOPE)
elif self.databricks_conn.extra_dejson.get("use_azure_managed_identity", False):
self.log.info("Using AAD Token for managed identity.")
await self._a_check_azure_metadata_service()
return await self._a_get_aad_token(DEFAULT_DATABRICKS_SCOPE)
elif raise_error:
raise AirflowException("Token authentication isn't configured")
return None
def _log_request_error(self, attempt_num: int, error: str) -> None:
self.log.error("Attempt %s API Request to Databricks failed with reason: %s", attempt_num, error)
def _do_api_call(
self,
endpoint_info: tuple[str, str],
json: dict[str, Any] | None = None,
wrap_http_errors: bool = True,
):
"""
Utility function to perform an API call with retries.
:param endpoint_info: Tuple of method and endpoint
:param json: Parameters for this API call.
        :return: If the API call returns an OK status code,
            this function returns the response in JSON. Otherwise,
            we raise an AirflowException.
"""
method, endpoint = endpoint_info
# TODO: get rid of explicit 'api/' in the endpoint specification
url = f"https://{self.host}/{endpoint}"
aad_headers = self._get_aad_headers()
headers = {**self.user_agent_header, **aad_headers}
auth: AuthBase
token = self._get_token()
if token:
auth = _TokenAuth(token)
else:
self.log.info("Using basic auth.")
auth = HTTPBasicAuth(self.databricks_conn.login, self.databricks_conn.password)
request_func: Any
if method == "GET":
request_func = requests.get
elif method == "POST":
request_func = requests.post
elif method == "PATCH":
request_func = requests.patch
elif method == "DELETE":
request_func = requests.delete
else:
raise AirflowException("Unexpected HTTP Method: " + method)
try:
for attempt in self._get_retry_object():
with attempt:
response = request_func(
url,
json=json if method in ("POST", "PATCH") else None,
params=json if method == "GET" else None,
auth=auth,
headers=headers,
timeout=self.timeout_seconds,
)
response.raise_for_status()
return response.json()
except RetryError:
raise AirflowException(f"API requests to Databricks failed {self.retry_limit} times. Giving up.")
except requests_exceptions.HTTPError as e:
if wrap_http_errors:
raise AirflowException(
f"Response: {e.response.content}, Status Code: {e.response.status_code}"
)
else:
raise e
async def _a_do_api_call(self, endpoint_info: tuple[str, str], json: dict[str, Any] | None = None):
"""
Async version of `_do_api_call()`.
:param endpoint_info: Tuple of method and endpoint
:param json: Parameters for this API call.
        :return: If the API call returns an OK status code,
            this function returns the response in JSON. Otherwise, raise an AirflowException.
"""
method, endpoint = endpoint_info
url = f"https://{self.host}/{endpoint}"
aad_headers = await self._a_get_aad_headers()
headers = {**self.user_agent_header, **aad_headers}
auth: aiohttp.BasicAuth
token = await self._a_get_token()
if token:
auth = BearerAuth(token)
else:
self.log.info("Using basic auth.")
auth = aiohttp.BasicAuth(self.databricks_conn.login, self.databricks_conn.password)
request_func: Any
if method == "GET":
request_func = self._session.get
elif method == "POST":
request_func = self._session.post
elif method == "PATCH":
request_func = self._session.patch
else:
raise AirflowException("Unexpected HTTP Method: " + method)
try:
async for attempt in self._a_get_retry_object():
with attempt:
async with request_func(
url,
json=json,
auth=auth,
headers={**headers, **self.user_agent_header},
timeout=self.timeout_seconds,
) as response:
response.raise_for_status()
return await response.json()
except RetryError:
raise AirflowException(f"API requests to Databricks failed {self.retry_limit} times. Giving up.")
except aiohttp.ClientResponseError as err:
raise AirflowException(f"Response: {err.message}, Status Code: {err.status}")
@staticmethod
def _get_error_code(exception: BaseException) -> str:
if isinstance(exception, requests_exceptions.HTTPError):
try:
jsn = exception.response.json()
return jsn.get("error_code", "")
except JSONDecodeError:
pass
return ""
@staticmethod
def _retryable_error(exception: BaseException) -> bool:
if isinstance(exception, requests_exceptions.RequestException):
if isinstance(exception, (requests_exceptions.ConnectionError, requests_exceptions.Timeout)) or (
exception.response is not None
and (
exception.response.status_code >= 500
or exception.response.status_code == 429
or (
exception.response.status_code == 400
and BaseDatabricksHook._get_error_code(exception) == "COULD_NOT_ACQUIRE_LOCK"
)
)
):
return True
if isinstance(exception, aiohttp.ClientResponseError):
if exception.status >= 500 or exception.status == 429:
return True
return False
class _TokenAuth(AuthBase):
"""
Helper class for requests Auth field.
AuthBase requires you to implement the ``__call__``
magic function.
"""
def __init__(self, token: str) -> None:
self.token = token
def __call__(self, r: PreparedRequest) -> PreparedRequest:
r.headers["Authorization"] = "Bearer " + self.token
return r
class BearerAuth(aiohttp.BasicAuth):
"""aiohttp only ships BasicAuth, for Bearer auth we need a subclass of BasicAuth."""
def __new__(cls, token: str) -> BearerAuth:
return super().__new__(cls, token) # type: ignore
def __init__(self, token: str) -> None:
self.token = token
def encode(self) -> str:
return f"Bearer {self.token}"
| 26,696 | 39.821101 | 144 | py |
airflow | airflow-main/airflow/providers/databricks/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/databricks/utils/databricks.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.exceptions import AirflowException
from airflow.providers.databricks.hooks.databricks import RunState
def normalise_json_content(content, json_path: str = "json") -> str | bool | list | dict:
"""
    Normalize ``content``, or all values of ``content`` if it is a dict, to a string.
    The function raises if ``content`` contains a type that is not a string, a number
    or a boolean. We need this function because the ``self.json`` field must be a dict
    with only string values, since ``render_template`` will fail for numerical values.
    The only exception is boolean values: they cannot be converted to strings because
    Databricks does not understand 'True' or 'False' values.
"""
normalise = normalise_json_content
if isinstance(content, (str, bool)):
return content
elif isinstance(
content,
(
int,
float,
),
):
# Databricks can tolerate either numeric or string types in the API backend.
return str(content)
elif isinstance(content, (list, tuple)):
return [normalise(e, f"{json_path}[{i}]") for i, e in enumerate(content)]
elif isinstance(content, dict):
return {k: normalise(v, f"{json_path}[{k}]") for k, v in list(content.items())}
else:
param_type = type(content)
msg = f"Type {param_type} used for parameter {json_path} is not a number or a string"
raise AirflowException(msg)
def validate_trigger_event(event: dict):
"""
Validates correctness of the event received from DatabricksExecutionTrigger.
See: :class:`~airflow.providers.databricks.triggers.databricks.DatabricksExecutionTrigger`.
"""
keys_to_check = ["run_id", "run_page_url", "run_state"]
for key in keys_to_check:
if key not in event:
raise AirflowException(f"Could not find `{key}` in the event: {event}")
try:
RunState.from_json(event["run_state"])
except Exception:
raise AirflowException(f'Run state returned by the Trigger is incorrect: {event["run_state"]}')
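# Illustrative sketch of how the helpers above behave; the payload values are made up.
def _example_normalise_and_validate() -> None:
    # Numbers are stringified, strings and booleans pass through, containers recurse.
    assert normalise_json_content({"timeout_seconds": 3600, "notify": True}) == {
        "timeout_seconds": "3600",
        "notify": True,
    }
    # A trigger event must carry run_id, run_page_url and run_state; missing keys raise.
    try:
        validate_trigger_event({"run_id": 42})
    except AirflowException as err:
        print(f"Rejected incomplete event: {err}")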
| 2,945 | 39.356164 | 103 | py |
airflow | airflow-main/airflow/providers/databricks/utils/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/databricks/sensors/databricks_sql.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains Databricks sensors."""
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Any, Callable, Iterable, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.common.sql.hooks.sql import fetch_all_handler
from airflow.providers.databricks.hooks.databricks_sql import DatabricksSqlHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DatabricksSqlSensor(BaseSensorOperator):
"""
Sensor that runs a SQL query on Databricks.
:param databricks_conn_id: Reference to :ref:`Databricks
connection id<howto/connection:databricks>` (templated), defaults to
DatabricksSqlHook.default_conn_name.
:param sql_warehouse_name: Optional name of Databricks SQL warehouse. If not specified, ``http_path``
must be provided as described below, defaults to None
:param http_path: Optional string specifying HTTP path of Databricks SQL warehouse or All Purpose cluster.
If not specified, it should be either specified in the Databricks connection's
extra parameters, or ``sql_warehouse_name`` must be specified.
:param session_configuration: An optional dictionary of Spark session parameters. If not specified,
it could be specified in the Databricks connection's extra parameters, defaults to None
:param http_headers: An optional list of (k, v) pairs
that will be set as HTTP headers on every request. (templated).
:param catalog: An optional initial catalog to use.
Requires Databricks Runtime version 9.0+ (templated), defaults to ""
:param schema: An optional initial schema to use.
Requires Databricks Runtime version 9.0+ (templated), defaults to "default"
:param sql: SQL statement to be executed.
:param handler: Handler for DbApiHook.run() to return results, defaults to fetch_all_handler
:param client_parameters: Additional parameters internal to Databricks SQL connector parameters.
"""
template_fields: Sequence[str] = (
"databricks_conn_id",
"sql",
"catalog",
"schema",
"http_headers",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
def __init__(
self,
*,
databricks_conn_id: str = DatabricksSqlHook.default_conn_name,
http_path: str | None = None,
sql_warehouse_name: str | None = None,
session_configuration=None,
http_headers: list[tuple[str, str]] | None = None,
catalog: str = "",
schema: str = "default",
sql: str | Iterable[str],
handler: Callable[[Any], Any] = fetch_all_handler,
client_parameters: dict[str, Any] | None = None,
**kwargs,
) -> None:
"""Creates DatabricksSqlSensor object using the specified input arguments."""
self.databricks_conn_id = databricks_conn_id
self._http_path = http_path
self._sql_warehouse_name = sql_warehouse_name
self.session_config = session_configuration
self.http_headers = http_headers
self.catalog = catalog
self.schema = schema
self.sql = sql
self.caller = "DatabricksSqlSensor"
self.client_parameters = client_parameters or {}
self.hook_params = kwargs.pop("hook_params", {})
self.handler = handler
super().__init__(**kwargs)
@cached_property
def hook(self) -> DatabricksSqlHook:
"""Creates and returns a DatabricksSqlHook object."""
return DatabricksSqlHook(
self.databricks_conn_id,
self._http_path,
self._sql_warehouse_name,
self.session_config,
self.http_headers,
self.catalog,
self.schema,
self.caller,
**self.client_parameters,
**self.hook_params,
)
def _get_results(self) -> bool:
"""Uses the Databricks SQL hook and runs the specified SQL query."""
if not (self._http_path or self._sql_warehouse_name):
raise AirflowException(
"Databricks SQL warehouse/cluster configuration missing. Please specify either http_path or "
"sql_warehouse_name."
)
hook = self.hook
sql_result = hook.run(
self.sql,
handler=self.handler if self.do_xcom_push else None,
)
self.log.debug("SQL result: %s", sql_result)
return bool(sql_result)
def poke(self, context: Context) -> bool:
"""Sensor poke function to get and return results from the SQL sensor."""
return self._get_results()
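# Illustrative DAG sketch; the connection id, warehouse name and query are placeholders.
def _example_sql_sensor_dag() -> None:
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_databricks_sql_sensor", start_date=datetime(2023, 1, 1)):
        DatabricksSqlSensor(
            task_id="wait_for_rows",
            databricks_conn_id="databricks_default",
            sql_warehouse_name="my-warehouse",  # placeholder warehouse name
            sql="SELECT 1 FROM my_catalog.my_schema.my_table LIMIT 1",  # placeholder query
            timeout=60 * 60,
        )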
| 5,543 | 40.066667 | 110 | py |
airflow | airflow-main/airflow/providers/databricks/sensors/databricks_partition.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains Databricks sensors."""
from __future__ import annotations
from datetime import datetime
from functools import cached_property
from typing import TYPE_CHECKING, Any, Callable, Sequence
from databricks.sql.utils import ParamEscaper
from airflow.exceptions import AirflowException
from airflow.providers.common.sql.hooks.sql import fetch_all_handler
from airflow.providers.databricks.hooks.databricks_sql import DatabricksSqlHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class DatabricksPartitionSensor(BaseSensorOperator):
"""
Sensor to detect the presence of table partitions in Databricks.
:param databricks_conn_id: Reference to :ref:`Databricks
connection id<howto/connection:databricks>` (templated), defaults to
DatabricksSqlHook.default_conn_name.
:param sql_warehouse_name: Optional name of Databricks SQL warehouse. If not specified, ``http_path``
must be provided as described below, defaults to None
:param http_path: Optional string specifying HTTP path of Databricks SQL warehouse or All Purpose cluster.
If not specified, it should be either specified in the Databricks connection's
extra parameters, or ``sql_warehouse_name`` must be specified.
:param session_configuration: An optional dictionary of Spark session parameters. If not specified,
it could be specified in the Databricks connection's extra parameters, defaults to None
:param http_headers: An optional list of (k, v) pairs
that will be set as HTTP headers on every request. (templated).
:param catalog: An optional initial catalog to use.
Requires Databricks Runtime version 9.0+ (templated), defaults to ""
:param schema: An optional initial schema to use.
Requires Databricks Runtime version 9.0+ (templated), defaults to "default"
:param table_name: Name of the table to check partitions.
:param partitions: Name of the partitions to check.
Example: {"date": "2023-01-03", "name": ["abc", "def"]}
:param partition_operator: Optional comparison operator for partitions, such as >=.
:param handler: Handler for DbApiHook.run() to return results, defaults to fetch_all_handler
:param client_parameters: Additional parameters internal to Databricks SQL connector parameters.
"""
template_fields: Sequence[str] = (
"databricks_conn_id",
"catalog",
"schema",
"table_name",
"partitions",
"http_headers",
)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
def __init__(
self,
*,
databricks_conn_id: str = DatabricksSqlHook.default_conn_name,
http_path: str | None = None,
sql_warehouse_name: str | None = None,
session_configuration=None,
http_headers: list[tuple[str, str]] | None = None,
catalog: str = "",
schema: str = "default",
table_name: str,
partitions: dict,
partition_operator: str = "=",
handler: Callable[[Any], Any] = fetch_all_handler,
client_parameters: dict[str, Any] | None = None,
**kwargs,
) -> None:
self.databricks_conn_id = databricks_conn_id
self._http_path = http_path
self._sql_warehouse_name = sql_warehouse_name
self.session_config = session_configuration
self.http_headers = http_headers
self.catalog = catalog
self.schema = schema
self.caller = "DatabricksPartitionSensor"
self.partitions = partitions
self.partition_operator = partition_operator
self.table_name = table_name
self.client_parameters = client_parameters or {}
self.hook_params = kwargs.pop("hook_params", {})
self.handler = handler
self.escaper = ParamEscaper()
super().__init__(**kwargs)
def _sql_sensor(self, sql):
"""Executes the supplied SQL statement using the hook object."""
hook = self._get_hook
sql_result = hook.run(
sql,
handler=self.handler if self.do_xcom_push else None,
)
self.log.debug("SQL result: %s", sql_result)
return sql_result
@cached_property
def _get_hook(self) -> DatabricksSqlHook:
"""Creates and returns a DatabricksSqlHook object."""
return DatabricksSqlHook(
self.databricks_conn_id,
self._http_path,
self._sql_warehouse_name,
self.session_config,
self.http_headers,
self.catalog,
self.schema,
self.caller,
**self.client_parameters,
**self.hook_params,
)
def _check_table_partitions(self) -> list:
"""Generate the fully qualified table name, generate partition, and call the _sql_sensor method."""
if self.table_name.split(".")[0] == "delta":
_fully_qualified_table_name = self.table_name
else:
_fully_qualified_table_name = str(self.catalog + "." + self.schema + "." + self.table_name)
self.log.debug("Table name generated from arguments: %s", _fully_qualified_table_name)
_joiner_val = " AND "
_prefix = f"SELECT 1 FROM {_fully_qualified_table_name} WHERE"
_suffix = " LIMIT 1"
partition_sql = self._generate_partition_query(
prefix=_prefix,
suffix=_suffix,
joiner_val=_joiner_val,
opts=self.partitions,
table_name=_fully_qualified_table_name,
escape_key=False,
)
return self._sql_sensor(partition_sql)
def _generate_partition_query(
self,
prefix: str,
suffix: str,
joiner_val: str,
table_name: str,
opts: dict[str, str] | None = None,
escape_key: bool = False,
) -> str:
"""
Queries the table for available partitions.
Generates the SQL query based on the partition data types.
* For a list, it prepares the SQL in the format:
column_name in (value1, value2,...)
* For a numeric type, it prepares the format:
column_name =(or other provided operator such as >=) value
* For a date type, it prepares the format:
column_name =(or other provided operator such as >=) value
Once the filter predicates have been generated like above, the query
is prepared to be executed using the prefix and suffix supplied, which are:
"SELECT 1 FROM {_fully_qualified_table_name} WHERE" and "LIMIT 1".
"""
partition_columns = self._sql_sensor(f"DESCRIBE DETAIL {table_name}")[0][7]
self.log.debug("Partition columns: %s", partition_columns)
if len(partition_columns) < 1:
raise AirflowException(f"Table {table_name} does not have partitions")
formatted_opts = ""
if opts is not None and len(opts) > 0:
output_list = []
for partition_col, partition_value in opts.items():
if escape_key:
partition_col = self.escaper.escape_item(partition_col)
if partition_col in partition_columns:
if isinstance(partition_value, list):
output_list.append(f"""{partition_col} in {tuple(partition_value)}""")
self.log.debug("List formatting for partitions: %s", output_list)
if isinstance(partition_value, (int, float, complex)):
output_list.append(
f"""{partition_col}{self.partition_operator}{self.escaper.escape_item(partition_value)}"""
)
if isinstance(partition_value, (str, datetime)):
output_list.append(
f"""{partition_col}{self.partition_operator}{self.escaper.escape_item(partition_value)}"""
)
else:
raise AirflowException(
f"Column {partition_col} not part of table partitions: {partition_columns}"
)
else:
            # Raised when no partitions are specified to check via the ``partitions`` argument.
raise AirflowException("No partitions specified to check with the sensor.")
formatted_opts = f"{prefix} {joiner_val.join(output_list)} {suffix}"
self.log.debug("Formatted options: %s", formatted_opts)
return formatted_opts.strip()
def poke(self, context: Context) -> bool:
"""Checks the table partitions and returns the results."""
partition_result = self._check_table_partitions()
self.log.debug("Partition sensor result: %s", partition_result)
if len(partition_result) >= 1:
return True
else:
raise AirflowException(f"Specified partition(s): {self.partitions} were not found.")
| 9,827 | 42.875 | 118 | py |
airflow | airflow-main/airflow/providers/databricks/sensors/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/microsoft/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/utils.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from functools import wraps
def _ensure_prefixes(conn_type):
"""
Deprecated.
Remove when provider min airflow version >= 2.5.0 since this is handled by
provider manager from that version.
"""
def dec(func):
@wraps(func)
def inner():
field_behaviors = func()
conn_attrs = {"host", "schema", "login", "password", "port", "extra"}
def _ensure_prefix(field):
if field not in conn_attrs and not field.startswith("extra__"):
return f"extra__{conn_type}__{field}"
else:
return field
if "placeholders" in field_behaviors:
placeholders = field_behaviors["placeholders"]
field_behaviors["placeholders"] = {_ensure_prefix(k): v for k, v in placeholders.items()}
return field_behaviors
return inner
return dec
def get_field(*, conn_id: str, conn_type: str, extras: dict, field_name: str):
"""Get field from extra, first checking short name, then for backcompat we check for prefixed name."""
backcompat_prefix = f"extra__{conn_type}__"
backcompat_key = f"{backcompat_prefix}{field_name}"
ret = None
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix "
"when using this method."
)
if field_name in extras:
if backcompat_key in extras:
warnings.warn(
f"Conflicting params `{field_name}` and `{backcompat_key}` found in extras for conn "
f"{conn_id}. Using value for `{field_name}`. Please ensure this is the correct "
f"value and remove the backcompat key `{backcompat_key}`."
)
ret = extras[field_name]
elif backcompat_key in extras:
ret = extras.get(backcompat_key)
if ret == "":
return None
return ret
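# Illustrative sketch of how ``get_field`` resolves short vs. prefixed extra keys;
# the connection id and extras below are made-up examples.
def _example_get_field() -> None:
    extras = {"tenant_id": "abc", "extra__azure__account_name": "mystorage"}
    # The short name wins when present; otherwise the ``extra__<conn_type>__`` form is used.
    assert (
        get_field(conn_id="azure_default", conn_type="azure", extras=extras, field_name="tenant_id")
        == "abc"
    )
    assert (
        get_field(conn_id="azure_default", conn_type="azure", extras=extras, field_name="account_name")
        == "mystorage"
    )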
| 2,821 | 35.649351 | 106 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "6.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-microsoft-azure:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,540 | 35.690476 | 124 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/secrets/key_vault.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import re
import warnings
from functools import cached_property
from azure.core.exceptions import ResourceNotFoundError
from azure.identity import DefaultAzureCredential
from azure.keyvault.secrets import SecretClient
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.secrets import BaseSecretsBackend
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.version import version as airflow_version
def _parse_version(val):
val = re.sub(r"(\d+\.\d+\.\d+).*", lambda x: x.group(1), val)
return tuple(int(x) for x in val.split("."))
class AzureKeyVaultBackend(BaseSecretsBackend, LoggingMixin):
"""
Retrieves Airflow Connections or Variables from Azure Key Vault secrets.
The Azure Key Vault can be configured as a secrets backend in the ``airflow.cfg``:
.. code-block:: ini
[secrets]
backend = airflow.providers.microsoft.azure.secrets.key_vault.AzureKeyVaultBackend
backend_kwargs = {"connections_prefix": "airflow-connections", "vault_url": "<azure_key_vault_uri>"}
For example, if the secrets prefix is ``airflow-connections-smtp-default``, this would be accessible
if you provide ``{"connections_prefix": "airflow-connections"}`` and request conn_id ``smtp-default``.
And if variables prefix is ``airflow-variables-hello``, this would be accessible
if you provide ``{"variables_prefix": "airflow-variables"}`` and request variable key ``hello``.
For client authentication, the ``DefaultAzureCredential`` from the Azure Python SDK is used as
credential provider, which supports service principal, managed identity and user credentials
For example, to specify a service principal with secret you can set the environment variables
``AZURE_TENANT_ID``, ``AZURE_CLIENT_ID`` and ``AZURE_CLIENT_SECRET``.
.. seealso::
For more details on client authentication refer to the ``DefaultAzureCredential`` Class reference:
https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python
:param connections_prefix: Specifies the prefix of the secret to read to get Connections
If set to None (null), requests for connections will not be sent to Azure Key Vault
:param variables_prefix: Specifies the prefix of the secret to read to get Variables
If set to None (null), requests for variables will not be sent to Azure Key Vault
:param config_prefix: Specifies the prefix of the secret to read to get Variables.
If set to None (null), requests for configurations will not be sent to Azure Key Vault
:param vault_url: The URL of an Azure Key Vault to use
:param sep: separator used to concatenate secret_prefix and secret_id. Default: "-"
"""
def __init__(
self,
connections_prefix: str = "airflow-connections",
variables_prefix: str = "airflow-variables",
config_prefix: str = "airflow-config",
vault_url: str = "",
sep: str = "-",
**kwargs,
) -> None:
super().__init__()
self.vault_url = vault_url
if connections_prefix is not None:
self.connections_prefix = connections_prefix.rstrip(sep)
else:
self.connections_prefix = connections_prefix
if variables_prefix is not None:
self.variables_prefix = variables_prefix.rstrip(sep)
else:
self.variables_prefix = variables_prefix
if config_prefix is not None:
self.config_prefix = config_prefix.rstrip(sep)
else:
self.config_prefix = config_prefix
self.sep = sep
self.kwargs = kwargs
@cached_property
def client(self) -> SecretClient:
"""Create a Azure Key Vault client."""
credential = DefaultAzureCredential()
client = SecretClient(vault_url=self.vault_url, credential=credential, **self.kwargs)
return client
def get_conn_value(self, conn_id: str) -> str | None:
"""
Get a serialized representation of Airflow Connection from an Azure Key Vault secret.
:param conn_id: The Airflow connection id to retrieve
"""
if self.connections_prefix is None:
return None
return self._get_secret(self.connections_prefix, conn_id)
def get_conn_uri(self, conn_id: str) -> str | None:
"""
Return URI representation of Connection conn_id.
As of Airflow version 2.3.0 this method is deprecated.
:param conn_id: the connection id
:return: deserialized Connection
"""
if _parse_version(airflow_version) >= (2, 3):
warnings.warn(
f"Method `{self.__class__.__name__}.get_conn_uri` is deprecated and will be removed "
"in a future release. Please use method `get_conn_value` instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
return self.get_conn_value(conn_id)
def get_variable(self, key: str) -> str | None:
"""
Get an Airflow Variable from an Azure Key Vault secret.
:param key: Variable Key
:return: Variable Value
"""
if self.variables_prefix is None:
return None
return self._get_secret(self.variables_prefix, key)
def get_config(self, key: str) -> str | None:
"""
Get Airflow Configuration.
:param key: Configuration Option Key
:return: Configuration Option Value
"""
if self.config_prefix is None:
return None
return self._get_secret(self.config_prefix, key)
@staticmethod
def build_path(path_prefix: str, secret_id: str, sep: str = "-") -> str:
"""
Given a path_prefix and secret_id, build a valid secret name for the Azure Key Vault Backend.
Also replaces underscore in the path with dashes to support easy switching between
environment variables, so ``connection_default`` becomes ``connection-default``.
:param path_prefix: The path prefix of the secret to retrieve
:param secret_id: Name of the secret
:param sep: Separator used to concatenate path_prefix and secret_id
"""
# When an empty prefix is given, do not add a separator to the secret name
if path_prefix == "":
path = f"{secret_id}"
else:
path = f"{path_prefix}{sep}{secret_id}"
return path.replace("_", sep)
def _get_secret(self, path_prefix: str, secret_id: str) -> str | None:
"""
Get an Azure Key Vault secret value.
:param path_prefix: Prefix for the Path to get Secret
:param secret_id: Secret Key
"""
name = self.build_path(path_prefix, secret_id, self.sep)
try:
secret = self.client.get_secret(name=name)
return secret.value
except ResourceNotFoundError as ex:
self.log.debug("Secret %s not found: %s", name, ex)
return None
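# Illustrative sketch: ``build_path`` is the piece that maps Airflow names onto Key Vault
# secret names; the prefix and connection id below are just example values.
def _example_build_path() -> None:
    # Underscores are swapped for the separator, so conn id ``smtp_default`` is looked up
    # as the Key Vault secret ``airflow-connections-smtp-default``.
    name = AzureKeyVaultBackend.build_path("airflow-connections", "smtp_default")
    assert name == "airflow-connections-smtp-default"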
| 7,891 | 39.680412 | 122 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/secrets/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/transfers/azure_blob_to_gcs.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.google.cloud.transfers.azure_blob_to_gcs import (
AzureBlobStorageToGCSOperator as AzureBlobStorageToGCSOperatorFromGoogleProvider,
)
class AzureBlobStorageToGCSOperator(AzureBlobStorageToGCSOperatorFromGoogleProvider):
"""
This class is deprecated.
Please use
:class:`airflow.providers.google.cloud.transfers.azure_blob_to_gcs.AzureBlobStorageToGCSOperator`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use
`airflow.providers.google.cloud.transfers.azure_blob_to_gcs.AzureBlobStorageToGCSOperator`.""",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
| 1,681 | 36.377778 | 107 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/transfers/sftp_to_wasb.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains SFTP to Azure Blob Storage operator."""
from __future__ import annotations
import os
from collections import namedtuple
from functools import cached_property
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
if TYPE_CHECKING:
from airflow.utils.context import Context
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
from airflow.providers.sftp.hooks.sftp import SFTPHook
WILDCARD = "*"
SftpFile = namedtuple("SftpFile", "sftp_file_path, blob_name")
class SFTPToWasbOperator(BaseOperator):
"""
Transfer files to Azure Blob Storage from SFTP server.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SFTPToWasbOperator`
:param sftp_source_path: The sftp remote path. This is the specified file path
for downloading the single file or multiple files from the SFTP server.
You can use only one wildcard within your path. The wildcard can appear
inside the path or at the end of the path.
:param container_name: Name of the container.
:param blob_prefix: Prefix to name a blob.
:param sftp_conn_id: The sftp connection id. The name or identifier for
establishing a connection to the SFTP server.
:param wasb_conn_id: Reference to the wasb connection.
:param load_options: Optional keyword arguments that
``WasbHook.load_file()`` takes.
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:param wasb_overwrite_object: Whether the blob to be uploaded
should overwrite the current data.
When wasb_overwrite_object is True, it will overwrite the existing data.
If set to False, the operation might fail with
ResourceExistsError in case a blob object already exists.
:param create_container: Attempt to create the target container prior to uploading the blob. This is
useful if the target container may not exist yet. Defaults to False.
"""
template_fields: Sequence[str] = ("sftp_source_path", "container_name", "blob_prefix")
def __init__(
self,
*,
sftp_source_path: str,
container_name: str,
blob_prefix: str = "",
sftp_conn_id: str = "sftp_default",
wasb_conn_id: str = "wasb_default",
load_options: dict | None = None,
move_object: bool = False,
wasb_overwrite_object: bool = False,
create_container: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sftp_source_path = sftp_source_path
self.blob_prefix = blob_prefix
self.sftp_conn_id = sftp_conn_id
        self.wasb_conn_id = wasb_conn_id
        self.container_name = container_name
self.load_options = load_options or {"overwrite": wasb_overwrite_object}
self.move_object = move_object
self.create_container = create_container
def dry_run(self) -> None:
super().dry_run()
sftp_files: list[SftpFile] = self.get_sftp_files_map()
for file in sftp_files:
self.log.info(
"Process will upload file from (SFTP) %s to wasb://%s as %s",
file.sftp_file_path,
self.container_name,
file.blob_name,
)
if self.move_object:
self.log.info("Executing delete of %s", file)
def execute(self, context: Context) -> None:
"""Upload a file from SFTP to Azure Blob Storage."""
sftp_files: list[SftpFile] = self.get_sftp_files_map()
uploaded_files = self.copy_files_to_wasb(sftp_files)
if self.move_object:
self.delete_files(uploaded_files)
def get_sftp_files_map(self) -> list[SftpFile]:
"""Get SFTP files from the source path, it may use a WILDCARD to this end."""
sftp_files = []
sftp_complete_path, prefix, delimiter = self.get_tree_behavior()
found_files, _, _ = self.sftp_hook.get_tree_map(
sftp_complete_path, prefix=prefix, delimiter=delimiter
)
self.log.info("Found %s files at sftp source path: %s", str(len(found_files)), self.sftp_source_path)
for file in found_files:
future_blob_name = self.get_full_path_blob(file)
sftp_files.append(SftpFile(file, future_blob_name))
return sftp_files
def get_tree_behavior(self) -> tuple[str, str | None, str | None]:
"""Extracts from source path the tree behavior to interact with the remote folder."""
self.check_wildcards_limit()
if self.source_path_contains_wildcard:
prefix, delimiter = self.sftp_source_path.split(WILDCARD, 1)
sftp_complete_path = os.path.dirname(prefix)
return sftp_complete_path, prefix, delimiter
return self.sftp_source_path, None, None
def check_wildcards_limit(self) -> None:
"""Check if there are multiple wildcards used in the SFTP source path."""
total_wildcards = self.sftp_source_path.count(WILDCARD)
if total_wildcards > 1:
raise AirflowException(
"Only one wildcard '*' is allowed in sftp_source_path parameter. "
f"Found {total_wildcards} in {self.sftp_source_path}."
)
@property
def source_path_contains_wildcard(self) -> bool:
"""Checks if the SFTP source path contains a wildcard."""
return WILDCARD in self.sftp_source_path
@cached_property
def sftp_hook(self) -> SFTPHook:
"""Property of sftp hook to be re-used."""
return SFTPHook(self.sftp_conn_id)
def get_full_path_blob(self, file: str) -> str:
"""Get a blob name based on the previous name and a blob_prefix variable."""
return self.blob_prefix + os.path.basename(file)
def copy_files_to_wasb(self, sftp_files: list[SftpFile]) -> list[str]:
"""Upload a list of files from sftp_files to Azure Blob Storage with a new Blob Name."""
uploaded_files = []
wasb_hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
for file in sftp_files:
with NamedTemporaryFile("w") as tmp:
self.sftp_hook.retrieve_file(file.sftp_file_path, tmp.name)
self.log.info(
"Uploading %s to wasb://%s as %s",
file.sftp_file_path,
self.container_name,
file.blob_name,
)
wasb_hook.load_file(
tmp.name,
self.container_name,
file.blob_name,
self.create_container,
**self.load_options,
)
uploaded_files.append(file.sftp_file_path)
return uploaded_files
def delete_files(self, uploaded_files: list[str]) -> None:
"""Delete files at SFTP which have been moved to Azure Blob Storage."""
for sftp_file_path in uploaded_files:
self.log.info("Executing delete of %s", sftp_file_path)
self.sftp_hook.delete_file(sftp_file_path)
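# Illustrative DAG sketch; connection ids, the SFTP glob and the container name are placeholders.
def _example_sftp_to_wasb_dag() -> None:
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_sftp_to_wasb", start_date=datetime(2023, 1, 1)):
        SFTPToWasbOperator(
            task_id="sftp_to_wasb",
            sftp_source_path="/tmp/exports/*.csv",  # a single wildcard is allowed
            container_name="landing",  # placeholder container
            blob_prefix="exports/",
            sftp_conn_id="sftp_default",
            wasb_conn_id="wasb_default",
            move_object=False,
            create_container=True,
        )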
| 8,202 | 39.408867 | 109 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/transfers/oracle_to_azure_data_lake.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import csv
import os
from tempfile import TemporaryDirectory
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.data_lake import AzureDataLakeHook
from airflow.providers.oracle.hooks.oracle import OracleHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class OracleToAzureDataLakeOperator(BaseOperator):
"""
Runs the query against Oracle and stores the file locally before loading it into Azure Data Lake.
:param filename: file name to be used by the csv file.
:param azure_data_lake_conn_id: destination azure data lake connection.
:param azure_data_lake_path: destination path in azure data lake to put the file.
:param oracle_conn_id: :ref:`Source Oracle connection <howto/connection:oracle>`.
:param sql: SQL query to execute against the Oracle database. (templated)
:param sql_params: Parameters to use in sql query. (templated)
:param delimiter: field delimiter in the file.
:param encoding: encoding type for the file.
:param quotechar: Character to use in quoting.
:param quoting: Quoting strategy. See csv library for more information.
"""
template_fields: Sequence[str] = ("filename", "sql", "sql_params")
template_fields_renderers = {"sql_params": "py"}
ui_color = "#e08c8c"
def __init__(
self,
*,
filename: str,
azure_data_lake_conn_id: str,
azure_data_lake_path: str,
oracle_conn_id: str,
sql: str,
sql_params: dict | None = None,
delimiter: str = ",",
encoding: str = "utf-8",
quotechar: str = '"',
quoting: int = csv.QUOTE_MINIMAL,
**kwargs,
) -> None:
super().__init__(**kwargs)
if sql_params is None:
sql_params = {}
self.filename = filename
self.oracle_conn_id = oracle_conn_id
self.sql = sql
self.sql_params = sql_params
self.azure_data_lake_conn_id = azure_data_lake_conn_id
self.azure_data_lake_path = azure_data_lake_path
self.delimiter = delimiter
self.encoding = encoding
self.quotechar = quotechar
self.quoting = quoting
def _write_temp_file(self, cursor: Any, path_to_save: str | bytes | int) -> None:
with open(path_to_save, "w", encoding=self.encoding) as csvfile:
csv_writer = csv.writer(
csvfile,
delimiter=self.delimiter,
quotechar=self.quotechar,
quoting=self.quoting,
)
csv_writer.writerow(map(lambda field: field[0], cursor.description))
csv_writer.writerows(cursor)
csvfile.flush()
def execute(self, context: Context) -> None:
oracle_hook = OracleHook(oracle_conn_id=self.oracle_conn_id)
azure_data_lake_hook = AzureDataLakeHook(azure_data_lake_conn_id=self.azure_data_lake_conn_id)
self.log.info("Dumping Oracle query results to local file")
conn = oracle_hook.get_conn()
cursor = conn.cursor() # type: ignore[attr-defined]
cursor.execute(self.sql, self.sql_params)
with TemporaryDirectory(prefix="airflow_oracle_to_azure_op_") as temp:
self._write_temp_file(cursor, os.path.join(temp, self.filename))
self.log.info("Uploading local file to Azure Data Lake")
azure_data_lake_hook.upload_file(
os.path.join(temp, self.filename), os.path.join(self.azure_data_lake_path, self.filename)
)
cursor.close()
conn.close() # type: ignore[attr-defined]
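# Illustrative DAG sketch; connection ids, the SQL and the target path are placeholders.
def _example_oracle_to_adls_dag() -> None:
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_oracle_to_adls", start_date=datetime(2023, 1, 1)):
        OracleToAzureDataLakeOperator(
            task_id="oracle_to_adls",
            oracle_conn_id="oracle_default",
            sql="SELECT * FROM sales WHERE region = :region",  # placeholder query
            sql_params={"region": "EMEA"},
            filename="sales_emea.csv",
            azure_data_lake_conn_id="azure_data_lake_default",
            azure_data_lake_path="raw/sales",  # placeholder ADLS directory
        )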
| 4,475 | 39.324324 | 105 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/transfers/local_to_adls.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Any, Sequence
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.data_lake import AzureDataLakeHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class LocalFilesystemToADLSOperator(BaseOperator):
"""
Upload file(s) to Azure Data Lake.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:LocalFilesystemToADLSOperator`
:param local_path: local path. Can be single file, directory (in which case,
upload recursively) or glob pattern. Recursive glob patterns using `**`
are not supported
:param remote_path: Remote path to upload to; if multiple files, this is the
directory root to write within
:param nthreads: Number of threads to use. If None, uses the number of cores.
:param overwrite: Whether to forcibly overwrite existing files/directories.
If False and remote path is a directory, will quit regardless if any files
would be overwritten or not. If True, only matching filenames are actually
overwritten
:param buffersize: int [2**22]
Number of bytes for internal buffer. This block cannot be bigger than
a chunk and cannot be smaller than a block
:param blocksize: int [2**22]
Number of bytes for a block. Within each chunk, we write a smaller
block for each API call. This block cannot be bigger than a chunk
:param extra_upload_options: Extra upload options to add to the hook upload method
:param azure_data_lake_conn_id: Reference to the Azure Data Lake connection
"""
template_fields: Sequence[str] = ("local_path", "remote_path")
ui_color = "#e4f0e8"
def __init__(
self,
*,
local_path: str,
remote_path: str,
overwrite: bool = True,
nthreads: int = 64,
buffersize: int = 4194304,
blocksize: int = 4194304,
extra_upload_options: dict[str, Any] | None = None,
azure_data_lake_conn_id: str = "azure_data_lake_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.local_path = local_path
self.remote_path = remote_path
self.overwrite = overwrite
self.nthreads = nthreads
self.buffersize = buffersize
self.blocksize = blocksize
self.extra_upload_options = extra_upload_options
self.azure_data_lake_conn_id = azure_data_lake_conn_id
def execute(self, context: Context) -> None:
if "**" in self.local_path:
raise AirflowException("Recursive glob patterns using `**` are not supported")
if not self.extra_upload_options:
self.extra_upload_options = {}
hook = AzureDataLakeHook(azure_data_lake_conn_id=self.azure_data_lake_conn_id)
self.log.info("Uploading %s to %s", self.local_path, self.remote_path)
return hook.upload_file(
local_path=self.local_path,
remote_path=self.remote_path,
nthreads=self.nthreads,
overwrite=self.overwrite,
buffersize=self.buffersize,
blocksize=self.blocksize,
**self.extra_upload_options,
)
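# Usage sketch (not part of the original module): a minimal DAG wiring for LocalFilesystemToADLSOperator.
# The local glob, remote directory, and schedule are illustrative assumptions; the connection id is the
# operator default.
def _example_local_to_adls_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_local_to_adls", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        LocalFilesystemToADLSOperator(
            task_id="upload_reports",
            local_path="/tmp/reports/*.csv",
            remote_path="landing/reports",
            nthreads=8,
        )
        return dag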
class LocalToAzureDataLakeStorageOperator(LocalFilesystemToADLSOperator):
"""
This class is deprecated.
Please use
:class:`airflow.providers.microsoft.azure.transfers.local_to_adls.LocalFilesystemToADLSOperator`.
"""
def __init__(self, *args, **kwargs):
warnings.warn(
"""This class is deprecated.
Please use
`airflow.providers.microsoft.azure.transfers.local_to_adls.LocalFilesystemToADLSOperator`.""",
AirflowProviderDeprecationWarning,
stacklevel=3,
)
super().__init__(*args, **kwargs)
| 4,828 | 39.579832 | 106 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/transfers/local_to_wasb.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class LocalFilesystemToWasbOperator(BaseOperator):
"""
Uploads a file to Azure Blob Storage.
:param file_path: Path to the file to load. (templated)
:param container_name: Name of the container. (templated)
:param blob_name: Name of the blob. (templated)
:param wasb_conn_id: Reference to the wasb connection.
:param create_container: Attempt to create the target container prior to uploading the blob. This is
useful if the target container may not exist yet. Defaults to False.
:param load_options: Optional keyword arguments that
`WasbHook.load_file()` takes.
"""
template_fields: Sequence[str] = ("file_path", "container_name", "blob_name")
def __init__(
self,
*,
file_path: str,
container_name: str,
blob_name: str,
wasb_conn_id: str = "wasb_default",
create_container: bool = False,
load_options: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
if load_options is None:
load_options = {}
self.file_path = file_path
self.container_name = container_name
self.blob_name = blob_name
self.wasb_conn_id = wasb_conn_id
self.create_container = create_container
self.load_options = load_options
def execute(self, context: Context) -> None:
"""Upload a file to Azure Blob Storage."""
hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
self.log.info(
"Uploading %s to wasb://%s as %s",
self.file_path,
self.container_name,
self.blob_name,
)
hook.load_file(
file_path=self.file_path,
container_name=self.container_name,
blob_name=self.blob_name,
create_container=self.create_container,
**self.load_options,
)
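# Usage sketch (not part of the original module): uploading a single local file to a blob with
# LocalFilesystemToWasbOperator. The file path, container, and blob names are illustrative assumptions.
def _example_local_to_wasb_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_local_to_wasb", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        LocalFilesystemToWasbOperator(
            task_id="upload_blob",
            file_path="/tmp/example.csv",
            container_name="raw-data",
            blob_name="example.csv",
            create_container=True,
        )
        return dag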
| 2,936 | 34.817073 | 104 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/transfers/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/operators/wasb_delete_blob.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.wasb import WasbHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class WasbDeleteBlobOperator(BaseOperator):
"""
Deletes blob(s) on Azure Blob Storage.
:param container_name: Name of the container. (templated)
:param blob_name: Name of the blob. (templated)
:param wasb_conn_id: Reference to the :ref:`wasb connection <howto/connection:wasb>`.
:param check_options: Optional keyword arguments that
`WasbHook.check_for_blob()` takes.
:param is_prefix: If blob_name is a prefix, delete all files matching prefix.
:param ignore_if_missing: if True, then return success even if the
blob does not exist.
"""
template_fields: Sequence[str] = ("container_name", "blob_name")
def __init__(
self,
*,
container_name: str,
blob_name: str,
wasb_conn_id: str = "wasb_default",
check_options: Any = None,
is_prefix: bool = False,
ignore_if_missing: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
if check_options is None:
check_options = {}
self.wasb_conn_id = wasb_conn_id
self.container_name = container_name
self.blob_name = blob_name
self.check_options = check_options
self.is_prefix = is_prefix
self.ignore_if_missing = ignore_if_missing
def execute(self, context: Context) -> None:
self.log.info("Deleting blob: %s\n in wasb://%s", self.blob_name, self.container_name)
hook = WasbHook(wasb_conn_id=self.wasb_conn_id)
hook.delete_file(
self.container_name, self.blob_name, self.is_prefix, self.ignore_if_missing, **self.check_options
)
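# Usage sketch (not part of the original module): deleting every blob under a prefix with
# WasbDeleteBlobOperator. The container and prefix are illustrative assumptions.
def _example_wasb_delete_blob_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_wasb_delete_blob", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        WasbDeleteBlobOperator(
            task_id="delete_staging_blobs",
            container_name="raw-data",
            blob_name="staging/",
            is_prefix=True,
            ignore_if_missing=True,
        )
        return dag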
| 2,687 | 35.821918 | 109 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/operators/data_factory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
import warnings
from typing import TYPE_CHECKING, Any, Sequence
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator, BaseOperatorLink, XCom
from airflow.providers.microsoft.azure.hooks.data_factory import (
AzureDataFactoryHook,
AzureDataFactoryPipelineRunException,
AzureDataFactoryPipelineRunStatus,
PipelineRunInfo,
get_field,
)
from airflow.providers.microsoft.azure.triggers.data_factory import AzureDataFactoryTrigger
from airflow.utils.log.logging_mixin import LoggingMixin
if TYPE_CHECKING:
from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
class AzureDataFactoryPipelineRunLink(LoggingMixin, BaseOperatorLink):
"""Constructs a link to monitor a pipeline run in Azure Data Factory."""
name = "Monitor Pipeline Run"
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
run_id = XCom.get_value(key="run_id", ti_key=ti_key)
conn_id = operator.azure_data_factory_conn_id # type: ignore
conn = BaseHook.get_connection(conn_id)
extras = conn.extra_dejson
subscription_id = get_field(extras, "subscriptionId") or get_field(
extras, "extra__azure__subscriptionId"
)
if not subscription_id:
raise KeyError(f"Param subscriptionId not found in conn_id '{conn_id}'")
# Both Resource Group Name and Factory Name can either be declared in the Azure Data Factory
# connection or passed directly to the operator.
resource_group_name = operator.resource_group_name or get_field( # type: ignore
extras, "resource_group_name"
)
factory_name = operator.factory_name or get_field(extras, "factory_name") # type: ignore
url = (
f"https://adf.azure.com/en-us/monitoring/pipelineruns/{run_id}"
f"?factory=/subscriptions/{subscription_id}/"
f"resourceGroups/{resource_group_name}/providers/Microsoft.DataFactory/"
f"factories/{factory_name}"
)
return url
class AzureDataFactoryRunPipelineOperator(BaseOperator):
"""
Executes a data factory pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureDataFactoryRunPipelineOperator`
:param azure_data_factory_conn_id: The connection identifier for connecting to Azure Data Factory.
:param pipeline_name: The name of the pipeline to execute.
:param wait_for_termination: Flag to wait on a pipeline run's termination. By default, this feature is
enabled but could be disabled to perform an asynchronous wait for a long-running pipeline execution
using the ``AzureDataFactoryPipelineRunSensor``.
:param resource_group_name: The resource group name. If a value is not passed in to the operator, the
``AzureDataFactoryHook`` will attempt to use the resource group name provided in the corresponding
connection.
:param factory_name: The data factory name. If a value is not passed in to the operator, the
        ``AzureDataFactoryHook`` will attempt to use the factory name provided in the corresponding
connection.
:param reference_pipeline_run_id: The pipeline run identifier. If this run ID is specified the parameters
of the specified run will be used to create a new run.
:param is_recovery: Recovery mode flag. If recovery mode is set to `True`, the specified referenced
pipeline run and the new run will be grouped under the same ``groupId``.
:param start_activity_name: In recovery mode, the rerun will start from this activity. If not specified,
all activities will run.
:param start_from_failure: In recovery mode, if set to true, the rerun will start from failed activities.
The property will be used only if ``start_activity_name`` is not specified.
:param parameters: Parameters of the pipeline run. These parameters are referenced in a pipeline via
``@pipeline().parameters.parameterName`` and will be used only if the ``reference_pipeline_run_id`` is
not specified.
:param timeout: Time in seconds to wait for a pipeline to reach a terminal status for non-asynchronous
waits. Used only if ``wait_for_termination`` is True.
:param check_interval: Time in seconds to check on a pipeline run's status for non-asynchronous waits.
Used only if ``wait_for_termination`` is True.
:param deferrable: Run operator in deferrable mode.
"""
template_fields: Sequence[str] = (
"azure_data_factory_conn_id",
"resource_group_name",
"factory_name",
"pipeline_name",
"reference_pipeline_run_id",
"parameters",
)
template_fields_renderers = {"parameters": "json"}
ui_color = "#0678d4"
operator_extra_links = (AzureDataFactoryPipelineRunLink(),)
def __init__(
self,
*,
pipeline_name: str,
azure_data_factory_conn_id: str = AzureDataFactoryHook.default_conn_name,
wait_for_termination: bool = True,
resource_group_name: str | None = None,
factory_name: str | None = None,
reference_pipeline_run_id: str | None = None,
is_recovery: bool | None = None,
start_activity_name: str | None = None,
start_from_failure: bool | None = None,
parameters: dict[str, Any] | None = None,
timeout: int = 60 * 60 * 24 * 7,
check_interval: int = 60,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(**kwargs)
self.azure_data_factory_conn_id = azure_data_factory_conn_id
self.pipeline_name = pipeline_name
self.wait_for_termination = wait_for_termination
self.resource_group_name = resource_group_name
self.factory_name = factory_name
self.reference_pipeline_run_id = reference_pipeline_run_id
self.is_recovery = is_recovery
self.start_activity_name = start_activity_name
self.start_from_failure = start_from_failure
self.parameters = parameters
self.timeout = timeout
self.check_interval = check_interval
self.deferrable = deferrable
def execute(self, context: Context) -> None:
self.hook = AzureDataFactoryHook(azure_data_factory_conn_id=self.azure_data_factory_conn_id)
self.log.info("Executing the %s pipeline.", self.pipeline_name)
response = self.hook.run_pipeline(
pipeline_name=self.pipeline_name,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
reference_pipeline_run_id=self.reference_pipeline_run_id,
is_recovery=self.is_recovery,
start_activity_name=self.start_activity_name,
start_from_failure=self.start_from_failure,
parameters=self.parameters,
)
self.run_id = vars(response)["run_id"]
# Push the ``run_id`` value to XCom regardless of what happens during execution. This allows for
        # retrieval of the executed pipeline's ``run_id`` for downstream tasks, especially if performing an
# asynchronous wait.
context["ti"].xcom_push(key="run_id", value=self.run_id)
if self.wait_for_termination:
if self.deferrable is False:
self.log.info("Waiting for pipeline run %s to terminate.", self.run_id)
if self.hook.wait_for_pipeline_run_status(
run_id=self.run_id,
expected_statuses=AzureDataFactoryPipelineRunStatus.SUCCEEDED,
check_interval=self.check_interval,
timeout=self.timeout,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
):
self.log.info("Pipeline run %s has completed successfully.", self.run_id)
else:
raise AzureDataFactoryPipelineRunException(
f"Pipeline run {self.run_id} has failed or has been cancelled."
)
else:
end_time = time.time() + self.timeout
pipeline_run_info = PipelineRunInfo(
run_id=self.run_id,
factory_name=self.factory_name,
resource_group_name=self.resource_group_name,
)
pipeline_run_status = self.hook.get_pipeline_run_status(**pipeline_run_info)
if pipeline_run_status not in AzureDataFactoryPipelineRunStatus.TERMINAL_STATUSES:
self.defer(
timeout=self.execution_timeout,
trigger=AzureDataFactoryTrigger(
azure_data_factory_conn_id=self.azure_data_factory_conn_id,
run_id=self.run_id,
wait_for_termination=self.wait_for_termination,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
check_interval=self.check_interval,
end_time=end_time,
),
method_name="execute_complete",
)
elif pipeline_run_status == AzureDataFactoryPipelineRunStatus.SUCCEEDED:
self.log.info("Pipeline run %s has completed successfully.", self.run_id)
elif pipeline_run_status in AzureDataFactoryPipelineRunStatus.FAILURE_STATES:
raise AzureDataFactoryPipelineRunException(
f"Pipeline run {self.run_id} has failed or has been cancelled."
)
else:
if self.deferrable is True:
warnings.warn(
"Argument `wait_for_termination` is False and `deferrable` is True , hence "
"`deferrable` parameter doesn't have any effect",
)
def execute_complete(self, context: Context, event: dict[str, str]) -> None:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
if event:
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(event["message"])
def on_kill(self) -> None:
if self.run_id:
self.hook.cancel_pipeline_run(
run_id=self.run_id,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
)
# Check to ensure the pipeline run was cancelled as expected.
if self.hook.wait_for_pipeline_run_status(
run_id=self.run_id,
expected_statuses=AzureDataFactoryPipelineRunStatus.CANCELLED,
check_interval=self.check_interval,
timeout=self.timeout,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
):
self.log.info("Pipeline run %s has been cancelled successfully.", self.run_id)
else:
raise AzureDataFactoryPipelineRunException(f"Pipeline run {self.run_id} was not cancelled.")
| 12,525 | 46.44697 | 110 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/operators/batch.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from azure.batch import models as batch_models
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.batch import AzureBatchHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class AzureBatchOperator(BaseOperator):
"""
Executes a job on Azure Batch Service.
:param batch_pool_id: A string that uniquely identifies the Pool within the Account.
:param batch_pool_vm_size: The size of virtual machines in the Pool
:param batch_job_id: A string that uniquely identifies the Job within the Account.
:param batch_task_command_line: The command line of the Task
:param batch_task_id: A string that uniquely identifies the task within the Job.
:param batch_pool_display_name: The display name for the Pool.
The display name need not be unique
:param batch_job_display_name: The display name for the Job.
The display name need not be unique
:param batch_job_manager_task: Details of a Job Manager Task to be launched when the Job is started.
:param batch_job_preparation_task: The Job Preparation Task. If set, the Batch service will
run the Job Preparation Task on a Node before starting any Tasks of that
Job on that Compute Node. Required if batch_job_release_task is set.
    :param batch_job_release_task: The Job Release Task. Used to undo changes to Compute Nodes
made by the Job Preparation Task
:param batch_task_display_name: The display name for the task.
The display name need not be unique
:param batch_task_container_settings: The settings for the container under which the Task runs
:param batch_start_task: A Task specified to run on each Compute Node as it joins the Pool.
The Task runs when the Compute Node is added to the Pool or
when the Compute Node is restarted.
:param batch_max_retries: The number of times to retry this batch operation before it's
considered a failed operation. Default is 3
:param batch_task_resource_files: A list of files that the Batch service will
download to the Compute Node before running the command line.
:param batch_task_output_files: A list of files that the Batch service will upload
from the Compute Node after running the command line.
:param batch_task_user_identity: The user identity under which the Task runs.
If omitted, the Task runs as a non-administrative user unique to the Task.
:param target_low_priority_nodes: The desired number of low-priority Compute Nodes in the Pool.
This property must not be specified if enable_auto_scale is set to true.
:param target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool.
This property must not be specified if enable_auto_scale is set to true.
:param enable_auto_scale: Whether the Pool size should automatically adjust over time. Default is false
:param auto_scale_formula: A formula for the desired number of Compute Nodes in the Pool.
This property must not be specified if enableAutoScale is set to false.
It is required if enableAutoScale is set to true.
:param azure_batch_conn_id: The :ref:`Azure Batch connection id<howto/connection:azure_batch>`
:param use_latest_verified_vm_image_and_sku: Whether to use the latest verified virtual
machine image and sku in the batch account. Default is false.
:param vm_publisher: The publisher of the Azure Virtual Machines Marketplace Image.
For example, Canonical or MicrosoftWindowsServer. Required if
use_latest_image_and_sku is set to True
:param vm_offer: The offer type of the Azure Virtual Machines Marketplace Image.
For example, UbuntuServer or WindowsServer. Required if
use_latest_image_and_sku is set to True
:param sku_starts_with: The starting string of the Virtual Machine SKU. Required if
use_latest_image_and_sku is set to True
:param vm_sku: The name of the virtual machine sku to use
    :param vm_version: The version of the virtual machine
:param vm_node_agent_sku_id: The node agent sku id of the virtual machine
:param os_family: The Azure Guest OS family to be installed on the virtual machines in the Pool.
:param os_version: The OS family version
:param timeout: The amount of time to wait for the job to complete in minutes. Default is 25
:param should_delete_job: Whether to delete job after execution. Default is False
:param should_delete_pool: Whether to delete pool after execution of jobs. Default is False
"""
template_fields: Sequence[str] = (
"batch_pool_id",
"batch_pool_vm_size",
"batch_job_id",
"batch_task_id",
"batch_task_command_line",
)
ui_color = "#f0f0e4"
def __init__(
self,
*,
batch_pool_id: str,
batch_pool_vm_size: str,
batch_job_id: str,
batch_task_command_line: str,
batch_task_id: str,
vm_node_agent_sku_id: str,
vm_publisher: str | None = None,
vm_offer: str | None = None,
sku_starts_with: str | None = None,
vm_sku: str | None = None,
vm_version: str | None = None,
os_family: str | None = None,
os_version: str | None = None,
batch_pool_display_name: str | None = None,
batch_job_display_name: str | None = None,
batch_job_manager_task: batch_models.JobManagerTask | None = None,
batch_job_preparation_task: batch_models.JobPreparationTask | None = None,
batch_job_release_task: batch_models.JobReleaseTask | None = None,
batch_task_display_name: str | None = None,
batch_task_container_settings: batch_models.TaskContainerSettings | None = None,
batch_start_task: batch_models.StartTask | None = None,
batch_max_retries: int = 3,
batch_task_resource_files: list[batch_models.ResourceFile] | None = None,
batch_task_output_files: list[batch_models.OutputFile] | None = None,
batch_task_user_identity: batch_models.UserIdentity | None = None,
target_low_priority_nodes: int | None = None,
target_dedicated_nodes: int | None = None,
enable_auto_scale: bool = False,
auto_scale_formula: str | None = None,
azure_batch_conn_id="azure_batch_default",
use_latest_verified_vm_image_and_sku: bool = False,
timeout: int = 25,
should_delete_job: bool = False,
should_delete_pool: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.batch_pool_id = batch_pool_id
self.batch_pool_vm_size = batch_pool_vm_size
self.batch_job_id = batch_job_id
self.batch_task_id = batch_task_id
self.batch_task_command_line = batch_task_command_line
self.batch_pool_display_name = batch_pool_display_name
self.batch_job_display_name = batch_job_display_name
self.batch_job_manager_task = batch_job_manager_task
self.batch_job_preparation_task = batch_job_preparation_task
self.batch_job_release_task = batch_job_release_task
self.batch_task_display_name = batch_task_display_name
self.batch_task_container_settings = batch_task_container_settings
self.batch_start_task = batch_start_task
self.batch_max_retries = batch_max_retries
self.batch_task_resource_files = batch_task_resource_files
self.batch_task_output_files = batch_task_output_files
self.batch_task_user_identity = batch_task_user_identity
self.target_low_priority_nodes = target_low_priority_nodes
self.target_dedicated_nodes = target_dedicated_nodes
self.enable_auto_scale = enable_auto_scale
self.auto_scale_formula = auto_scale_formula
self.azure_batch_conn_id = azure_batch_conn_id
self.use_latest_image = use_latest_verified_vm_image_and_sku
self.vm_publisher = vm_publisher
self.vm_offer = vm_offer
self.sku_starts_with = sku_starts_with
self.vm_sku = vm_sku
self.vm_version = vm_version
self.vm_node_agent_sku_id = vm_node_agent_sku_id
self.os_family = os_family
self.os_version = os_version
self.timeout = timeout
self.should_delete_job = should_delete_job
self.should_delete_pool = should_delete_pool
self.hook = self.get_hook()
def _check_inputs(self) -> Any:
if not self.os_family and not self.vm_publisher:
raise AirflowException("You must specify either vm_publisher or os_family")
if self.os_family and self.vm_publisher:
raise AirflowException(
"Cloud service configuration and virtual machine configuration "
"are mutually exclusive. You must specify either of os_family and"
" vm_publisher"
)
if self.use_latest_image:
if not all(elem for elem in [self.vm_publisher, self.vm_offer]):
raise AirflowException(
f"If use_latest_image_and_sku is set to True then the parameters vm_publisher, "
f"vm_offer, must all be set. "
f"Found vm_publisher={self.vm_publisher}, vm_offer={self.vm_offer}"
)
if self.vm_publisher:
if not all([self.vm_sku, self.vm_offer, self.vm_node_agent_sku_id]):
raise AirflowException(
"If vm_publisher is set, then the parameters vm_sku, vm_offer,"
"vm_node_agent_sku_id must be set. Found "
f"vm_publisher={self.vm_publisher}, vm_offer={self.vm_offer} "
f"vm_node_agent_sku_id={self.vm_node_agent_sku_id}, "
f"vm_version={self.vm_version}"
)
if not self.target_dedicated_nodes and not self.enable_auto_scale:
raise AirflowException(
"Either target_dedicated_nodes or enable_auto_scale must be set. None was set"
)
if self.enable_auto_scale:
if self.target_dedicated_nodes or self.target_low_priority_nodes:
raise AirflowException(
f"If enable_auto_scale is set, then the parameters target_dedicated_nodes and "
f"target_low_priority_nodes must not be set. Found "
f"target_dedicated_nodes={self.target_dedicated_nodes}, "
f"target_low_priority_nodes={self.target_low_priority_nodes}"
)
if not self.auto_scale_formula:
raise AirflowException("The auto_scale_formula is required when enable_auto_scale is set")
if self.batch_job_release_task and not self.batch_job_preparation_task:
raise AirflowException(
"A batch_job_release_task cannot be specified without also "
" specifying a batch_job_preparation_task for the Job."
)
if not all(
[
self.batch_pool_id,
self.batch_job_id,
self.batch_pool_vm_size,
self.batch_task_id,
self.batch_task_command_line,
]
):
raise AirflowException(
"Some required parameters are missing.Please you must set all the required parameters. "
)
def execute(self, context: Context) -> None:
self._check_inputs()
self.hook.connection.config.retry_policy = self.batch_max_retries
pool = self.hook.configure_pool(
pool_id=self.batch_pool_id,
vm_size=self.batch_pool_vm_size,
display_name=self.batch_pool_display_name,
target_dedicated_nodes=self.target_dedicated_nodes,
use_latest_image_and_sku=self.use_latest_image,
vm_publisher=self.vm_publisher,
vm_offer=self.vm_offer,
sku_starts_with=self.sku_starts_with,
vm_sku=self.vm_sku,
vm_version=self.vm_version,
vm_node_agent_sku_id=self.vm_node_agent_sku_id,
os_family=self.os_family,
os_version=self.os_version,
target_low_priority_nodes=self.target_low_priority_nodes,
enable_auto_scale=self.enable_auto_scale,
auto_scale_formula=self.auto_scale_formula,
start_task=self.batch_start_task,
)
self.hook.create_pool(pool)
# Wait for nodes to reach complete state
self.hook.wait_for_all_node_state(
self.batch_pool_id,
{
batch_models.ComputeNodeState.start_task_failed,
batch_models.ComputeNodeState.unusable,
batch_models.ComputeNodeState.idle,
},
)
# Create job if not already exist
job = self.hook.configure_job(
job_id=self.batch_job_id,
pool_id=self.batch_pool_id,
display_name=self.batch_job_display_name,
job_manager_task=self.batch_job_manager_task,
job_preparation_task=self.batch_job_preparation_task,
job_release_task=self.batch_job_release_task,
)
self.hook.create_job(job)
# Create task
task = self.hook.configure_task(
task_id=self.batch_task_id,
command_line=self.batch_task_command_line,
display_name=self.batch_task_display_name,
container_settings=self.batch_task_container_settings,
resource_files=self.batch_task_resource_files,
output_files=self.batch_task_output_files,
user_identity=self.batch_task_user_identity,
)
# Add task to job
self.hook.add_single_task_to_job(job_id=self.batch_job_id, task=task)
# Wait for tasks to complete
fail_tasks = self.hook.wait_for_job_tasks_to_complete(job_id=self.batch_job_id, timeout=self.timeout)
# Clean up
if self.should_delete_job:
# delete job first
self.clean_up(job_id=self.batch_job_id)
if self.should_delete_pool:
self.clean_up(self.batch_pool_id)
# raise exception if any task fail
if fail_tasks:
raise AirflowException(f"Job fail. The failed task are: {fail_tasks}")
def on_kill(self) -> None:
response = self.hook.connection.job.terminate(
job_id=self.batch_job_id, terminate_reason="Job killed by user"
)
self.log.info("Azure Batch job (%s) terminated: %s", self.batch_job_id, response)
def get_hook(self) -> AzureBatchHook:
"""Create and return an AzureBatchHook."""
return AzureBatchHook(azure_batch_conn_id=self.azure_batch_conn_id)
def clean_up(self, pool_id: str | None = None, job_id: str | None = None) -> None:
"""
Delete the given pool and job in the batch account.
:param pool_id: The id of the pool to delete
:param job_id: The id of the job to delete
"""
if job_id:
self.log.info("Deleting job: %s", job_id)
self.hook.connection.job.delete(job_id)
if pool_id:
self.log.info("Deleting pool: %s", pool_id)
self.hook.connection.pool.delete(pool_id)
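# Usage sketch (not part of the original module): a single-task Batch job on a small dedicated pool
# using AzureBatchOperator. Pool, job, VM image, and sku values are illustrative assumptions and must
# match what is available in the target Batch account.
def _example_azure_batch_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_azure_batch", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        AzureBatchOperator(
            task_id="run_batch_task",
            batch_pool_id="example-pool",
            batch_pool_vm_size="Standard_D2s_v3",
            batch_job_id="example-job",
            batch_task_id="example-task",
            batch_task_command_line="/bin/bash -c 'echo hello from azure batch'",
            vm_publisher="canonical",
            vm_offer="0001-com-ubuntu-server-focal",
            vm_sku="20_04-lts",
            vm_node_agent_sku_id="batch.node.ubuntu 20.04",
            target_dedicated_nodes=1,
            should_delete_job=True,
            should_delete_pool=True,
        )
        return dag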
| 16,254 | 48.108761 | 109 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/operators/adls.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.data_lake import AzureDataLakeHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class ADLSDeleteOperator(BaseOperator):
"""
Delete files in the specified path.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ADLSDeleteOperator`
:param path: A directory or file to remove
:param recursive: Whether to loop into directories in the location and remove the files
:param ignore_not_found: Whether to raise error if file to delete is not found
:param azure_data_lake_conn_id: Reference to the :ref:`Azure Data Lake connection<howto/connection:adl>`.
"""
template_fields: Sequence[str] = ("path",)
ui_color = "#901dd2"
def __init__(
self,
*,
path: str,
recursive: bool = False,
ignore_not_found: bool = True,
azure_data_lake_conn_id: str = "azure_data_lake_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.path = path
self.recursive = recursive
self.ignore_not_found = ignore_not_found
self.azure_data_lake_conn_id = azure_data_lake_conn_id
def execute(self, context: Context) -> Any:
hook = AzureDataLakeHook(azure_data_lake_conn_id=self.azure_data_lake_conn_id)
return hook.remove(path=self.path, recursive=self.recursive, ignore_not_found=self.ignore_not_found)
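# Usage sketch (not part of the original module): recursively removing a temporary ADLS directory with
# ADLSDeleteOperator. The path is an illustrative assumption.
def _example_adls_delete_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_adls_delete", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        ADLSDeleteOperator(
            task_id="remove_tmp_dir",
            path="tmp/staging",
            recursive=True,
        )
        return dag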
class ADLSListOperator(BaseOperator):
"""
List all files from the specified path.
This operator returns a python list with the names of files which can be used by
`xcom` in the downstream tasks.
:param path: The Azure Data Lake path to find the objects. Supports glob
strings (templated)
:param azure_data_lake_conn_id: Reference to the :ref:`Azure Data Lake connection<howto/connection:adl>`.
**Example**:
The following Operator would list all the Parquet files from ``folder/output/``
folder in the specified ADLS account ::
adls_files = ADLSListOperator(
task_id='adls_files',
path='folder/output/*.parquet',
azure_data_lake_conn_id='azure_data_lake_default'
)
"""
template_fields: Sequence[str] = ("path",)
ui_color = "#901dd2"
def __init__(
self, *, path: str, azure_data_lake_conn_id: str = "azure_data_lake_default", **kwargs
) -> None:
super().__init__(**kwargs)
self.path = path
self.azure_data_lake_conn_id = azure_data_lake_conn_id
def execute(self, context: Context) -> list:
hook = AzureDataLakeHook(azure_data_lake_conn_id=self.azure_data_lake_conn_id)
self.log.info("Getting list of ADLS files in path: %s", self.path)
return hook.list(path=self.path)
| 3,789 | 36.524752 | 109 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/operators/synapse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from azure.synapse.spark.models import SparkBatchJobOptions
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.synapse import AzureSynapseHook, AzureSynapseSparkBatchRunStatus
if TYPE_CHECKING:
from airflow.utils.context import Context
class AzureSynapseRunSparkBatchOperator(BaseOperator):
"""
Executes a Spark job on Azure Synapse.
    .. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureSynapseRunSparkBatchOperator`
:param azure_synapse_conn_id: The connection identifier for connecting to Azure Synapse.
:param wait_for_termination: Flag to wait on a job run's termination.
:param spark_pool: The target synapse spark pool used to submit the job
:param payload: Livy compatible payload which represents the spark job that a user wants to submit
:param timeout: Time in seconds to wait for a job to reach a terminal status for non-asynchronous
waits. Used only if ``wait_for_termination`` is True.
:param check_interval: Time in seconds to check on a job run's status for non-asynchronous waits.
Used only if ``wait_for_termination`` is True.
"""
template_fields: Sequence[str] = (
"azure_synapse_conn_id",
"spark_pool",
)
template_fields_renderers = {"parameters": "json"}
ui_color = "#0678d4"
def __init__(
self,
*,
azure_synapse_conn_id: str = AzureSynapseHook.default_conn_name,
wait_for_termination: bool = True,
spark_pool: str = "",
payload: SparkBatchJobOptions,
timeout: int = 60 * 60 * 24 * 7,
check_interval: int = 60,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.job_id = None
self.azure_synapse_conn_id = azure_synapse_conn_id
self.wait_for_termination = wait_for_termination
self.spark_pool = spark_pool
self.payload = payload
self.timeout = timeout
self.check_interval = check_interval
def execute(self, context: Context) -> None:
self.hook = AzureSynapseHook(
azure_synapse_conn_id=self.azure_synapse_conn_id, spark_pool=self.spark_pool
)
self.log.info("Executing the Synapse spark job.")
response = self.hook.run_spark_job(payload=self.payload)
self.log.info(response)
self.job_id = vars(response)["id"]
# Push the ``job_id`` value to XCom regardless of what happens during execution. This allows for
        # retrieval of the executed job's ``id`` for downstream tasks, especially if performing an
# asynchronous wait.
context["ti"].xcom_push(key="job_id", value=self.job_id)
if self.wait_for_termination:
self.log.info("Waiting for job run %s to terminate.", self.job_id)
if self.hook.wait_for_job_run_status(
job_id=self.job_id,
expected_statuses=AzureSynapseSparkBatchRunStatus.SUCCESS,
check_interval=self.check_interval,
timeout=self.timeout,
):
self.log.info("Job run %s has completed successfully.", self.job_id)
else:
raise Exception(f"Job run {self.job_id} has failed or has been cancelled.")
def on_kill(self) -> None:
if self.job_id:
self.hook.cancel_job_run(
job_id=self.job_id,
)
self.log.info("Job run %s has been cancelled successfully.", self.job_id)
| 4,422 | 39.953704 | 109 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/operators/adx.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Azure Data Explorer operators."""
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from azure.kusto.data._models import KustoResultTable
from airflow.configuration import conf
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.adx import AzureDataExplorerHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class AzureDataExplorerQueryOperator(BaseOperator):
"""
Operator for querying Azure Data Explorer (Kusto).
:param query: KQL query to run (templated).
:param database: Database to run the query on (templated).
:param options: Optional query options. See:
https://docs.microsoft.com/en-us/azure/kusto/api/netfx/request-properties#list-of-clientrequestproperties
:param azure_data_explorer_conn_id: Reference to the
:ref:`Azure Data Explorer connection<howto/connection:adx>`.
"""
ui_color = "#00a1f2"
template_fields: Sequence[str] = ("query", "database")
template_ext: Sequence[str] = (".kql",)
def __init__(
self,
*,
query: str,
database: str,
options: dict | None = None,
azure_data_explorer_conn_id: str = "azure_data_explorer_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.query = query
self.database = database
self.options = options
self.azure_data_explorer_conn_id = azure_data_explorer_conn_id
def get_hook(self) -> AzureDataExplorerHook:
"""Returns new instance of AzureDataExplorerHook."""
return AzureDataExplorerHook(self.azure_data_explorer_conn_id)
def execute(self, context: Context) -> KustoResultTable | str:
"""
Run KQL Query on Azure Data Explorer (Kusto).
Returns `PrimaryResult` of Query v2 HTTP response contents.
https://docs.microsoft.com/en-us/azure/kusto/api/rest/response2
"""
hook = self.get_hook()
response = hook.run_query(self.query, self.database, self.options)
if conf.getboolean("core", "enable_xcom_pickling"):
return response.primary_results[0]
else:
return str(response.primary_results[0])
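# Usage sketch (not part of the original module): running a KQL aggregation with
# AzureDataExplorerQueryOperator. The database, table, and request option are illustrative assumptions.
def _example_adx_query_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_adx_query", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        AzureDataExplorerQueryOperator(
            task_id="count_recent_events",
            query="Events | where ingestion_time() > ago(1d) | count",
            database="telemetry",
            options={"servertimeout": "00:05:00"},
        )
        return dag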
| 3,032 | 35.987805 | 111 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/operators/cosmos.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.cosmos import AzureCosmosDBHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class AzureCosmosInsertDocumentOperator(BaseOperator):
"""Insert a new document into the specified Cosmos database and collection.
Both the database and collection will be created automatically if they do
not already exist.
:param database_name: The name of the database. (templated)
:param collection_name: The name of the collection. (templated)
:param document: The document to insert
:param azure_cosmos_conn_id: Reference to the
:ref:`Azure CosmosDB connection<howto/connection:azure_cosmos>`.
"""
template_fields: Sequence[str] = ("database_name", "collection_name")
ui_color = "#e4f0e8"
def __init__(
self,
*,
database_name: str,
collection_name: str,
document: dict,
azure_cosmos_conn_id: str = "azure_cosmos_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.database_name = database_name
self.collection_name = collection_name
self.document = document
self.azure_cosmos_conn_id = azure_cosmos_conn_id
def execute(self, context: Context) -> None:
# Create the hook
hook = AzureCosmosDBHook(azure_cosmos_conn_id=self.azure_cosmos_conn_id)
# Create the DB if it doesn't already exist
if not hook.does_database_exist(self.database_name):
hook.create_database(self.database_name)
# Create the collection as well
if not hook.does_collection_exist(self.collection_name, self.database_name):
hook.create_collection(self.collection_name, self.database_name)
# finally insert the document
hook.upsert_document(self.document, self.database_name, self.collection_name)
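# Usage sketch (not part of the original module): upserting one document with
# AzureCosmosInsertDocumentOperator. The database, collection, and document contents are
# illustrative assumptions.
def _example_cosmos_insert_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_cosmos_insert", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        AzureCosmosInsertDocumentOperator(
            task_id="insert_run_marker",
            database_name="airflow",
            collection_name="dag_runs",
            document={"id": "manual-run-1", "status": "started"},
        )
        return dag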
| 2,782 | 36.608108 | 85 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/operators/asb.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.asb import AdminClientHook, MessageHook
if TYPE_CHECKING:
from azure.servicebus.management._models import AuthorizationRule
from airflow.utils.context import Context
class AzureServiceBusCreateQueueOperator(BaseOperator):
"""Create a Azure Service Bus queue under a Service Bus Namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusCreateQueueOperator`
    :param queue_name: The name of the queue; should be unique.
    :param max_delivery_count: The maximum delivery count. A message is automatically
        dead lettered after this number of deliveries. Default value is 10.
:param dead_lettering_on_message_expiration: A value that indicates whether this subscription has
dead letter support when a message expires.
:param enable_batched_operations: Value that indicates whether server-side batched
operations are enabled.
:param azure_service_bus_conn_id: Reference to the
:ref:`Azure Service Bus connection<howto/connection:azure_service_bus>`.
"""
template_fields: Sequence[str] = ("queue_name",)
ui_color = "#e4f0e8"
def __init__(
self,
*,
queue_name: str,
max_delivery_count: int = 10,
dead_lettering_on_message_expiration: bool = True,
enable_batched_operations: bool = True,
azure_service_bus_conn_id: str = "azure_service_bus_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.queue_name = queue_name
self.max_delivery_count = max_delivery_count
self.dead_lettering_on_message_expiration = dead_lettering_on_message_expiration
self.enable_batched_operations = enable_batched_operations
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Creates Queue in Azure Service Bus namespace, by connecting to Service Bus Admin client in hook."""
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
# create queue with name
queue = hook.create_queue(
self.queue_name,
self.max_delivery_count,
self.dead_lettering_on_message_expiration,
self.enable_batched_operations,
)
self.log.info("Created Queue %s", queue.name)
class AzureServiceBusSendMessageOperator(BaseOperator):
"""Send Message or batch message to the Service Bus queue.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusSendMessageOperator`
    :param queue_name: The name of the queue; should be unique.
:param message: Message which needs to be sent to the queue. It can be string or list of string.
:param batch: Its boolean flag by default it is set to False, if the message needs to be sent
as batch message it can be set to True.
:param azure_service_bus_conn_id: Reference to the
        :ref:`Azure Service Bus connection<howto/connection:azure_service_bus>`.
"""
template_fields: Sequence[str] = ("queue_name",)
ui_color = "#e4f0e8"
def __init__(
self,
*,
queue_name: str,
message: str | list[str],
batch: bool = False,
azure_service_bus_conn_id: str = "azure_service_bus_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.queue_name = queue_name
self.batch = batch
self.message = message
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Sends Message to the specific queue in Service Bus namespace."""
# Create the hook
hook = MessageHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
# send message
hook.send_message(self.queue_name, self.message, self.batch)
class AzureServiceBusReceiveMessageOperator(BaseOperator):
"""Receive a batch of messages at once in a specified Queue name.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusReceiveMessageOperator`
    :param queue_name: The name of the queue or a QueueProperties with name.
:param max_message_count: Maximum number of messages in the batch.
:param max_wait_time: Maximum time to wait in seconds for the first message to arrive.
:param azure_service_bus_conn_id: Reference to the
        :ref:`Azure Service Bus connection <howto/connection:azure_service_bus>`.
"""
template_fields: Sequence[str] = ("queue_name",)
ui_color = "#e4f0e8"
def __init__(
self,
*,
queue_name: str,
azure_service_bus_conn_id: str = "azure_service_bus_default",
max_message_count: int = 10,
max_wait_time: float = 5,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.queue_name = queue_name
self.azure_service_bus_conn_id = azure_service_bus_conn_id
self.max_message_count = max_message_count
self.max_wait_time = max_wait_time
def execute(self, context: Context) -> None:
"""Receive Message in specific queue in Service Bus namespace by connecting to Service Bus client."""
# Create the hook
hook = MessageHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
# Receive message
hook.receive_message(
self.queue_name, max_message_count=self.max_message_count, max_wait_time=self.max_wait_time
)
class AzureServiceBusDeleteQueueOperator(BaseOperator):
"""Delete the Queue in the Azure Service Bus namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusDeleteQueueOperator`
:param queue_name: The name of the queue in Service Bus namespace.
:param azure_service_bus_conn_id: Reference to the
        :ref:`Azure Service Bus connection <howto/connection:azure_service_bus>`.
"""
template_fields: Sequence[str] = ("queue_name",)
ui_color = "#e4f0e8"
def __init__(
self,
*,
queue_name: str,
azure_service_bus_conn_id: str = "azure_service_bus_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.queue_name = queue_name
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Delete Queue in Service Bus namespace, by connecting to Service Bus Admin client."""
# Create the hook
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
# delete queue with name
hook.delete_queue(self.queue_name)
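# Usage sketch (not part of the original module): a create -> send -> receive -> delete queue lifecycle
# chained from the four operators above. The queue name and message are illustrative assumptions.
def _example_service_bus_queue_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_service_bus_queue", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        create_queue = AzureServiceBusCreateQueueOperator(task_id="create_queue", queue_name="example-queue")
        send_message = AzureServiceBusSendMessageOperator(
            task_id="send_message", queue_name="example-queue", message="hello from airflow", batch=False
        )
        receive_message = AzureServiceBusReceiveMessageOperator(
            task_id="receive_message", queue_name="example-queue", max_message_count=10, max_wait_time=5
        )
        delete_queue = AzureServiceBusDeleteQueueOperator(task_id="delete_queue", queue_name="example-queue")
        create_queue >> send_message >> receive_message >> delete_queue
        return dag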
class AzureServiceBusTopicCreateOperator(BaseOperator):
"""Create an Azure Service Bus Topic under a Service Bus Namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusTopicCreateOperator`
:param topic_name: Name of the topic.
:param default_message_time_to_live: ISO 8601 default message time span to live value. This is
the duration after which the message expires, starting from when the message is sent to Service
Bus. This is the default value used when TimeToLive is not set on a message itself.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format
like "PT300S" is accepted.
:param max_size_in_megabytes: The maximum size of the topic in megabytes, which is the size of
memory allocated for the topic.
:param requires_duplicate_detection: A value indicating if this topic requires duplicate
detection.
:param duplicate_detection_history_time_window: ISO 8601 time span structure that defines the
duration of the duplicate detection history. The default value is 10 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format
like "PT300S" is accepted.
:param enable_batched_operations: Value that indicates whether server-side batched operations
are enabled.
:param size_in_bytes: The size of the topic, in bytes.
:param filtering_messages_before_publishing: Filter messages before publishing.
:param authorization_rules: List of Authorization rules for resource.
:param support_ordering: A value that indicates whether the topic supports ordering.
:param auto_delete_on_idle: ISO 8601 time span idle interval after which the topic is
automatically deleted. The minimum duration is 5 minutes.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration format
like "PT300S" is accepted.
:param enable_partitioning: A value that indicates whether the topic is to be partitioned
across multiple message brokers.
:param enable_express: A value that indicates whether Express Entities are enabled. An express
queue holds a message in memory temporarily before writing it to persistent storage.
:param user_metadata: Metadata associated with the topic.
:param max_message_size_in_kilobytes: The maximum size in kilobytes of message payload that
can be accepted by the queue. This feature is only available when using a Premium namespace
and Service Bus API version "2021-05" or higher.
The minimum allowed value is 1024 while the maximum allowed value is 102400. Default value is 1024.
"""
template_fields: Sequence[str] = ("topic_name",)
ui_color = "#e4f0e8"
def __init__(
self,
*,
topic_name: str,
azure_service_bus_conn_id: str = "azure_service_bus_default",
default_message_time_to_live: datetime.timedelta | str | None = None,
max_size_in_megabytes: int | None = None,
requires_duplicate_detection: bool | None = None,
duplicate_detection_history_time_window: datetime.timedelta | str | None = None,
enable_batched_operations: bool | None = None,
size_in_bytes: int | None = None,
filtering_messages_before_publishing: bool | None = None,
authorization_rules: list[AuthorizationRule] | None = None,
support_ordering: bool | None = None,
auto_delete_on_idle: datetime.timedelta | str | None = None,
enable_partitioning: bool | None = None,
enable_express: bool | None = None,
user_metadata: str | None = None,
max_message_size_in_kilobytes: int | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.topic_name = topic_name
self.azure_service_bus_conn_id = azure_service_bus_conn_id
self.default_message_time_to_live = default_message_time_to_live
self.max_size_in_megabytes = max_size_in_megabytes
self.requires_duplicate_detection = requires_duplicate_detection
self.duplicate_detection_history_time_window = duplicate_detection_history_time_window
self.enable_batched_operations = enable_batched_operations
self.size_in_bytes = size_in_bytes
self.filtering_messages_before_publishing = filtering_messages_before_publishing
self.authorization_rules = authorization_rules
self.support_ordering = support_ordering
self.auto_delete_on_idle = auto_delete_on_idle
self.enable_partitioning = enable_partitioning
self.enable_express = enable_express
self.user_metadata = user_metadata
self.max_message_size_in_kilobytes = max_message_size_in_kilobytes
def execute(self, context: Context) -> str:
"""Creates Topic in Service Bus namespace, by connecting to Service Bus Admin client."""
if self.topic_name is None:
raise TypeError("Topic name cannot be None.")
# Create the hook
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
with hook.get_conn() as service_mgmt_conn:
topic_properties = service_mgmt_conn.get_topic(self.topic_name)
if topic_properties and topic_properties.name == self.topic_name:
self.log.info("Topic name already exists")
return topic_properties.name
topic = service_mgmt_conn.create_topic(
topic_name=self.topic_name,
default_message_time_to_live=self.default_message_time_to_live,
max_size_in_megabytes=self.max_size_in_megabytes,
requires_duplicate_detection=self.requires_duplicate_detection,
duplicate_detection_history_time_window=self.duplicate_detection_history_time_window,
enable_batched_operations=self.enable_batched_operations,
size_in_bytes=self.size_in_bytes,
filtering_messages_before_publishing=self.filtering_messages_before_publishing,
authorization_rules=self.authorization_rules,
support_ordering=self.support_ordering,
auto_delete_on_idle=self.auto_delete_on_idle,
enable_partitioning=self.enable_partitioning,
enable_express=self.enable_express,
user_metadata=self.user_metadata,
max_message_size_in_kilobytes=self.max_message_size_in_kilobytes,
)
self.log.info("Created Topic %s", topic.name)
return topic.name
class AzureServiceBusSubscriptionCreateOperator(BaseOperator):
"""Create an Azure Service Bus Topic Subscription under a Service Bus Namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusSubscriptionCreateOperator`
:param topic_name: The topic that will own the to-be-created subscription.
    :param subscription_name: Name of the subscription that needs to be created.
:param lock_duration: ISO 8601 time span duration of a peek-lock; that is, the amount of time that
the message is locked for other receivers. The maximum value for LockDuration is 5 minutes; the
default value is 1 minute. Input value of either type ~datetime.timedelta or string in ISO 8601
duration format like "PT300S" is accepted.
:param requires_session: A value that indicates whether the queue supports the concept of sessions.
:param default_message_time_to_live: ISO 8601 default message time span to live value. This is the
duration after which the message expires, starting from when the message is sent to
Service Bus. This is the default value used when TimeToLive is not set on a message itself.
Input value of either type ~datetime.timedelta or string in ISO 8601 duration
format like "PT300S" is accepted.
:param dead_lettering_on_message_expiration: A value that indicates whether this subscription has
dead letter support when a message expires.
    :param dead_lettering_on_filter_evaluation_exceptions: A value that indicates whether this
        subscription has dead letter support when it encounters filter evaluation exceptions.
:param max_delivery_count: The maximum delivery count. A message is automatically dead lettered
after this number of deliveries. Default value is 10.
:param enable_batched_operations: Value that indicates whether server-side batched
operations are enabled.
    :param forward_to: The name of the recipient entity to which all the messages sent to the
        subscription are forwarded.
:param user_metadata: Metadata associated with the subscription. Maximum number of characters is 1024.
    :param forward_dead_lettered_messages_to: The name of the recipient entity to which all the
        dead-lettered messages of the subscription are forwarded.
    :param auto_delete_on_idle: ISO 8601 time span idle interval after which the subscription is
        automatically deleted. The minimum duration is 5 minutes. Input value of either
        type ~datetime.timedelta or string in ISO 8601 duration format like "PT300S" is accepted.
:param azure_service_bus_conn_id: Reference to the
:ref:`Azure Service Bus connection<howto/connection:azure_service_bus>`.
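    **Example** (an illustrative sketch; the topic and subscription names are placeholders)::

        create_subscription = AzureServiceBusSubscriptionCreateOperator(
            task_id="create_subscription",
            topic_name="my-topic",  # placeholder names
            subscription_name="my-subscription",
            azure_service_bus_conn_id="azure_service_bus_default",
        )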
"""
template_fields: Sequence[str] = ("topic_name", "subscription_name")
ui_color = "#e4f0e8"
def __init__(
self,
*,
topic_name: str,
subscription_name: str,
azure_service_bus_conn_id: str = "azure_service_bus_default",
lock_duration: datetime.timedelta | str | None = None,
requires_session: bool | None = None,
default_message_time_to_live: datetime.timedelta | str | None = None,
dead_lettering_on_message_expiration: bool | None = True,
dead_lettering_on_filter_evaluation_exceptions: bool | None = None,
max_delivery_count: int | None = 10,
enable_batched_operations: bool | None = True,
forward_to: str | None = None,
user_metadata: str | None = None,
forward_dead_lettered_messages_to: str | None = None,
auto_delete_on_idle: datetime.timedelta | str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.topic_name = topic_name
self.subscription_name = subscription_name
self.lock_duration = lock_duration
self.requires_session = requires_session
self.default_message_time_to_live = default_message_time_to_live
self.dl_on_message_expiration = dead_lettering_on_message_expiration
self.dl_on_filter_evaluation_exceptions = dead_lettering_on_filter_evaluation_exceptions
self.max_delivery_count = max_delivery_count
self.enable_batched_operations = enable_batched_operations
self.forward_to = forward_to
self.user_metadata = user_metadata
self.forward_dead_lettered_messages_to = forward_dead_lettered_messages_to
self.auto_delete_on_idle = auto_delete_on_idle
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Creates Subscription in Service Bus namespace, by connecting to Service Bus Admin client."""
if self.subscription_name is None:
raise TypeError("Subscription name cannot be None.")
if self.topic_name is None:
raise TypeError("Topic name cannot be None.")
# Create the hook
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
with hook.get_conn() as service_mgmt_conn:
# create subscription with name
subscription = service_mgmt_conn.create_subscription(
topic_name=self.topic_name,
subscription_name=self.subscription_name,
lock_duration=self.lock_duration,
requires_session=self.requires_session,
default_message_time_to_live=self.default_message_time_to_live,
dead_lettering_on_message_expiration=self.dl_on_message_expiration,
dead_lettering_on_filter_evaluation_exceptions=self.dl_on_filter_evaluation_exceptions,
max_delivery_count=self.max_delivery_count,
enable_batched_operations=self.enable_batched_operations,
forward_to=self.forward_to,
user_metadata=self.user_metadata,
forward_dead_lettered_messages_to=self.forward_dead_lettered_messages_to,
auto_delete_on_idle=self.auto_delete_on_idle,
)
self.log.info("Created subscription %s", subscription.name)
class AzureServiceBusUpdateSubscriptionOperator(BaseOperator):
"""Update an Azure ServiceBus Topic Subscription under a ServiceBus Namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusUpdateSubscriptionOperator`
    :param topic_name: The topic that owns the subscription to be updated.
    :param subscription_name: Name of the subscription to be updated.
:param max_delivery_count: The maximum delivery count. A message is automatically dead lettered
after this number of deliveries. Default value is 10.
:param dead_lettering_on_message_expiration: A value that indicates whether this subscription
has dead letter support when a message expires.
:param enable_batched_operations: Value that indicates whether server-side batched
operations are enabled.
:param azure_service_bus_conn_id: Reference to the
:ref:`Azure Service Bus connection<howto/connection:azure_service_bus>`.
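    **Example** (an illustrative sketch; the topic and subscription names are placeholders)::

        update_subscription = AzureServiceBusUpdateSubscriptionOperator(
            task_id="update_subscription",
            topic_name="my-topic",  # placeholder names
            subscription_name="my-subscription",
            max_delivery_count=5,
            dead_lettering_on_message_expiration=True,
        )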
"""
template_fields: Sequence[str] = ("topic_name", "subscription_name")
ui_color = "#e4f0e8"
def __init__(
self,
*,
topic_name: str,
subscription_name: str,
max_delivery_count: int | None = None,
dead_lettering_on_message_expiration: bool | None = None,
enable_batched_operations: bool | None = None,
azure_service_bus_conn_id: str = "azure_service_bus_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.topic_name = topic_name
self.subscription_name = subscription_name
self.max_delivery_count = max_delivery_count
self.dl_on_message_expiration = dead_lettering_on_message_expiration
self.enable_batched_operations = enable_batched_operations
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Updates Subscription properties, by connecting to Service Bus Admin client."""
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
with hook.get_conn() as service_mgmt_conn:
subscription_prop = service_mgmt_conn.get_subscription(self.topic_name, self.subscription_name)
if self.max_delivery_count:
subscription_prop.max_delivery_count = self.max_delivery_count
if self.dl_on_message_expiration is not None:
subscription_prop.dead_lettering_on_message_expiration = self.dl_on_message_expiration
if self.enable_batched_operations is not None:
subscription_prop.enable_batched_operations = self.enable_batched_operations
# update by updating the properties in the model
service_mgmt_conn.update_subscription(self.topic_name, subscription_prop)
updated_subscription = service_mgmt_conn.get_subscription(self.topic_name, self.subscription_name)
self.log.info("Subscription Updated successfully %s", updated_subscription)
class ASBReceiveSubscriptionMessageOperator(BaseOperator):
    """Receive a batch of messages from a Service Bus Subscription under a specific Topic.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ASBReceiveSubscriptionMessageOperator`
    :param subscription_name: Name of the subscription from which messages will be received.
    :param topic_name: The topic that owns the subscription.
:param max_message_count: Maximum number of messages in the batch.
Actual number returned will depend on prefetch_count and incoming stream rate.
Setting to None will fully depend on the prefetch config. The default value is 1.
:param max_wait_time: Maximum time to wait in seconds for the first message to arrive. If no
messages arrive, and no timeout is specified, this call will not return until the
        connection is closed. If specified, and no messages arrive within the timeout period,
an empty list will be returned.
:param azure_service_bus_conn_id: Reference to the
:ref:`Azure Service Bus connection <howto/connection:azure_service_bus>`.
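    **Example** (an illustrative sketch; the topic and subscription names are placeholders)::

        receive_messages = ASBReceiveSubscriptionMessageOperator(
            task_id="receive_subscription_messages",
            topic_name="my-topic",  # placeholder names
            subscription_name="my-subscription",
            max_message_count=10,
            max_wait_time=5,
        )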
"""
template_fields: Sequence[str] = ("topic_name", "subscription_name")
ui_color = "#e4f0e8"
def __init__(
self,
*,
topic_name: str,
subscription_name: str,
max_message_count: int | None = 1,
max_wait_time: float | None = 5,
azure_service_bus_conn_id: str = "azure_service_bus_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.topic_name = topic_name
self.subscription_name = subscription_name
self.max_message_count = max_message_count
self.max_wait_time = max_wait_time
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Receive Message in specific queue in Service Bus namespace by connecting to Service Bus client."""
# Create the hook
hook = MessageHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
# Receive message
hook.receive_subscription_message(
self.topic_name, self.subscription_name, self.max_message_count, self.max_wait_time
)
class AzureServiceBusSubscriptionDeleteOperator(BaseOperator):
"""Delete the topic subscription in the Azure ServiceBus namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusSubscriptionDeleteOperator`
    :param topic_name: The topic that owns the subscription to be deleted.
    :param subscription_name: Name of the subscription to be deleted.
:param azure_service_bus_conn_id: Reference to the
:ref:`Azure Service Bus connection <howto/connection:azure_service_bus>`.
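    **Example** (an illustrative sketch; the topic and subscription names are placeholders)::

        delete_subscription = AzureServiceBusSubscriptionDeleteOperator(
            task_id="delete_subscription",
            topic_name="my-topic",  # placeholder names
            subscription_name="my-subscription",
        )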
"""
template_fields: Sequence[str] = ("topic_name", "subscription_name")
ui_color = "#e4f0e8"
def __init__(
self,
*,
topic_name: str,
subscription_name: str,
azure_service_bus_conn_id: str = "azure_service_bus_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.topic_name = topic_name
self.subscription_name = subscription_name
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Delete topic subscription in Service Bus namespace, by connecting to Service Bus Admin client."""
# Create the hook
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
# delete subscription with name
hook.delete_subscription(self.subscription_name, self.topic_name)
class AzureServiceBusTopicDeleteOperator(BaseOperator):
"""Delete the topic in the Azure Service Bus namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusTopicDeleteOperator`
:param topic_name: Name of the topic to be deleted.
:param azure_service_bus_conn_id: Reference to the
:ref:`Azure Service Bus connection <howto/connection:azure_service_bus>`.
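    **Example** (an illustrative sketch; the topic name is a placeholder)::

        delete_topic = AzureServiceBusTopicDeleteOperator(
            task_id="delete_topic",
            topic_name="my-topic",  # placeholder topic name
        )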
"""
template_fields: Sequence[str] = ("topic_name",)
ui_color = "#e4f0e8"
def __init__(
self,
*,
topic_name: str,
azure_service_bus_conn_id: str = "azure_service_bus_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.topic_name = topic_name
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Delete topic in Service Bus namespace, by connecting to Service Bus Admin client."""
if self.topic_name is None:
raise TypeError("Topic name cannot be None.")
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
with hook.get_conn() as service_mgmt_conn:
topic_properties = service_mgmt_conn.get_topic(self.topic_name)
if topic_properties and topic_properties.name == self.topic_name:
service_mgmt_conn.delete_topic(self.topic_name)
self.log.info("Topic %s deleted.", self.topic_name)
else:
self.log.info("Topic %s does not exist.", self.topic_name)
| 29,255 | 46.648208 | 110 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/operators/container_instances.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import re
from collections import namedtuple
from time import sleep
from typing import TYPE_CHECKING, Any, Sequence
from azure.mgmt.containerinstance.models import (
Container,
ContainerGroup,
ContainerGroupNetworkProfile,
ContainerPort,
EnvironmentVariable,
IpAddress,
ResourceRequests,
ResourceRequirements,
VolumeMount,
)
from msrestazure.azure_exceptions import CloudError
from airflow.exceptions import AirflowException, AirflowTaskTimeout
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.container_instance import AzureContainerInstanceHook
from airflow.providers.microsoft.azure.hooks.container_registry import AzureContainerRegistryHook
from airflow.providers.microsoft.azure.hooks.container_volume import AzureContainerVolumeHook
if TYPE_CHECKING:
from airflow.utils.context import Context
Volume = namedtuple(
"Volume",
["conn_id", "account_name", "share_name", "mount_path", "read_only"],
)
DEFAULT_ENVIRONMENT_VARIABLES: dict[str, str] = {}
DEFAULT_SECURED_VARIABLES: Sequence[str] = []
DEFAULT_VOLUMES: Sequence[Volume] = []
DEFAULT_MEMORY_IN_GB = 2.0
DEFAULT_CPU = 1.0
class AzureContainerInstancesOperator(BaseOperator):
"""
Start a container on Azure Container Instances.
:param ci_conn_id: connection id of a service principal which will be used
to start the container instance
:param registry_conn_id: connection id of a user which can login to a
private docker registry. For Azure use :ref:`Azure connection id<howto/connection:azure>`
:param resource_group: name of the resource group wherein this container
instance should be started
:param name: name of this container instance. Please note this name has
to be unique in order to run containers in parallel.
:param image: the docker image to be used
:param region: the region wherein this container instance should be started
:param environment_variables: key,value pairs containing environment
variables which will be passed to the running container
:param secured_variables: names of environmental variables that should not
be exposed outside the container (typically passwords).
:param volumes: list of ``Volume`` tuples to be mounted to the container.
Currently only Azure Fileshares are supported.
:param memory_in_gb: the amount of memory to allocate to this container
:param cpu: the number of cpus to allocate to this container
:param gpu: GPU Resource for the container.
:param command: the command to run inside the container
:param container_timeout: max time allowed for the execution of
the container instance.
:param tags: azure tags as dict of str:str
:param os_type: The operating system type required by the containers
in the container group. Possible values include: 'Windows', 'Linux'
:param restart_policy: Restart policy for all containers within the container group.
Possible values include: 'Always', 'OnFailure', 'Never'
:param ip_address: The IP address type of the container group.
:param network_profile: The network profile information for a container group.
**Example**::
AzureContainerInstancesOperator(
ci_conn_id = "azure_service_principal",
registry_conn_id = "azure_registry_user",
resource_group = "my-resource-group",
name = "my-container-name-{{ ds }}",
image = "myprivateregistry.azurecr.io/my_container:latest",
region = "westeurope",
environment_variables = {"MODEL_PATH": "my_value",
"POSTGRES_LOGIN": "{{ macros.connection('postgres_default').login }}",
"POSTGRES_PASSWORD": "{{ macros.connection('postgres_default').password }}",
"JOB_GUID": "{{ ti.xcom_pull(task_ids='task1', key='guid') }}" },
secured_variables = ['POSTGRES_PASSWORD'],
volumes = [("azure_container_instance_conn_id",
"my_storage_container",
"my_fileshare",
"/input-data",
True),],
memory_in_gb=14.0,
cpu=4.0,
gpu=GpuResource(count=1, sku='K80'),
command=["/bin/echo", "world"],
task_id="start_container"
)
"""
template_fields: Sequence[str] = ("name", "image", "command", "environment_variables")
template_fields_renderers = {"command": "bash", "environment_variables": "json"}
def __init__(
self,
*,
ci_conn_id: str,
registry_conn_id: str | None,
resource_group: str,
name: str,
image: str,
region: str,
environment_variables: dict | None = None,
secured_variables: str | None = None,
volumes: list | None = None,
memory_in_gb: Any | None = None,
cpu: Any | None = None,
gpu: Any | None = None,
command: list[str] | None = None,
remove_on_error: bool = True,
fail_if_exists: bool = True,
tags: dict[str, str] | None = None,
os_type: str = "Linux",
restart_policy: str = "Never",
ip_address: IpAddress | None = None,
ports: list[ContainerPort] | None = None,
network_profile: ContainerGroupNetworkProfile | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.ci_conn_id = ci_conn_id
self.resource_group = resource_group
self.name = self._check_name(name)
self.image = image
self.region = region
self.registry_conn_id = registry_conn_id
self.environment_variables = environment_variables or DEFAULT_ENVIRONMENT_VARIABLES
self.secured_variables = secured_variables or DEFAULT_SECURED_VARIABLES
self.volumes = volumes or DEFAULT_VOLUMES
self.memory_in_gb = memory_in_gb or DEFAULT_MEMORY_IN_GB
self.cpu = cpu or DEFAULT_CPU
self.gpu = gpu
self.command = command
self.remove_on_error = remove_on_error
self.fail_if_exists = fail_if_exists
self._ci_hook: Any = None
self.tags = tags
self.os_type = os_type
if self.os_type not in ["Linux", "Windows"]:
raise AirflowException(
"Invalid value for the os_type argument. "
"Please set 'Linux' or 'Windows' as the os_type. "
f"Found `{self.os_type}`."
)
self.restart_policy = restart_policy
if self.restart_policy not in ["Always", "OnFailure", "Never"]:
            raise AirflowException(
                "Invalid value for the restart_policy argument. "
                "Please set one of 'Always', 'OnFailure', 'Never' as the restart_policy. "
f"Found `{self.restart_policy}`"
)
self.ip_address = ip_address
self.ports = ports
self.network_profile = network_profile
def execute(self, context: Context) -> int:
# Check name again in case it was templated.
self._check_name(self.name)
self._ci_hook = AzureContainerInstanceHook(azure_conn_id=self.ci_conn_id)
if self.fail_if_exists:
self.log.info("Testing if container group already exists")
if self._ci_hook.exists(self.resource_group, self.name):
raise AirflowException("Container group exists")
if self.registry_conn_id:
registry_hook = AzureContainerRegistryHook(self.registry_conn_id)
image_registry_credentials: list | None = [
registry_hook.connection,
]
else:
image_registry_credentials = None
environment_variables = []
for key, value in self.environment_variables.items():
if key in self.secured_variables:
e = EnvironmentVariable(name=key, secure_value=value)
else:
e = EnvironmentVariable(name=key, value=value)
environment_variables.append(e)
volumes: list[Volume | Volume] = []
volume_mounts: list[VolumeMount | VolumeMount] = []
for conn_id, account_name, share_name, mount_path, read_only in self.volumes:
hook = AzureContainerVolumeHook(conn_id)
mount_name = f"mount-{len(volumes)}"
volumes.append(hook.get_file_volume(mount_name, share_name, account_name, read_only))
volume_mounts.append(VolumeMount(name=mount_name, mount_path=mount_path, read_only=read_only))
exit_code = 1
try:
self.log.info("Starting container group with %.1f cpu %.1f mem", self.cpu, self.memory_in_gb)
if self.gpu:
self.log.info("GPU count: %.1f, GPU SKU: %s", self.gpu.count, self.gpu.sku)
resources = ResourceRequirements(
requests=ResourceRequests(memory_in_gb=self.memory_in_gb, cpu=self.cpu, gpu=self.gpu)
)
if self.ip_address and not self.ports:
self.ports = [ContainerPort(port=80)]
self.log.info("Default port set. Container will listen on port 80")
container = Container(
name=self.name,
image=self.image,
resources=resources,
command=self.command,
environment_variables=environment_variables,
volume_mounts=volume_mounts,
ports=self.ports,
)
container_group = ContainerGroup(
location=self.region,
containers=[
container,
],
image_registry_credentials=image_registry_credentials,
volumes=volumes,
restart_policy=self.restart_policy,
os_type=self.os_type,
tags=self.tags,
ip_address=self.ip_address,
network_profile=self.network_profile,
)
self._ci_hook.create_or_update(self.resource_group, self.name, container_group)
self.log.info("Container group started %s/%s", self.resource_group, self.name)
exit_code = self._monitor_logging(self.resource_group, self.name)
self.log.info("Container had exit code: %s", exit_code)
if exit_code != 0:
raise AirflowException(f"Container had a non-zero exit code, {exit_code}")
return exit_code
except CloudError:
self.log.exception("Could not start container group")
raise AirflowException("Could not start container group")
finally:
if exit_code == 0 or self.remove_on_error:
self.on_kill()
def on_kill(self) -> None:
if self.remove_on_error:
self.log.info("Deleting container group")
try:
self._ci_hook.delete(self.resource_group, self.name)
except Exception:
self.log.exception("Could not delete container group")
def _monitor_logging(self, resource_group: str, name: str) -> int:
last_state = None
last_message_logged = None
last_line_logged = None
while True:
try:
cg_state = self._ci_hook.get_state(resource_group, name)
instance_view = cg_state.containers[0].instance_view
# If there is no instance view, we show the provisioning state
if instance_view is not None:
c_state = instance_view.current_state
state, exit_code, detail_status = (
c_state.state,
c_state.exit_code,
c_state.detail_status,
)
else:
state = cg_state.provisioning_state
exit_code = 0
detail_status = "Provisioning"
if instance_view is not None and instance_view.events is not None:
messages = [event.message for event in instance_view.events]
last_message_logged = self._log_last(messages, last_message_logged)
if state != last_state:
self.log.info("Container group state changed to %s", state)
last_state = state
if state in ["Running", "Terminated", "Succeeded"]:
try:
logs = self._ci_hook.get_logs(resource_group, name)
last_line_logged = self._log_last(logs, last_line_logged)
except CloudError:
self.log.exception(
"Exception while getting logs from container instance, retrying..."
)
if state == "Terminated":
self.log.info("Container exited with detail_status %s", detail_status)
return exit_code
if state == "Failed":
self.log.error("Azure provision failure")
return 1
except AirflowTaskTimeout:
raise
except CloudError as err:
if "ResourceNotFound" in str(err):
self.log.warning(
"ResourceNotFound, container is probably removed "
"by another process "
"(make sure that the name is unique)."
)
return 1
else:
self.log.exception("Exception while getting container groups")
except Exception:
self.log.exception("Exception while getting container groups")
sleep(1)
def _log_last(self, logs: list | None, last_line_logged: Any) -> Any | None:
if logs:
# determine the last line which was logged before
last_line_index = 0
for i in range(len(logs) - 1, -1, -1):
if logs[i] == last_line_logged:
# this line is the same, hence print from i+1
last_line_index = i + 1
break
# log all new ones
for line in logs[last_line_index:]:
self.log.info(line.rstrip())
return logs[-1]
return None
@staticmethod
def _check_name(name: str) -> str:
if "{{" in name:
# Let macros pass as they cannot be checked at construction time
return name
regex_check = re.match("[a-z0-9]([-a-z0-9]*[a-z0-9])?", name)
if regex_check is None or regex_check.group() != name:
raise AirflowException('ACI name must match regex [a-z0-9]([-a-z0-9]*[a-z0-9])? (like "my-name")')
if len(name) > 63:
raise AirflowException("ACI name cannot be longer than 63 characters")
return name
| 15,968 | 40.803665 | 110 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/triggers/data_factory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
import time
from typing import Any, AsyncIterator
from azure.core.exceptions import ServiceRequestError
from airflow.providers.microsoft.azure.hooks.data_factory import (
AzureDataFactoryAsyncHook,
AzureDataFactoryPipelineRunStatus,
)
from airflow.triggers.base import BaseTrigger, TriggerEvent
class ADFPipelineRunStatusSensorTrigger(BaseTrigger):
"""Trigger with params to run the task when the ADF Pipeline is running.
:param run_id: The pipeline run identifier.
:param azure_data_factory_conn_id: The connection identifier for connecting to Azure Data Factory.
:param poke_interval: polling period in seconds to check for the status
:param resource_group_name: The resource group name.
:param factory_name: The data factory name.
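    **Example** (a sketch of how a deferrable sensor might hand off to this trigger; the run id,
    resource group and factory names are placeholders)::

        self.defer(
            trigger=ADFPipelineRunStatusSensorTrigger(
                run_id="my-pipeline-run-id",  # placeholder values
                azure_data_factory_conn_id="azure_data_factory_default",
                resource_group_name="my-resource-group",
                factory_name="my-factory",
                poke_interval=10,
            ),
            method_name="execute_complete",
        )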
"""
def __init__(
self,
run_id: str,
azure_data_factory_conn_id: str,
poke_interval: float,
resource_group_name: str | None = None,
factory_name: str | None = None,
):
super().__init__()
self.run_id = run_id
self.azure_data_factory_conn_id = azure_data_factory_conn_id
self.resource_group_name = resource_group_name
self.factory_name = factory_name
self.poke_interval = poke_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes ADFPipelineRunStatusSensorTrigger arguments and classpath."""
return (
"airflow.providers.microsoft.azure.triggers.data_factory.ADFPipelineRunStatusSensorTrigger",
{
"run_id": self.run_id,
"azure_data_factory_conn_id": self.azure_data_factory_conn_id,
"resource_group_name": self.resource_group_name,
"factory_name": self.factory_name,
"poke_interval": self.poke_interval,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Make async connection to Azure Data Factory, polls for the pipeline run status."""
hook = AzureDataFactoryAsyncHook(azure_data_factory_conn_id=self.azure_data_factory_conn_id)
        executed_after_token_refresh = True
try:
while True:
try:
pipeline_status = await hook.get_adf_pipeline_run_status(
run_id=self.run_id,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
)
                    executed_after_token_refresh = True
if pipeline_status == AzureDataFactoryPipelineRunStatus.FAILED:
yield TriggerEvent(
{"status": "error", "message": f"Pipeline run {self.run_id} has Failed."}
)
return
elif pipeline_status == AzureDataFactoryPipelineRunStatus.CANCELLED:
msg = f"Pipeline run {self.run_id} has been Cancelled."
yield TriggerEvent({"status": "error", "message": msg})
return
elif pipeline_status == AzureDataFactoryPipelineRunStatus.SUCCEEDED:
msg = f"Pipeline run {self.run_id} has been Succeeded."
yield TriggerEvent({"status": "success", "message": msg})
return
await asyncio.sleep(self.poke_interval)
except ServiceRequestError:
                    # The connection might expire during a long-running pipeline run. If a
                    # ServiceRequestError is caught, refresh the connection once and retry the
                    # status call. If the retried call fails as well, executed_after_token_refresh
                    # is still False and the exception is re-raised.
if executed_after_token_refresh:
await hook.refresh_conn()
executed_after_token_refresh = False
continue
raise
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
class AzureDataFactoryTrigger(BaseTrigger):
"""Trigger when the Azure data factory pipeline job finishes.
When wait_for_termination is set to False, it triggers immediately with success status.
:param run_id: Run id of a Azure data pipeline run job.
:param azure_data_factory_conn_id: The connection identifier for connecting to Azure Data Factory.
:param end_time: Time in seconds when triggers will timeout.
:param resource_group_name: The resource group name.
:param factory_name: The data factory name.
:param wait_for_termination: Flag to wait on a pipeline run's termination.
:param check_interval: Time in seconds to check on a pipeline run's status.
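    **Example** (a sketch of deferring an operator onto this trigger; the run id, resource group
    and factory names are placeholders)::

        self.defer(
            trigger=AzureDataFactoryTrigger(
                run_id="my-pipeline-run-id",  # placeholder values
                azure_data_factory_conn_id="azure_data_factory_default",
                resource_group_name="my-resource-group",
                factory_name="my-factory",
                end_time=time.time() + 60 * 60,
                wait_for_termination=True,
                check_interval=60,
            ),
            method_name="execute_complete",
        )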
"""
def __init__(
self,
run_id: str,
azure_data_factory_conn_id: str,
end_time: float,
resource_group_name: str | None = None,
factory_name: str | None = None,
wait_for_termination: bool = True,
check_interval: int = 60,
):
super().__init__()
self.azure_data_factory_conn_id = azure_data_factory_conn_id
self.check_interval = check_interval
self.run_id = run_id
self.wait_for_termination = wait_for_termination
self.resource_group_name = resource_group_name
self.factory_name = factory_name
self.end_time = end_time
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes AzureDataFactoryTrigger arguments and classpath."""
return (
"airflow.providers.microsoft.azure.triggers.data_factory.AzureDataFactoryTrigger",
{
"azure_data_factory_conn_id": self.azure_data_factory_conn_id,
"check_interval": self.check_interval,
"run_id": self.run_id,
"wait_for_termination": self.wait_for_termination,
"resource_group_name": self.resource_group_name,
"factory_name": self.factory_name,
"end_time": self.end_time,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Make async connection to Azure Data Factory, polls for the pipeline run status."""
hook = AzureDataFactoryAsyncHook(azure_data_factory_conn_id=self.azure_data_factory_conn_id)
try:
pipeline_status = await hook.get_adf_pipeline_run_status(
run_id=self.run_id,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
)
executed_after_token_refresh = True
if self.wait_for_termination:
while self.end_time > time.time():
try:
pipeline_status = await hook.get_adf_pipeline_run_status(
run_id=self.run_id,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
)
executed_after_token_refresh = True
if pipeline_status in AzureDataFactoryPipelineRunStatus.FAILURE_STATES:
yield TriggerEvent(
{
"status": "error",
"message": f"The pipeline run {self.run_id} has {pipeline_status}.",
"run_id": self.run_id,
}
)
return
elif pipeline_status == AzureDataFactoryPipelineRunStatus.SUCCEEDED:
yield TriggerEvent(
{
"status": "success",
"message": f"The pipeline run {self.run_id} has {pipeline_status}.",
"run_id": self.run_id,
}
)
return
self.log.info(
"Sleeping for %s. The pipeline state is %s.", self.check_interval, pipeline_status
)
await asyncio.sleep(self.check_interval)
except ServiceRequestError:
                            # The connection might expire during a long-running pipeline run. If a
                            # ServiceRequestError is caught, refresh the connection once and retry
                            # the status call. If the retried call fails as well,
                            # executed_after_token_refresh is still False and the exception is
                            # re-raised.
if executed_after_token_refresh:
await hook.refresh_conn()
executed_after_token_refresh = False
continue
raise
yield TriggerEvent(
{
"status": "error",
"message": f"Timeout: The pipeline run {self.run_id} has {pipeline_status}.",
"run_id": self.run_id,
}
)
else:
yield TriggerEvent(
{
"status": "success",
"message": f"The pipeline run {self.run_id} has {pipeline_status} status.",
"run_id": self.run_id,
}
)
except Exception as e:
if self.run_id:
try:
await hook.cancel_pipeline_run(
run_id=self.run_id,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
)
self.log.info("Unexpected error %s caught. Cancel pipeline run %s", str(e), self.run_id)
except Exception as err:
yield TriggerEvent({"status": "error", "message": str(err), "run_id": self.run_id})
yield TriggerEvent({"status": "error", "message": str(e), "run_id": self.run_id})
| 11,197 | 45.658333 | 110 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/triggers/wasb.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator
from airflow.providers.microsoft.azure.hooks.wasb import WasbAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class WasbBlobSensorTrigger(BaseTrigger):
"""
Checks for existence of the given blob in the provided container.
    WasbBlobSensorTrigger is fired as a deferred class with params to run the task in the trigger worker.
:param container_name: name of the container in which the blob should be searched for
:param blob_name: name of the blob to check existence for
:param wasb_conn_id: the connection identifier for connecting to Azure WASB
:param poke_interval: polling period in seconds to check for the status
:param public_read: whether an anonymous public read access should be used. Default is False
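    **Example** (a sketch of how a deferrable sensor might defer onto this trigger; the container
    and blob names are placeholders)::

        self.defer(
            trigger=WasbBlobSensorTrigger(
                container_name="my-container",  # placeholder names
                blob_name="my-blob.json",
                wasb_conn_id="wasb_default",
                poke_interval=5.0,
            ),
            method_name="execute_complete",
        )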
"""
def __init__(
self,
container_name: str,
blob_name: str,
wasb_conn_id: str = "wasb_default",
public_read: bool = False,
poke_interval: float = 5.0,
):
super().__init__()
self.container_name = container_name
self.blob_name = blob_name
self.wasb_conn_id = wasb_conn_id
self.poke_interval = poke_interval
self.public_read = public_read
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes WasbBlobSensorTrigger arguments and classpath."""
return (
"airflow.providers.microsoft.azure.triggers.wasb.WasbBlobSensorTrigger",
{
"container_name": self.container_name,
"blob_name": self.blob_name,
"wasb_conn_id": self.wasb_conn_id,
"poke_interval": self.poke_interval,
"public_read": self.public_read,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Makes async connection to Azure WASB and polls for existence of the given blob name."""
blob_exists = False
hook = WasbAsyncHook(wasb_conn_id=self.wasb_conn_id, public_read=self.public_read)
try:
async with await hook.get_async_conn():
while not blob_exists:
blob_exists = await hook.check_for_blob_async(
container_name=self.container_name,
blob_name=self.blob_name,
)
if blob_exists:
message = f"Blob {self.blob_name} found in container {self.container_name}."
yield TriggerEvent({"status": "success", "message": message})
else:
message = (
f"Blob {self.blob_name} not available yet in container {self.container_name}."
f" Sleeping for {self.poke_interval} seconds"
)
self.log.info(message)
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
class WasbPrefixSensorTrigger(BaseTrigger):
"""
Checks for the existence of a blob with the given prefix in the provided container.
    WasbPrefixSensorTrigger is fired as a deferred class with params to run the task in the trigger worker.
:param container_name: name of the container in which the blob should be searched for
:param prefix: prefix of the blob to check existence for
:param include: specifies one or more additional datasets to include in the
response. Options include: ``snapshots``, ``metadata``, ``uncommittedblobs``,
``copy``, ``deleted``
    :param delimiter: filters objects based on the delimiter (e.g. '.csv')
:param wasb_conn_id: the connection identifier for connecting to Azure WASB
:param poke_interval: polling period in seconds to check for the status
:param public_read: whether an anonymous public read access should be used. Default is False
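    **Example** (a sketch of how a deferrable sensor might defer onto this trigger; the container
    name and prefix are placeholders)::

        self.defer(
            trigger=WasbPrefixSensorTrigger(
                container_name="my-container",  # placeholder names
                prefix="data/2023/",
                delimiter="/",
                wasb_conn_id="wasb_default",
                poke_interval=5.0,
            ),
            method_name="execute_complete",
        )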
"""
def __init__(
self,
container_name: str,
prefix: str,
include: list[str] | None = None,
delimiter: str = "/",
wasb_conn_id: str = "wasb_default",
public_read: bool = False,
poke_interval: float = 5.0,
):
super().__init__()
self.container_name = container_name
self.prefix = prefix
self.include = include
self.delimiter = delimiter
self.wasb_conn_id = wasb_conn_id
self.poke_interval = poke_interval
self.public_read = public_read
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes WasbPrefixSensorTrigger arguments and classpath."""
return (
"airflow.providers.microsoft.azure.triggers.wasb.WasbPrefixSensorTrigger",
{
"container_name": self.container_name,
"prefix": self.prefix,
"include": self.include,
"delimiter": self.delimiter,
"wasb_conn_id": self.wasb_conn_id,
"poke_interval": self.poke_interval,
"public_read": self.public_read,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Makes async connection to Azure WASB and polls for existence of a blob with given prefix."""
prefix_exists = False
hook = WasbAsyncHook(wasb_conn_id=self.wasb_conn_id, public_read=self.public_read)
try:
async with await hook.get_async_conn():
while not prefix_exists:
prefix_exists = await hook.check_for_prefix_async(
container_name=self.container_name,
prefix=self.prefix,
include=self.include,
delimiter=self.delimiter,
)
if prefix_exists:
message = f"Prefix {self.prefix} found in container {self.container_name}."
yield TriggerEvent({"status": "success", "message": message})
else:
message = (
f"Prefix {self.prefix} not available yet in container {self.container_name}."
f" Sleeping for {self.poke_interval} seconds"
)
self.log.info(message)
await asyncio.sleep(self.poke_interval)
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
| 7,382 | 42.946429 | 106 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/triggers/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/hooks/data_factory.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Spelling exceptions.
.. spelling:word-list::
CreateRunResponse
DatasetResource
LinkedServiceResource
LROPoller
PipelineResource
PipelineRun
TriggerResource
datafactory
DataFlow
mgmt
"""
from __future__ import annotations
import inspect
import time
from functools import wraps
from typing import Any, Callable, TypeVar, Union, cast
from asgiref.sync import sync_to_async
from azure.core.exceptions import ServiceRequestError
from azure.core.polling import LROPoller
from azure.identity import ClientSecretCredential, DefaultAzureCredential
from azure.identity.aio import (
ClientSecretCredential as AsyncClientSecretCredential,
DefaultAzureCredential as AsyncDefaultAzureCredential,
)
from azure.mgmt.datafactory import DataFactoryManagementClient
from azure.mgmt.datafactory.aio import DataFactoryManagementClient as AsyncDataFactoryManagementClient
from azure.mgmt.datafactory.models import (
CreateRunResponse,
DataFlow,
DatasetResource,
Factory,
LinkedServiceResource,
PipelineResource,
PipelineRun,
TriggerResource,
)
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.typing_compat import TypedDict
Credentials = Union[ClientSecretCredential, DefaultAzureCredential]
AsyncCredentials = Union[AsyncClientSecretCredential, AsyncDefaultAzureCredential]
T = TypeVar("T", bound=Any)
def provide_targeted_factory(func: Callable) -> Callable:
"""
Provide the targeted factory to the decorated function in case it isn't specified.
If ``resource_group_name`` or ``factory_name`` is not provided it defaults to the value specified in
the connection extras.
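    **Example** (illustrative only; the decorated method below is a sketch)::

        @provide_targeted_factory
        def get_factory(self, resource_group_name=None, factory_name=None, **config):
            # resource_group_name / factory_name fall back to the connection extras if omitted
            ...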
"""
signature = inspect.signature(func)
@wraps(func)
def wrapper(*args, **kwargs) -> Callable:
bound_args = signature.bind(*args, **kwargs)
def bind_argument(arg, default_key):
# Check if arg was not included in the function signature or, if it is, the value is not provided.
if arg not in bound_args.arguments or bound_args.arguments[arg] is None:
self = args[0]
conn = self.get_connection(self.conn_id)
extras = conn.extra_dejson
default_value = extras.get(default_key) or extras.get(
f"extra__azure_data_factory__{default_key}"
)
if not default_value:
raise AirflowException("Could not determine the targeted data factory.")
bound_args.arguments[arg] = default_value
bind_argument("resource_group_name", "resource_group_name")
bind_argument("factory_name", "factory_name")
return func(*bound_args.args, **bound_args.kwargs)
return wrapper
class PipelineRunInfo(TypedDict):
"""Type class for the pipeline run info dictionary."""
run_id: str
factory_name: str | None
resource_group_name: str | None
class AzureDataFactoryPipelineRunStatus:
"""Azure Data Factory pipeline operation statuses."""
QUEUED = "Queued"
IN_PROGRESS = "InProgress"
SUCCEEDED = "Succeeded"
FAILED = "Failed"
CANCELING = "Canceling"
CANCELLED = "Cancelled"
TERMINAL_STATUSES = {CANCELLED, FAILED, SUCCEEDED}
INTERMEDIATE_STATES = {QUEUED, IN_PROGRESS, CANCELING}
FAILURE_STATES = {FAILED, CANCELLED}
class AzureDataFactoryPipelineRunException(AirflowException):
"""An exception that indicates a pipeline run failed to complete."""
def get_field(extras: dict, field_name: str, strict: bool = False):
"""Get field from extra, first checking short name, then for backcompat we check for prefixed name."""
backcompat_prefix = "extra__azure_data_factory__"
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix "
"when using this method."
)
if field_name in extras:
return extras[field_name] or None
prefixed_name = f"{backcompat_prefix}{field_name}"
if prefixed_name in extras:
return extras[prefixed_name] or None
if strict:
raise KeyError(f"Field {field_name} not found in extras")
class AzureDataFactoryHook(BaseHook):
"""
A hook to interact with Azure Data Factory.
:param azure_data_factory_conn_id: The :ref:`Azure Data Factory connection id<howto/connection:adf>`.
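    **Example** (a minimal sketch; the resource group and factory names are placeholders)::

        hook = AzureDataFactoryHook(azure_data_factory_conn_id="azure_data_factory_default")
        factory = hook.get_factory(
            resource_group_name="my-resource-group",  # placeholder names
            factory_name="my-factory",
        )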
"""
conn_type: str = "azure_data_factory"
conn_name_attr: str = "azure_data_factory_conn_id"
default_conn_name: str = "azure_data_factory_default"
hook_name: str = "Azure Data Factory"
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"tenantId": StringField(lazy_gettext("Tenant ID"), widget=BS3TextFieldWidget()),
"subscriptionId": StringField(lazy_gettext("Subscription ID"), widget=BS3TextFieldWidget()),
"resource_group_name": StringField(
lazy_gettext("Resource Group Name"), widget=BS3TextFieldWidget()
),
"factory_name": StringField(lazy_gettext("Factory Name"), widget=BS3TextFieldWidget()),
}
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "host", "extra"],
"relabeling": {
"login": "Client ID",
"password": "Secret",
},
}
def __init__(self, azure_data_factory_conn_id: str = default_conn_name):
self._conn: DataFactoryManagementClient = None
self.conn_id = azure_data_factory_conn_id
super().__init__()
def get_conn(self) -> DataFactoryManagementClient:
if self._conn is not None:
return self._conn
conn = self.get_connection(self.conn_id)
extras = conn.extra_dejson
tenant = get_field(extras, "tenantId")
try:
subscription_id = get_field(extras, "subscriptionId", strict=True)
except KeyError:
raise ValueError("A Subscription ID is required to connect to Azure Data Factory.")
credential: Credentials
if conn.login is not None and conn.password is not None:
if not tenant:
raise ValueError("A Tenant ID is required when authenticating with Client ID and Secret.")
credential = ClientSecretCredential(
client_id=conn.login, client_secret=conn.password, tenant_id=tenant
)
else:
credential = DefaultAzureCredential()
self._conn = self._create_client(credential, subscription_id)
return self._conn
def refresh_conn(self) -> DataFactoryManagementClient:
self._conn = None
return self.get_conn()
@provide_targeted_factory
def get_factory(
self, resource_group_name: str | None = None, factory_name: str | None = None, **config: Any
) -> Factory:
"""
Get the factory.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:return: The factory.
"""
return self.get_conn().factories.get(resource_group_name, factory_name, **config)
def _factory_exists(self, resource_group_name, factory_name) -> bool:
"""Return whether or not the factory already exists."""
factories = {
factory.name for factory in self.get_conn().factories.list_by_resource_group(resource_group_name)
}
return factory_name in factories
@staticmethod
def _create_client(credential: Credentials, subscription_id: str):
return DataFactoryManagementClient(
credential=credential,
subscription_id=subscription_id,
)
@provide_targeted_factory
def update_factory(
self,
factory: Factory,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> Factory:
"""
Update the factory.
:param factory: The factory resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:raise AirflowException: If the factory does not exist.
:return: The factory.
"""
if not self._factory_exists(resource_group_name, factory_name):
raise AirflowException(f"Factory {factory!r} does not exist.")
return self.get_conn().factories.create_or_update(
resource_group_name, factory_name, factory, **config
)
@provide_targeted_factory
def create_factory(
self,
factory: Factory,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> Factory:
"""
Create the factory.
:param factory: The factory resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:raise AirflowException: If the factory already exists.
:return: The factory.
"""
if self._factory_exists(resource_group_name, factory_name):
raise AirflowException(f"Factory {factory!r} already exists.")
return self.get_conn().factories.create_or_update(
resource_group_name, factory_name, factory, **config
)
@provide_targeted_factory
def delete_factory(
self, resource_group_name: str | None = None, factory_name: str | None = None, **config: Any
) -> None:
"""
Delete the factory.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
self.get_conn().factories.delete(resource_group_name, factory_name, **config)
@provide_targeted_factory
def get_linked_service(
self,
linked_service_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> LinkedServiceResource:
"""
Get the linked service.
:param linked_service_name: The linked service name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:return: The linked service.
"""
return self.get_conn().linked_services.get(
resource_group_name, factory_name, linked_service_name, **config
)
def _linked_service_exists(self, resource_group_name, factory_name, linked_service_name) -> bool:
"""Return whether or not the linked service already exists."""
linked_services = {
linked_service.name
for linked_service in self.get_conn().linked_services.list_by_factory(
resource_group_name, factory_name
)
}
return linked_service_name in linked_services
@provide_targeted_factory
def update_linked_service(
self,
linked_service_name: str,
linked_service: LinkedServiceResource,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> LinkedServiceResource:
"""
Update the linked service.
:param linked_service_name: The linked service name.
:param linked_service: The linked service resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:raise AirflowException: If the linked service does not exist.
:return: The linked service.
"""
if not self._linked_service_exists(resource_group_name, factory_name, linked_service_name):
raise AirflowException(f"Linked service {linked_service_name!r} does not exist.")
return self.get_conn().linked_services.create_or_update(
resource_group_name, factory_name, linked_service_name, linked_service, **config
)
@provide_targeted_factory
def create_linked_service(
self,
linked_service_name: str,
linked_service: LinkedServiceResource,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> LinkedServiceResource:
"""
Create the linked service.
:param linked_service_name: The linked service name.
:param linked_service: The linked service resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:raise AirflowException: If the linked service already exists.
:return: The linked service.
"""
if self._linked_service_exists(resource_group_name, factory_name, linked_service_name):
raise AirflowException(f"Linked service {linked_service_name!r} already exists.")
return self.get_conn().linked_services.create_or_update(
resource_group_name, factory_name, linked_service_name, linked_service, **config
)
@provide_targeted_factory
def delete_linked_service(
self,
linked_service_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> None:
"""
Delete the linked service.
:param linked_service_name: The linked service name.
        :param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
self.get_conn().linked_services.delete(
resource_group_name, factory_name, linked_service_name, **config
)
@provide_targeted_factory
def get_dataset(
self,
dataset_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> DatasetResource:
"""
Get the dataset.
:param dataset_name: The dataset name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:return: The dataset.
"""
return self.get_conn().datasets.get(resource_group_name, factory_name, dataset_name, **config)
def _dataset_exists(self, resource_group_name, factory_name, dataset_name) -> bool:
"""Return whether or not the dataset already exists."""
datasets = {
dataset.name
for dataset in self.get_conn().datasets.list_by_factory(resource_group_name, factory_name)
}
return dataset_name in datasets
@provide_targeted_factory
def update_dataset(
self,
dataset_name: str,
dataset: DatasetResource,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> DatasetResource:
"""
Update the dataset.
:param dataset_name: The dataset name.
:param dataset: The dataset resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:raise AirflowException: If the dataset does not exist.
:return: The dataset.
"""
if not self._dataset_exists(resource_group_name, factory_name, dataset_name):
raise AirflowException(f"Dataset {dataset_name!r} does not exist.")
return self.get_conn().datasets.create_or_update(
resource_group_name, factory_name, dataset_name, dataset, **config
)
@provide_targeted_factory
def create_dataset(
self,
dataset_name: str,
dataset: DatasetResource,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> DatasetResource:
"""
Create the dataset.
:param dataset_name: The dataset name.
:param dataset: The dataset resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:raise AirflowException: If the dataset already exists.
:return: The dataset.
"""
if self._dataset_exists(resource_group_name, factory_name, dataset_name):
raise AirflowException(f"Dataset {dataset_name!r} already exists.")
return self.get_conn().datasets.create_or_update(
resource_group_name, factory_name, dataset_name, dataset, **config
)
@provide_targeted_factory
def delete_dataset(
self,
dataset_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> None:
"""
Delete the dataset.
:param dataset_name: The dataset name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
self.get_conn().datasets.delete(resource_group_name, factory_name, dataset_name, **config)
@provide_targeted_factory
def get_dataflow(
self,
dataflow_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> DataFlow:
"""
Get the dataflow.
:param dataflow_name: The dataflow name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:return: The dataflow.
"""
return self.get_conn().data_flows.get(resource_group_name, factory_name, dataflow_name, **config)
def _dataflow_exists(
self,
dataflow_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
) -> bool:
"""Return whether the dataflow already exists."""
dataflows = {
dataflow.name
for dataflow in self.get_conn().data_flows.list_by_factory(resource_group_name, factory_name)
}
return dataflow_name in dataflows
@provide_targeted_factory
def update_dataflow(
self,
dataflow_name: str,
dataflow: DataFlow,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> DataFlow:
"""
Update the dataflow.
:param dataflow_name: The dataflow name.
:param dataflow: The dataflow resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
        :raise AirflowException: If the dataflow does not exist.
:return: The dataflow.
"""
if not self._dataflow_exists(
dataflow_name,
resource_group_name,
factory_name,
):
raise AirflowException(f"Dataflow {dataflow_name!r} does not exist.")
return self.get_conn().data_flows.create_or_update(
resource_group_name, factory_name, dataflow_name, dataflow, **config
)
@provide_targeted_factory
def create_dataflow(
self,
dataflow_name: str,
dataflow: DataFlow,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> DataFlow:
"""
Create the dataflow.
:param dataflow_name: The dataflow name.
:param dataflow: The dataflow resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
        :raise AirflowException: If the dataflow already exists.
        :return: The dataflow.
"""
if self._dataflow_exists(dataflow_name, resource_group_name, factory_name):
raise AirflowException(f"Dataflow {dataflow_name!r} already exists.")
return self.get_conn().data_flows.create_or_update(
resource_group_name, factory_name, dataflow_name, dataflow, **config
)
@provide_targeted_factory
def delete_dataflow(
self,
dataflow_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> None:
"""
Delete the dataflow.
:param dataflow_name: The dataflow name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
self.get_conn().data_flows.delete(resource_group_name, factory_name, dataflow_name, **config)
@provide_targeted_factory
def get_pipeline(
self,
pipeline_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> PipelineResource:
"""
Get the pipeline.
:param pipeline_name: The pipeline name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:return: The pipeline.
"""
return self.get_conn().pipelines.get(resource_group_name, factory_name, pipeline_name, **config)
def _pipeline_exists(self, resource_group_name, factory_name, pipeline_name) -> bool:
"""Return whether or not the pipeline already exists."""
pipelines = {
pipeline.name
for pipeline in self.get_conn().pipelines.list_by_factory(resource_group_name, factory_name)
}
return pipeline_name in pipelines
@provide_targeted_factory
def update_pipeline(
self,
pipeline_name: str,
pipeline: PipelineResource,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> PipelineResource:
"""
Update the pipeline.
:param pipeline_name: The pipeline name.
:param pipeline: The pipeline resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:raise AirflowException: If the pipeline does not exist.
:return: The pipeline.
"""
if not self._pipeline_exists(resource_group_name, factory_name, pipeline_name):
raise AirflowException(f"Pipeline {pipeline_name!r} does not exist.")
return self.get_conn().pipelines.create_or_update(
resource_group_name, factory_name, pipeline_name, pipeline, **config
)
@provide_targeted_factory
def create_pipeline(
self,
pipeline_name: str,
pipeline: PipelineResource,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> PipelineResource:
"""
Create the pipeline.
:param pipeline_name: The pipeline name.
:param pipeline: The pipeline resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:raise AirflowException: If the pipeline already exists.
:return: The pipeline.
"""
if self._pipeline_exists(resource_group_name, factory_name, pipeline_name):
raise AirflowException(f"Pipeline {pipeline_name!r} already exists.")
return self.get_conn().pipelines.create_or_update(
resource_group_name, factory_name, pipeline_name, pipeline, **config
)
@provide_targeted_factory
def delete_pipeline(
self,
pipeline_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> None:
"""
Delete the pipeline.
:param pipeline_name: The pipeline name.
        :param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
self.get_conn().pipelines.delete(resource_group_name, factory_name, pipeline_name, **config)
@provide_targeted_factory
def run_pipeline(
self,
pipeline_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> CreateRunResponse:
"""
Run a pipeline.
:param pipeline_name: The pipeline name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:return: The pipeline run.
"""
return self.get_conn().pipelines.create_run(
resource_group_name, factory_name, pipeline_name, **config
)
@provide_targeted_factory
def get_pipeline_run(
self,
run_id: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> PipelineRun:
"""
Get the pipeline run.
:param run_id: The pipeline run identifier.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:return: The pipeline run.
"""
return self.get_conn().pipeline_runs.get(resource_group_name, factory_name, run_id, **config)
def get_pipeline_run_status(
self,
run_id: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
) -> str:
"""
Get a pipeline run's current status.
:param run_id: The pipeline run identifier.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:return: The status of the pipeline run.
"""
self.log.info("Getting the status of run ID %s.", run_id)
pipeline_run_status = self.get_pipeline_run(
run_id=run_id,
factory_name=factory_name,
resource_group_name=resource_group_name,
).status
self.log.info("Current status of pipeline run %s: %s", run_id, pipeline_run_status)
return pipeline_run_status
def wait_for_pipeline_run_status(
self,
run_id: str,
expected_statuses: str | set[str],
resource_group_name: str | None = None,
factory_name: str | None = None,
check_interval: int = 60,
timeout: int = 60 * 60 * 24 * 7,
) -> bool:
"""
Waits for a pipeline run to match an expected status.
:param run_id: The pipeline run identifier.
:param expected_statuses: The desired status(es) to check against a pipeline run's current status.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param check_interval: Time in seconds to check on a pipeline run's status.
:param timeout: Time in seconds to wait for a pipeline to reach a terminal status or the expected
status.
        :return: Boolean indicating if the pipeline run has reached one of the ``expected_statuses``.
"""
pipeline_run_info = PipelineRunInfo(
run_id=run_id,
factory_name=factory_name,
resource_group_name=resource_group_name,
)
pipeline_run_status = self.get_pipeline_run_status(**pipeline_run_info)
executed_after_token_refresh = True
start_time = time.monotonic()
while (
pipeline_run_status not in AzureDataFactoryPipelineRunStatus.TERMINAL_STATUSES
and pipeline_run_status not in expected_statuses
):
# Check if the pipeline-run duration has exceeded the ``timeout`` configured.
if start_time + timeout < time.monotonic():
raise AzureDataFactoryPipelineRunException(
f"Pipeline run {run_id} has not reached a terminal status after {timeout} seconds."
)
# Wait to check the status of the pipeline run based on the ``check_interval`` configured.
time.sleep(check_interval)
try:
pipeline_run_status = self.get_pipeline_run_status(**pipeline_run_info)
executed_after_token_refresh = True
except ServiceRequestError:
if executed_after_token_refresh:
self.refresh_conn()
continue
raise
return pipeline_run_status in expected_statuses
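    # A minimal usage sketch (illustrative only): run a pipeline and block until it finishes.
    # The connection id, pipeline and factory names below are hypothetical placeholders; the
    # resource group is assumed to come from the connection extras via @provide_targeted_factory.
    #
    #   hook = AzureDataFactoryHook(azure_data_factory_conn_id="azure_data_factory_default")
    #   run = hook.run_pipeline(pipeline_name="my_pipeline", factory_name="my-factory")
    #   succeeded = hook.wait_for_pipeline_run_status(
    #       run_id=run.run_id,
    #       expected_statuses=AzureDataFactoryPipelineRunStatus.SUCCEEDED,
    #       factory_name="my-factory",
    #   )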
@provide_targeted_factory
def cancel_pipeline_run(
self,
run_id: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> None:
"""
Cancel the pipeline run.
:param run_id: The pipeline run identifier.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
self.get_conn().pipeline_runs.cancel(resource_group_name, factory_name, run_id, **config)
@provide_targeted_factory
def get_trigger(
self,
trigger_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> TriggerResource:
"""
Get the trigger.
:param trigger_name: The trigger name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:return: The trigger.
"""
return self.get_conn().triggers.get(resource_group_name, factory_name, trigger_name, **config)
def _trigger_exists(self, resource_group_name, factory_name, trigger_name) -> bool:
"""Return whether or not the trigger already exists."""
triggers = {
trigger.name
for trigger in self.get_conn().triggers.list_by_factory(resource_group_name, factory_name)
}
return trigger_name in triggers
@provide_targeted_factory
def update_trigger(
self,
trigger_name: str,
trigger: TriggerResource,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> TriggerResource:
"""
Update the trigger.
:param trigger_name: The trigger name.
:param trigger: The trigger resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:raise AirflowException: If the trigger does not exist.
:return: The trigger.
"""
if not self._trigger_exists(resource_group_name, factory_name, trigger_name):
raise AirflowException(f"Trigger {trigger_name!r} does not exist.")
return self.get_conn().triggers.create_or_update(
resource_group_name, factory_name, trigger_name, trigger, **config
)
@provide_targeted_factory
def create_trigger(
self,
trigger_name: str,
trigger: TriggerResource,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> TriggerResource:
"""
Create the trigger.
:param trigger_name: The trigger name.
:param trigger: The trigger resource definition.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:raise AirflowException: If the trigger already exists.
:return: The trigger.
"""
if self._trigger_exists(resource_group_name, factory_name, trigger_name):
raise AirflowException(f"Trigger {trigger_name!r} already exists.")
return self.get_conn().triggers.create_or_update(
resource_group_name, factory_name, trigger_name, trigger, **config
)
@provide_targeted_factory
def delete_trigger(
self,
trigger_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> None:
"""
Delete the trigger.
:param trigger_name: The trigger name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
self.get_conn().triggers.delete(resource_group_name, factory_name, trigger_name, **config)
@provide_targeted_factory
def start_trigger(
self,
trigger_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> LROPoller:
"""
Start the trigger.
:param trigger_name: The trigger name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:return: An Azure operation poller.
"""
return self.get_conn().triggers.begin_start(resource_group_name, factory_name, trigger_name, **config)
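    # A minimal usage sketch (illustrative only; "my_trigger" and "my-factory" are placeholder names):
    #
    #   hook = AzureDataFactoryHook(azure_data_factory_conn_id="azure_data_factory_default")
    #   poller = hook.start_trigger(trigger_name="my_trigger", factory_name="my-factory")
    #   poller.result()  # block until the start operation completes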
@provide_targeted_factory
def stop_trigger(
self,
trigger_name: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> LROPoller:
"""
Stop the trigger.
:param trigger_name: The trigger name.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
:return: An Azure operation poller.
"""
return self.get_conn().triggers.begin_stop(resource_group_name, factory_name, trigger_name, **config)
@provide_targeted_factory
def rerun_trigger(
self,
trigger_name: str,
run_id: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> None:
"""
Rerun the trigger.
:param trigger_name: The trigger name.
:param run_id: The trigger run identifier.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
return self.get_conn().trigger_runs.rerun(
resource_group_name, factory_name, trigger_name, run_id, **config
)
@provide_targeted_factory
def cancel_trigger(
self,
trigger_name: str,
run_id: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> None:
"""
Cancel the trigger.
:param trigger_name: The trigger name.
:param run_id: The trigger run identifier.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
self.get_conn().trigger_runs.cancel(resource_group_name, factory_name, trigger_name, run_id, **config)
def test_connection(self) -> tuple[bool, str]:
"""Test a configured Azure Data Factory connection."""
success = (True, "Successfully connected to Azure Data Factory.")
try:
# Attempt to list existing factories under the configured subscription and retrieve the first in
# the returned iterator. The Azure Data Factory API does allow for creation of a
# DataFactoryManagementClient with incorrect values but then will fail properly once items are
# retrieved using the client. We need to _actually_ try to retrieve an object to properly test the
# connection.
next(self.get_conn().factories.list())
return success
except StopIteration:
# If the iterator returned is empty it should still be considered a successful connection since
# it's possible to create a Data Factory via the ``AzureDataFactoryHook`` and none could
# legitimately exist yet.
return success
except Exception as e:
return False, str(e)
def provide_targeted_factory_async(func: T) -> T:
"""
Provide the targeted factory to the async decorated function in case it isn't specified.
If ``resource_group_name`` or ``factory_name`` is not provided it defaults to the value specified in
the connection extras.
"""
signature = inspect.signature(func)
@wraps(func)
async def wrapper(*args: Any, **kwargs: Any) -> Any:
bound_args = signature.bind(*args, **kwargs)
async def bind_argument(arg: Any, default_key: str) -> None:
# Check if arg was not included in the function signature or, if it is, the value is not provided.
if arg not in bound_args.arguments or bound_args.arguments[arg] is None:
self = args[0]
conn = await sync_to_async(self.get_connection)(self.conn_id)
extras = conn.extra_dejson
default_value = extras.get(default_key) or extras.get(
f"extra__azure_data_factory__{default_key}"
)
if not default_value:
raise AirflowException("Could not determine the targeted data factory.")
bound_args.arguments[arg] = default_value
await bind_argument("resource_group_name", "resource_group_name")
await bind_argument("factory_name", "factory_name")
return await func(*bound_args.args, **bound_args.kwargs)
return cast(T, wrapper)
class AzureDataFactoryAsyncHook(AzureDataFactoryHook):
"""
    An async hook that connects to Azure Data Factory to perform pipeline operations.
:param azure_data_factory_conn_id: The :ref:`Azure Data Factory connection id<howto/connection:adf>`.
"""
default_conn_name: str = "azure_data_factory_default"
def __init__(self, azure_data_factory_conn_id: str = default_conn_name):
self._async_conn: AsyncDataFactoryManagementClient = None
self.conn_id = azure_data_factory_conn_id
super().__init__(azure_data_factory_conn_id=azure_data_factory_conn_id)
async def get_async_conn(self) -> AsyncDataFactoryManagementClient:
"""Get async connection and connect to azure data factory."""
if self._async_conn is not None:
return self._async_conn
conn = await sync_to_async(self.get_connection)(self.conn_id)
extras = conn.extra_dejson
tenant = get_field(extras, "tenantId")
try:
subscription_id = get_field(extras, "subscriptionId", strict=True)
except KeyError:
raise ValueError("A Subscription ID is required to connect to Azure Data Factory.")
credential: AsyncCredentials
if conn.login is not None and conn.password is not None:
if not tenant:
raise ValueError("A Tenant ID is required when authenticating with Client ID and Secret.")
credential = AsyncClientSecretCredential(
client_id=conn.login, client_secret=conn.password, tenant_id=tenant
)
else:
credential = AsyncDefaultAzureCredential()
self._async_conn = AsyncDataFactoryManagementClient(
credential=credential,
subscription_id=subscription_id,
)
return self._async_conn
async def refresh_conn(self) -> AsyncDataFactoryManagementClient:
self._conn = None
return await self.get_async_conn()
@provide_targeted_factory_async
async def get_pipeline_run(
self,
run_id: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> PipelineRun:
"""
Connect to Azure Data Factory asynchronously to get the pipeline run details by run id.
:param run_id: The pipeline run identifier.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
client = await self.get_async_conn()
pipeline_run = await client.pipeline_runs.get(resource_group_name, factory_name, run_id)
return pipeline_run
async def get_adf_pipeline_run_status(
self, run_id: str, resource_group_name: str | None = None, factory_name: str | None = None
) -> str:
"""
Connect to Azure Data Factory asynchronously and get the pipeline status by run_id.
:param run_id: The pipeline run identifier.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
"""
pipeline_run = await self.get_pipeline_run(
run_id=run_id,
factory_name=factory_name,
resource_group_name=resource_group_name,
)
status: str = pipeline_run.status
return status
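    # A minimal async usage sketch (illustrative only; run inside an event loop, e.g. from a
    # deferrable trigger; the run id, resource group and factory names are placeholders):
    #
    #   hook = AzureDataFactoryAsyncHook(azure_data_factory_conn_id="azure_data_factory_default")
    #   status = await hook.get_adf_pipeline_run_status(
    #       run_id="<run id>", resource_group_name="my-rg", factory_name="my-factory"
    #   )
    #   if status in AzureDataFactoryPipelineRunStatus.TERMINAL_STATUSES:
    #       ...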
@provide_targeted_factory_async
async def cancel_pipeline_run(
self,
run_id: str,
resource_group_name: str | None = None,
factory_name: str | None = None,
**config: Any,
) -> None:
"""
Cancel the pipeline run.
:param run_id: The pipeline run identifier.
:param resource_group_name: The resource group name.
:param factory_name: The factory name.
:param config: Extra parameters for the ADF client.
"""
client = await self.get_async_conn()
try:
await client.pipeline_runs.cancel(resource_group_name, factory_name, run_id)
except Exception as e:
raise AirflowException(e)
| 44,145 | 35.45417 | 110 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/hooks/wasb.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains integration with Azure Blob Storage.
It communicates via the Windows Azure Storage Blob protocol. Make sure that an
Airflow connection of type `wasb` exists. Authorization can be done by supplying a
login (=Storage account name) and password (=KEY), or login and SAS token in the extra
field (see connection `wasb_default` for an example).
"""
from __future__ import annotations
import logging
import os
from functools import wraps
from typing import Any, Union
from asgiref.sync import sync_to_async
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError
from azure.identity import ClientSecretCredential, DefaultAzureCredential
from azure.identity.aio import (
ClientSecretCredential as AsyncClientSecretCredential,
DefaultAzureCredential as AsyncDefaultAzureCredential,
)
from azure.storage.blob import BlobClient, BlobServiceClient, ContainerClient, StorageStreamDownloader
from azure.storage.blob._models import BlobProperties
from azure.storage.blob.aio import (
BlobClient as AsyncBlobClient,
BlobServiceClient as AsyncBlobServiceClient,
ContainerClient as AsyncContainerClient,
)
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
AsyncCredentials = Union[AsyncClientSecretCredential, AsyncDefaultAzureCredential]
def _ensure_prefixes(conn_type):
"""
Deprecated.
Remove when provider min airflow version >= 2.5.0 since this is handled by
provider manager from that version.
"""
def dec(func):
@wraps(func)
def inner():
field_behaviors = func()
conn_attrs = {"host", "schema", "login", "password", "port", "extra"}
def _ensure_prefix(field):
if field not in conn_attrs and not field.startswith("extra__"):
return f"extra__{conn_type}__{field}"
else:
return field
if "placeholders" in field_behaviors:
placeholders = field_behaviors["placeholders"]
field_behaviors["placeholders"] = {_ensure_prefix(k): v for k, v in placeholders.items()}
return field_behaviors
return inner
return dec
class WasbHook(BaseHook):
"""
Interacts with Azure Blob Storage through the ``wasb://`` protocol.
    The account_name and account_key parameters have to be stored in an Airflow connection.
Additional options passed in the 'extra' field of the connection will be
    passed to the `BlobServiceClient()` constructor. For example, authenticate
using a SAS token by adding {"sas_token": "YOUR_TOKEN"}.
If no authentication configuration is provided, DefaultAzureCredential will be used (applicable
when using Azure compute infrastructure).
:param wasb_conn_id: Reference to the :ref:`wasb connection <howto/connection:wasb>`.
:param public_read: Whether an anonymous public read access should be used. default is False
"""
conn_name_attr = "wasb_conn_id"
default_conn_name = "wasb_default"
conn_type = "wasb"
hook_name = "Azure Blob Storage"
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import PasswordField, StringField
return {
"connection_string": PasswordField(
lazy_gettext("Blob Storage Connection String (optional)"), widget=BS3PasswordFieldWidget()
),
"shared_access_key": PasswordField(
lazy_gettext("Blob Storage Shared Access Key (optional)"), widget=BS3PasswordFieldWidget()
),
"tenant_id": StringField(
lazy_gettext("Tenant Id (Active Directory Auth)"), widget=BS3TextFieldWidget()
),
"sas_token": PasswordField(lazy_gettext("SAS Token (optional)"), widget=BS3PasswordFieldWidget()),
}
@staticmethod
@_ensure_prefixes(conn_type="wasb")
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema", "port"],
"relabeling": {
"login": "Blob Storage Login (optional)",
"password": "Blob Storage Key (optional)",
"host": "Account Name (Active Directory Auth)",
},
"placeholders": {
"login": "account name",
"password": "secret",
"host": "account url",
"connection_string": "connection string auth",
"tenant_id": "tenant",
"shared_access_key": "shared access key",
"sas_token": "account url or token",
"extra": "additional options for use with ClientSecretCredential or DefaultAzureCredential",
},
}
def __init__(
self,
wasb_conn_id: str = default_conn_name,
public_read: bool = False,
) -> None:
super().__init__()
self.conn_id = wasb_conn_id
self.public_read = public_read
self.blob_service_client = self.get_conn()
logger = logging.getLogger("azure.core.pipeline.policies.http_logging_policy")
try:
logger.setLevel(os.environ.get("AZURE_HTTP_LOGGING_LEVEL", logging.WARNING))
except ValueError:
logger.setLevel(logging.WARNING)
def _get_field(self, extra_dict, field_name):
prefix = "extra__wasb__"
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{prefix}' prefix "
f"when using this method."
)
if field_name in extra_dict:
return extra_dict[field_name] or None
return extra_dict.get(f"{prefix}{field_name}") or None
def get_conn(self) -> BlobServiceClient:
"""Return the BlobServiceClient object."""
conn = self.get_connection(self.conn_id)
extra = conn.extra_dejson or {}
client_secret_auth_config = extra.pop("client_secret_auth_config", {})
connection_string = self._get_field(extra, "connection_string")
if connection_string:
# connection_string auth takes priority
return BlobServiceClient.from_connection_string(connection_string, **extra)
tenant = self._get_field(extra, "tenant_id")
if tenant:
# use Active Directory auth
app_id = conn.login
app_secret = conn.password
token_credential = ClientSecretCredential(tenant, app_id, app_secret, **client_secret_auth_config)
return BlobServiceClient(account_url=conn.host, credential=token_credential, **extra)
account_url = conn.host if conn.host else f"https://{conn.login}.blob.core.windows.net/"
if self.public_read:
# Here we use anonymous public read
# more info
# https://docs.microsoft.com/en-us/azure/storage/blobs/storage-manage-access-to-resources
return BlobServiceClient(account_url=account_url, **extra)
shared_access_key = self._get_field(extra, "shared_access_key")
if shared_access_key:
# using shared access key
return BlobServiceClient(account_url=account_url, credential=shared_access_key, **extra)
sas_token = self._get_field(extra, "sas_token")
if sas_token:
if sas_token.startswith("https"):
return BlobServiceClient(account_url=sas_token, **extra)
else:
if not account_url.startswith("https://"):
# TODO: require url in the host field in the next major version?
account_url = f"https://{conn.login}.blob.core.windows.net"
return BlobServiceClient(account_url=f"{account_url.rstrip('/')}/{sas_token}", **extra)
# Fall back to old auth (password) or use managed identity if not provided.
credential = conn.password
if not credential:
credential = DefaultAzureCredential()
self.log.info("Using DefaultAzureCredential as credential")
if not account_url.startswith("https://"):
# TODO: require url in the host field in the next major version?
account_url = f"https://{conn.login}.blob.core.windows.net/"
return BlobServiceClient(
account_url=account_url,
credential=credential,
**extra,
)
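    # A minimal configuration sketch (illustrative only; all values are placeholders). With no
    # connection string or tenant id in the extras, get_conn() above falls through to SAS-token auth:
    #
    #   Connection(
    #       conn_id="wasb_default",
    #       conn_type="wasb",
    #       login="mystorageaccount",
    #       extra={"sas_token": "<sas token or full https URL>"},
    #   )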
def _get_container_client(self, container_name: str) -> ContainerClient:
"""
Instantiates a container client.
:param container_name: The name of the container
:return: ContainerClient
"""
return self.blob_service_client.get_container_client(container_name)
def _get_blob_client(self, container_name: str, blob_name: str) -> BlobClient:
"""
Instantiates a blob client.
:param container_name: The name of the blob container
:param blob_name: The name of the blob. This needs not be existing
"""
return self.blob_service_client.get_blob_client(container=container_name, blob=blob_name)
def check_for_blob(self, container_name: str, blob_name: str, **kwargs) -> bool:
"""
Check if a blob exists on Azure Blob Storage.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param kwargs: Optional keyword arguments for ``BlobClient.get_blob_properties`` takes.
:return: True if the blob exists, False otherwise.
"""
try:
self._get_blob_client(container_name, blob_name).get_blob_properties(**kwargs)
except ResourceNotFoundError:
return False
return True
def check_for_prefix(self, container_name: str, prefix: str, **kwargs) -> bool:
"""
Check if a prefix exists on Azure Blob storage.
:param container_name: Name of the container.
:param prefix: Prefix of the blob.
:param kwargs: Optional keyword arguments that ``ContainerClient.walk_blobs`` takes
:return: True if blobs matching the prefix exist, False otherwise.
"""
blobs = self.get_blobs_list(container_name=container_name, prefix=prefix, **kwargs)
return len(blobs) > 0
def get_blobs_list(
self,
container_name: str,
prefix: str | None = None,
include: list[str] | None = None,
delimiter: str = "/",
**kwargs,
) -> list:
"""
List blobs in a given container.
:param container_name: The name of the container
:param prefix: Filters the results to return only blobs whose names
begin with the specified prefix.
:param include: Specifies one or more additional datasets to include in the
response. Options include: ``snapshots``, ``metadata``, ``uncommittedblobs``,
``copy`, ``deleted``.
:param delimiter: filters objects based on the delimiter (for e.g '.csv')
"""
container = self._get_container_client(container_name)
blob_list = []
blobs = container.walk_blobs(name_starts_with=prefix, include=include, delimiter=delimiter, **kwargs)
for blob in blobs:
blob_list.append(blob.name)
return blob_list
def get_blobs_list_recursive(
self,
container_name: str,
prefix: str | None = None,
include: list[str] | None = None,
endswith: str = "",
**kwargs,
) -> list:
"""
        Recursively list blobs in a given container, optionally filtering by name suffix.
:param container_name: The name of the container
:param prefix: Filters the results to return only blobs whose names
begin with the specified prefix.
:param include: Specifies one or more additional datasets to include in the
response. Options include: ``snapshots``, ``metadata``, ``uncommittedblobs``,
            ``copy``, ``deleted``.
        :param endswith: filters blobs whose names end with the given suffix (e.g. '.csv')
"""
container = self._get_container_client(container_name)
blob_list = []
blobs = container.list_blobs(name_starts_with=prefix, include=include, **kwargs)
for blob in blobs:
if blob.name.endswith(endswith):
blob_list.append(blob.name)
return blob_list
def load_file(
self,
file_path: str,
container_name: str,
blob_name: str,
create_container: bool = False,
**kwargs,
) -> None:
"""
Upload a file to Azure Blob Storage.
:param file_path: Path to the file to load.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param create_container: Attempt to create the target container prior to uploading the blob. This is
useful if the target container may not exist yet. Defaults to False.
:param kwargs: Optional keyword arguments that ``BlobClient.upload_blob()`` takes.
"""
with open(file_path, "rb") as data:
self.upload(
container_name=container_name,
blob_name=blob_name,
data=data,
create_container=create_container,
**kwargs,
)
def load_string(
self,
string_data: str,
container_name: str,
blob_name: str,
create_container: bool = False,
**kwargs,
) -> None:
"""
Upload a string to Azure Blob Storage.
:param string_data: String to load.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param create_container: Attempt to create the target container prior to uploading the blob. This is
useful if the target container may not exist yet. Defaults to False.
        :param kwargs: Optional keyword arguments that ``BlobClient.upload_blob()`` takes.
"""
# Reorder the argument order from airflow.providers.amazon.aws.hooks.s3.load_string.
self.upload(
container_name=container_name,
blob_name=blob_name,
data=string_data,
create_container=create_container,
**kwargs,
)
def get_file(self, file_path: str, container_name: str, blob_name: str, **kwargs):
"""
Download a file from Azure Blob Storage.
:param file_path: Path to the file to download.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param kwargs: Optional keyword arguments that `BlobClient.download_blob()` takes.
"""
with open(file_path, "wb") as fileblob:
stream = self.download(container_name=container_name, blob_name=blob_name, **kwargs)
fileblob.write(stream.readall())
def read_file(self, container_name: str, blob_name: str, **kwargs):
"""
Read a file from Azure Blob Storage and return as a string.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param kwargs: Optional keyword arguments that `BlobClient.download_blob` takes.
"""
return self.download(container_name, blob_name, **kwargs).content_as_text()
def upload(
self,
container_name: str,
blob_name: str,
data: Any,
blob_type: str = "BlockBlob",
length: int | None = None,
create_container: bool = False,
**kwargs,
) -> dict[str, Any]:
"""
Creates a new blob from a data source with automatic chunking.
:param container_name: The name of the container to upload data
:param blob_name: The name of the blob to upload. This need not exist in the container
:param data: The blob data to upload
:param blob_type: The type of the blob. This can be either ``BlockBlob``,
``PageBlob`` or ``AppendBlob``. The default value is ``BlockBlob``.
:param length: Number of bytes to read from the stream. This is optional,
but should be supplied for optimal performance.
:param create_container: Attempt to create the target container prior to uploading the blob. This is
useful if the target container may not exist yet. Defaults to False.
"""
if create_container:
self.create_container(container_name)
blob_client = self._get_blob_client(container_name, blob_name)
return blob_client.upload_blob(data, blob_type, length=length, **kwargs)
def download(
self, container_name, blob_name, offset: int | None = None, length: int | None = None, **kwargs
) -> StorageStreamDownloader:
"""
Downloads a blob to the StorageStreamDownloader.
:param container_name: The name of the container containing the blob
:param blob_name: The name of the blob to download
:param offset: Start of byte range to use for downloading a section of the blob.
Must be set if length is provided.
:param length: Number of bytes to read from the stream.
"""
blob_client = self._get_blob_client(container_name, blob_name)
return blob_client.download_blob(offset=offset, length=length, **kwargs)
def create_container(self, container_name: str) -> None:
"""
Create container object if not already existing.
:param container_name: The name of the container to create
"""
container_client = self._get_container_client(container_name)
try:
self.log.debug("Attempting to create container: %s", container_name)
container_client.create_container()
self.log.info("Created container: %s", container_name)
except ResourceExistsError:
self.log.info(
"Attempted to create container %r but it already exists. If it is expected that this "
"container will always exist, consider setting create_container to False.",
container_name,
)
except HttpResponseError as e:
self.log.info(
"Received an HTTP response error while attempting to creating container %r: %s"
"\nIf the error is related to missing permissions to create containers, please consider "
"setting create_container to False or supplying connection credentials with the "
"appropriate permission for connection ID %r.",
container_name,
e.response,
self.conn_id,
)
except Exception as e:
self.log.info("Error while attempting to create container %r: %s", container_name, e)
raise
def delete_container(self, container_name: str) -> None:
"""
Delete a container object.
:param container_name: The name of the container
"""
try:
self.log.debug("Attempting to delete container: %s", container_name)
self._get_container_client(container_name).delete_container()
self.log.info("Deleted container: %s", container_name)
except ResourceNotFoundError:
self.log.info("Unable to delete container %s (not found)", container_name)
        except Exception:
self.log.info("Error deleting container: %s", container_name)
raise
def delete_blobs(self, container_name: str, *blobs, **kwargs) -> None:
"""
Marks the specified blobs or snapshots for deletion.
:param container_name: The name of the container containing the blobs
:param blobs: The blobs to delete. This can be a single blob, or multiple values
can be supplied, where each value is either the name of the blob (str) or BlobProperties.
"""
self._get_container_client(container_name).delete_blobs(*blobs, **kwargs)
self.log.info("Deleted blobs: %s", blobs)
def delete_file(
self,
container_name: str,
blob_name: str,
is_prefix: bool = False,
ignore_if_missing: bool = False,
delimiter: str = "",
**kwargs,
) -> None:
"""
Delete a file, or all blobs matching a prefix, from Azure Blob Storage.
:param container_name: Name of the container.
:param blob_name: Name of the blob.
:param is_prefix: If blob_name is a prefix, delete all matching files
:param ignore_if_missing: if True, then return success even if the
blob does not exist.
        :param delimiter: filters objects based on the delimiter (for e.g '.csv')
        :param kwargs: Optional keyword arguments that ``ContainerClient.delete_blobs()`` takes.
"""
if is_prefix:
blobs_to_delete = self.get_blobs_list(
container_name, prefix=blob_name, delimiter=delimiter, **kwargs
)
elif self.check_for_blob(container_name, blob_name):
blobs_to_delete = [blob_name]
else:
blobs_to_delete = []
if not ignore_if_missing and len(blobs_to_delete) == 0:
raise AirflowException(f"Blob(s) not found: {blob_name}")
# The maximum number of blobs that can be deleted in a single request is 256 using the underlying
# `ContainerClient.delete_blobs()` method. Therefore the deletes need to be in batches of <= 256.
num_blobs_to_delete = len(blobs_to_delete)
for i in range(0, num_blobs_to_delete, 256):
self.delete_blobs(container_name, *blobs_to_delete[i : i + 256], **kwargs)
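    # A minimal usage sketch (illustrative only; container and prefix names are placeholders):
    #
    #   hook = WasbHook(wasb_conn_id="wasb_default")
    #   # Delete every blob under the "exports/2023/" prefix, tolerating the case where none exist.
    #   hook.delete_file("my-container", "exports/2023/", is_prefix=True, ignore_if_missing=True)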
def test_connection(self):
"""Test Azure Blob Storage connection."""
success = (True, "Successfully connected to Azure Blob Storage.")
try:
# Attempt to retrieve storage account information
self.get_conn().get_account_information()
return success
except Exception as e:
return False, str(e)
class WasbAsyncHook(WasbHook):
"""
An async hook that connects to Azure WASB to perform operations.
:param wasb_conn_id: reference to the :ref:`wasb connection <howto/connection:wasb>`
:param public_read: whether an anonymous public read access should be used. default is False
"""
def __init__(
self,
wasb_conn_id: str = "wasb_default",
public_read: bool = False,
) -> None:
"""Initialize the hook instance."""
self.conn_id = wasb_conn_id
self.public_read = public_read
self.blob_service_client: AsyncBlobServiceClient = None # type: ignore
async def get_async_conn(self) -> AsyncBlobServiceClient:
"""Return the Async BlobServiceClient object."""
if self.blob_service_client is not None:
return self.blob_service_client
conn = await sync_to_async(self.get_connection)(self.conn_id)
extra = conn.extra_dejson or {}
client_secret_auth_config = extra.pop("client_secret_auth_config", {})
connection_string = self._get_field(extra, "connection_string")
if connection_string:
# connection_string auth takes priority
self.blob_service_client = AsyncBlobServiceClient.from_connection_string(
connection_string, **extra
)
return self.blob_service_client
tenant = self._get_field(extra, "tenant_id")
if tenant:
# use Active Directory auth
app_id = conn.login
app_secret = conn.password
token_credential = AsyncClientSecretCredential(
tenant, app_id, app_secret, **client_secret_auth_config
)
self.blob_service_client = AsyncBlobServiceClient(
account_url=conn.host, credential=token_credential, **extra # type:ignore[arg-type]
)
return self.blob_service_client
account_url = conn.host if conn.host else f"https://{conn.login}.blob.core.windows.net/"
if self.public_read:
# Here we use anonymous public read
# more info
# https://docs.microsoft.com/en-us/azure/storage/blobs/storage-manage-access-to-resources
self.blob_service_client = AsyncBlobServiceClient(account_url=account_url, **extra)
return self.blob_service_client
shared_access_key = self._get_field(extra, "shared_access_key")
if shared_access_key:
# using shared access key
self.blob_service_client = AsyncBlobServiceClient(
account_url=account_url, credential=shared_access_key, **extra
)
return self.blob_service_client
sas_token = self._get_field(extra, "sas_token")
if sas_token:
if sas_token.startswith("https"):
self.blob_service_client = AsyncBlobServiceClient(account_url=sas_token, **extra)
else:
self.blob_service_client = AsyncBlobServiceClient(
account_url=f"{account_url}/{sas_token}", **extra
)
return self.blob_service_client
# Fall back to old auth (password) or use managed identity if not provided.
credential = conn.password
if not credential:
credential = AsyncDefaultAzureCredential()
self.log.info("Using DefaultAzureCredential as credential")
self.blob_service_client = AsyncBlobServiceClient(
account_url=account_url,
credential=credential,
**extra,
)
return self.blob_service_client
def _get_blob_client(self, container_name: str, blob_name: str) -> AsyncBlobClient:
"""
Instantiate a blob client.
:param container_name: the name of the blob container
:param blob_name: the name of the blob. This needs not be existing
"""
return self.blob_service_client.get_blob_client(container=container_name, blob=blob_name)
async def check_for_blob_async(self, container_name: str, blob_name: str, **kwargs: Any) -> bool:
"""
Check if a blob exists on Azure Blob Storage.
:param container_name: name of the container
:param blob_name: name of the blob
:param kwargs: optional keyword arguments for ``BlobClient.get_blob_properties``
"""
try:
await self._get_blob_client(container_name, blob_name).get_blob_properties(**kwargs)
except ResourceNotFoundError:
return False
return True
def _get_container_client(self, container_name: str) -> AsyncContainerClient:
"""
Instantiate a container client.
:param container_name: the name of the container
"""
return self.blob_service_client.get_container_client(container_name)
async def get_blobs_list_async(
self,
container_name: str,
prefix: str | None = None,
include: list[str] | None = None,
delimiter: str = "/",
**kwargs: Any,
) -> list[BlobProperties]:
"""
List blobs in a given container.
:param container_name: the name of the container
:param prefix: filters the results to return only blobs whose names
begin with the specified prefix.
:param include: specifies one or more additional datasets to include in the
response. Options include: ``snapshots``, ``metadata``, ``uncommittedblobs``,
            ``copy``, ``deleted``.
:param delimiter: filters objects based on the delimiter (for e.g '.csv')
"""
container = self._get_container_client(container_name)
blob_list: list[BlobProperties] = []
blobs = container.walk_blobs(name_starts_with=prefix, include=include, delimiter=delimiter, **kwargs)
async for blob in blobs:
blob_list.append(blob)
return blob_list
async def check_for_prefix_async(self, container_name: str, prefix: str, **kwargs: Any) -> bool:
"""
Check if a prefix exists on Azure Blob storage.
:param container_name: Name of the container.
:param prefix: Prefix of the blob.
:param kwargs: Optional keyword arguments for ``ContainerClient.walk_blobs``
"""
blobs = await self.get_blobs_list_async(container_name=container_name, prefix=prefix, **kwargs)
return len(blobs) > 0
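    # A minimal async usage sketch (illustrative only; typically awaited from a deferrable trigger;
    # container and blob names are placeholders):
    #
    #   hook = WasbAsyncHook(wasb_conn_id="wasb_default")
    #   await hook.get_async_conn()
    #   exists = await hook.check_for_blob_async("my-container", "data.csv")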
| 29,495 | 40.426966 | 110 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/hooks/container_instance.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from azure.mgmt.containerinstance import ContainerInstanceManagementClient
from azure.mgmt.containerinstance.models import ContainerGroup
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.microsoft.azure.hooks.base_azure import AzureBaseHook
class AzureContainerInstanceHook(AzureBaseHook):
"""
A hook to communicate with Azure Container Instances.
This hook requires a service principal in order to work.
After creating this service principal
(Azure Active Directory/App Registrations), you need to fill in the
client_id (Application ID) as login, the generated password as password,
    and tenantId and subscriptionId in the extras field as JSON.
:param azure_conn_id: :ref:`Azure connection id<howto/connection:azure>` of
a service principal which will be used to start the container instance.
"""
conn_name_attr = "azure_conn_id"
default_conn_name = "azure_default"
conn_type = "azure_container_instance"
hook_name = "Azure Container Instance"
def __init__(self, azure_conn_id: str = default_conn_name) -> None:
super().__init__(sdk_client=ContainerInstanceManagementClient, conn_id=azure_conn_id)
self.connection = self.get_conn()
def create_or_update(self, resource_group: str, name: str, container_group: ContainerGroup) -> None:
"""
Create a new container group.
:param resource_group: the name of the resource group
:param name: the name of the container group
:param container_group: the properties of the container group
"""
self.connection.container_groups.create_or_update(resource_group, name, container_group)
def get_state_exitcode_details(self, resource_group: str, name: str) -> tuple:
"""
Get the state and exitcode of a container group.
:param resource_group: the name of the resource group
:param name: the name of the container group
:return: A tuple with the state, exitcode, and details.
If the exitcode is unknown 0 is returned.
"""
warnings.warn(
"get_state_exitcode_details() is deprecated. Related method is get_state()",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
cg_state = self.get_state(resource_group, name)
c_state = cg_state.containers[0].instance_view.current_state
return (c_state.state, c_state.exit_code, c_state.detail_status)
def get_messages(self, resource_group: str, name: str) -> list:
"""
Get the messages of a container group.
:param resource_group: the name of the resource group
:param name: the name of the container group
:return: A list of the event messages
"""
warnings.warn(
"get_messages() is deprecated. Related method is get_state()",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
cg_state = self.get_state(resource_group, name)
instance_view = cg_state.containers[0].instance_view
return [event.message for event in instance_view.events]
def get_state(self, resource_group: str, name: str) -> ContainerGroup:
"""
Get the state of a container group.
:param resource_group: the name of the resource group
:param name: the name of the container group
:return: ContainerGroup
"""
return self.connection.container_groups.get(resource_group, name, raw=False)
def get_logs(self, resource_group: str, name: str, tail: int = 1000) -> list:
"""
Get the tail from logs of a container group.
:param resource_group: the name of the resource group
:param name: the name of the container group
:param tail: the size of the tail
:return: A list of log messages
"""
logs = self.connection.container.list_logs(resource_group, name, name, tail=tail)
return logs.content.splitlines(True)
def delete(self, resource_group: str, name: str) -> None:
"""
Delete a container group.
:param resource_group: the name of the resource group
:param name: the name of the container group
"""
self.connection.container_groups.delete(resource_group, name)
def exists(self, resource_group: str, name: str) -> bool:
"""
Test if a container group exists.
:param resource_group: the name of the resource group
:param name: the name of the container group
"""
for container in self.connection.container_groups.list_by_resource_group(resource_group):
if container.name == name:
return True
return False
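    # A minimal usage sketch (illustrative only; resource group and container group names are placeholders):
    #
    #   hook = AzureContainerInstanceHook(azure_conn_id="azure_default")
    #   if hook.exists("my-rg", "my-container-group"):
    #       state = hook.get_state("my-rg", "my-container-group")
    #       for line in hook.get_logs("my-rg", "my-container-group", tail=50):
    #           print(line, end="")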
def test_connection(self):
"""Test a configured Azure Container Instance connection."""
try:
# Attempt to list existing container groups under the configured subscription and retrieve the
# first in the returned iterator. We need to _actually_ try to retrieve an object to properly
# test the connection.
next(self.connection.container_groups.list(), None)
except Exception as e:
return False, str(e)
return True, "Successfully connected to Azure Container Instance."
| 6,183 | 39.953642 | 106 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/hooks/base_azure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from azure.common.client_factory import get_client_from_auth_file, get_client_from_json_dict
from azure.common.credentials import ServicePrincipalCredentials
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
class AzureBaseHook(BaseHook):
"""
    This hook acts as a base hook for Azure services.
    It offers several authentication mechanisms to authenticate
    the client library used for upstream Azure hooks.
:param sdk_client: The SDKClient to use.
:param conn_id: The :ref:`Azure connection id<howto/connection:azure>`
which refers to the information to connect to the service.
"""
conn_name_attr = "azure_conn_id"
default_conn_name = "azure_default"
conn_type = "azure"
hook_name = "Azure"
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"extra__azure__tenantId": StringField(
lazy_gettext("Azure Tenant ID"), widget=BS3TextFieldWidget()
),
"extra__azure__subscriptionId": StringField(
lazy_gettext("Azure Subscription ID"), widget=BS3TextFieldWidget()
),
}
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
import json
return {
"hidden_fields": ["schema", "port", "host"],
"relabeling": {
"login": "Azure Client ID",
"password": "Azure Secret",
},
"placeholders": {
"extra": json.dumps(
{
"key_path": "path to json file for auth",
"key_json": "specifies json dict for auth",
},
indent=1,
),
"login": "client_id (token credentials auth)",
"password": "secret (token credentials auth)",
"extra__azure__tenantId": "tenantId (token credentials auth)",
"extra__azure__subscriptionId": "subscriptionId (token credentials auth)",
},
}
def __init__(self, sdk_client: Any, conn_id: str = "azure_default"):
self.sdk_client = sdk_client
self.conn_id = conn_id
super().__init__()
def get_conn(self) -> Any:
"""
Authenticates the resource using the connection id passed during init.
:return: the authenticated client.
"""
conn = self.get_connection(self.conn_id)
tenant = conn.extra_dejson.get("extra__azure__tenantId") or conn.extra_dejson.get("tenantId")
subscription_id = conn.extra_dejson.get("extra__azure__subscriptionId") or conn.extra_dejson.get(
"subscriptionId"
)
key_path = conn.extra_dejson.get("key_path")
if key_path:
if not key_path.endswith(".json"):
raise AirflowException("Unrecognised extension for key file.")
self.log.info("Getting connection using a JSON key file.")
return get_client_from_auth_file(client_class=self.sdk_client, auth_path=key_path)
key_json = conn.extra_dejson.get("key_json")
if key_json:
self.log.info("Getting connection using a JSON config.")
return get_client_from_json_dict(client_class=self.sdk_client, config_dict=key_json)
self.log.info("Getting connection using specific credentials and subscription_id.")
return self.sdk_client(
credentials=ServicePrincipalCredentials(
client_id=conn.login, secret=conn.password, tenant=tenant
),
subscription_id=subscription_id,
)
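    # A minimal usage sketch (illustrative only): authenticate an arbitrary management SDK client
    # through the "azure_default" connection. ComputeManagementClient is just an example client class.
    #
    #   from azure.mgmt.compute import ComputeManagementClient
    #
    #   client = AzureBaseHook(sdk_client=ComputeManagementClient, conn_id="azure_default").get_conn()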
| 4,785 | 37.910569 | 105 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/hooks/batch.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
from datetime import timedelta
from typing import Any
from azure.batch import BatchServiceClient, batch_auth, models as batch_models
from azure.batch.models import JobAddParameter, PoolAddParameter, TaskAddParameter
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection
from airflow.providers.microsoft.azure.utils import get_field
from airflow.utils import timezone
class AzureBatchHook(BaseHook):
"""
Hook for Azure Batch APIs.
:param azure_batch_conn_id: :ref:`Azure Batch connection id<howto/connection:azure_batch>`
of a service principal which will be used to start the container instance.
"""
conn_name_attr = "azure_batch_conn_id"
default_conn_name = "azure_batch_default"
conn_type = "azure_batch"
hook_name = "Azure Batch Service"
def _get_field(self, extras, name):
return get_field(
conn_id=self.conn_id,
conn_type=self.conn_type,
extras=extras,
field_name=name,
)
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"account_url": StringField(lazy_gettext("Batch Account URL"), widget=BS3TextFieldWidget()),
}
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "host", "extra"],
"relabeling": {
"login": "Batch Account Name",
"password": "Batch Account Access Key",
},
}
def __init__(self, azure_batch_conn_id: str = default_conn_name) -> None:
super().__init__()
self.conn_id = azure_batch_conn_id
self.connection = self.get_conn()
def _connection(self) -> Connection:
"""Get connected to Azure Batch service."""
conn = self.get_connection(self.conn_id)
return conn
def get_conn(self):
"""
Get the Batch client connection.
:return: Azure Batch client
"""
conn = self._connection()
batch_account_url = self._get_field(conn.extra_dejson, "account_url")
if not batch_account_url:
raise AirflowException("Batch Account URL parameter is missing.")
credentials = batch_auth.SharedKeyCredentials(conn.login, conn.password)
batch_client = BatchServiceClient(credentials, batch_url=batch_account_url)
return batch_client
def configure_pool(
self,
pool_id: str,
vm_size: str,
vm_node_agent_sku_id: str,
vm_publisher: str | None = None,
vm_offer: str | None = None,
sku_starts_with: str | None = None,
vm_sku: str | None = None,
vm_version: str | None = None,
os_family: str | None = None,
os_version: str | None = None,
display_name: str | None = None,
target_dedicated_nodes: int | None = None,
use_latest_image_and_sku: bool = False,
**kwargs,
) -> PoolAddParameter:
"""
Configures a pool.
:param pool_id: A string that uniquely identifies the Pool within the Account
:param vm_size: The size of virtual machines in the Pool.
:param display_name: The display name for the Pool
:param target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool.
:param use_latest_image_and_sku: Whether to use the latest verified vm image and sku
:param vm_publisher: The publisher of the Azure Virtual Machines Marketplace Image.
For example, Canonical or MicrosoftWindowsServer.
:param vm_offer: The offer type of the Azure Virtual Machines Marketplace Image.
For example, UbuntuServer or WindowsServer.
:param sku_starts_with: The start name of the sku to search
:param vm_sku: The name of the virtual machine sku to use
        :param vm_version: The version of the virtual machine
:param vm_node_agent_sku_id: The node agent sku id of the virtual machine
:param os_family: The Azure Guest OS family to be installed on the virtual machines in the Pool.
:param os_version: The OS family version
"""
if use_latest_image_and_sku:
self.log.info("Using latest verified virtual machine image with node agent sku")
sku_to_use, image_ref_to_use = self._get_latest_verified_image_vm_and_sku(
publisher=vm_publisher, offer=vm_offer, sku_starts_with=sku_starts_with
)
pool = batch_models.PoolAddParameter(
id=pool_id,
vm_size=vm_size,
display_name=display_name,
virtual_machine_configuration=batch_models.VirtualMachineConfiguration(
image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use
),
target_dedicated_nodes=target_dedicated_nodes,
**kwargs,
)
elif os_family:
self.log.info(
"Using cloud service configuration to create pool, virtual machine configuration ignored"
)
pool = batch_models.PoolAddParameter(
id=pool_id,
vm_size=vm_size,
display_name=display_name,
cloud_service_configuration=batch_models.CloudServiceConfiguration(
os_family=os_family, os_version=os_version
),
target_dedicated_nodes=target_dedicated_nodes,
**kwargs,
)
else:
self.log.info("Using virtual machine configuration to create a pool")
pool = batch_models.PoolAddParameter(
id=pool_id,
vm_size=vm_size,
display_name=display_name,
virtual_machine_configuration=batch_models.VirtualMachineConfiguration(
image_reference=batch_models.ImageReference(
publisher=vm_publisher,
offer=vm_offer,
sku=vm_sku,
version=vm_version,
),
node_agent_sku_id=vm_node_agent_sku_id,
),
target_dedicated_nodes=target_dedicated_nodes,
**kwargs,
)
return pool
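    # Editor's note: a hedged usage sketch, not part of the original module, showing how
    # configure_pool() and create_pool() are typically combined. The pool id, VM size and
    # image values are illustrative placeholders; the publisher/offer/sku filters are given
    # lower-case to match the lowercased comparison used by the hook:
    #
    #   hook = AzureBatchHook(azure_batch_conn_id="azure_batch_default")
    #   pool = hook.configure_pool(
    #       pool_id="example-pool",
    #       vm_size="Standard_A1_v2",
    #       vm_node_agent_sku_id="batch.node.ubuntu 20.04",
    #       vm_publisher="canonical",
    #       vm_offer="ubuntuserver",
    #       sku_starts_with="20-04-lts",
    #       use_latest_image_and_sku=True,
    #       target_dedicated_nodes=1,
    #   )
    #   hook.create_pool(pool)  # logged no-op if the pool already exists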
def create_pool(self, pool: PoolAddParameter) -> None:
"""
Creates a pool if not already existing.
:param pool: the pool object to create
"""
try:
self.log.info("Attempting to create a pool: %s", pool.id)
self.connection.pool.add(pool)
self.log.info("Created pool: %s", pool.id)
except batch_models.BatchErrorException as err:
if not err.error or err.error.code != "PoolExists":
raise
else:
self.log.info("Pool %s already exists", pool.id)
def _get_latest_verified_image_vm_and_sku(
self,
publisher: str | None = None,
offer: str | None = None,
sku_starts_with: str | None = None,
) -> tuple:
"""
Get latest verified image vm and sku.
:param publisher: The publisher of the Azure Virtual Machines Marketplace Image.
For example, Canonical or MicrosoftWindowsServer.
:param offer: The offer type of the Azure Virtual Machines Marketplace Image.
For example, UbuntuServer or WindowsServer.
:param sku_starts_with: The start name of the sku to search
"""
options = batch_models.AccountListSupportedImagesOptions(filter="verificationType eq 'verified'")
images = self.connection.account.list_supported_images(account_list_supported_images_options=options)
# pick the latest supported sku
skus_to_use = [
(image.node_agent_sku_id, image.image_reference)
for image in images
if image.image_reference.publisher.lower() == publisher
and image.image_reference.offer.lower() == offer
and image.image_reference.sku.startswith(sku_starts_with)
]
# pick first
agent_sku_id, image_ref_to_use = skus_to_use[0]
return agent_sku_id, image_ref_to_use
def wait_for_all_node_state(self, pool_id: str, node_state: set) -> list:
"""
Wait for all nodes in a pool to reach given states.
:param pool_id: A string that identifies the pool
:param node_state: A set of batch_models.ComputeNodeState
"""
self.log.info("waiting for all nodes in pool %s to reach one of: %s", pool_id, node_state)
while True:
# refresh pool to ensure that there is no resize error
pool = self.connection.pool.get(pool_id)
if pool.resize_errors is not None:
resize_errors = "\n".join(repr(e) for e in pool.resize_errors)
raise RuntimeError(f"resize error encountered for pool {pool.id}:\n{resize_errors}")
nodes = list(self.connection.compute_node.list(pool.id))
if len(nodes) >= pool.target_dedicated_nodes and all(node.state in node_state for node in nodes):
return nodes
# Allow the timeout to be controlled by the AzureBatchOperator
# specified timeout. This way we don't interrupt a startTask inside
# the pool
time.sleep(10)
def configure_job(
self,
job_id: str,
pool_id: str,
display_name: str | None = None,
**kwargs,
) -> JobAddParameter:
"""
Configures a job for use in the pool.
:param job_id: A string that uniquely identifies the job within the account
:param pool_id: A string that identifies the pool
:param display_name: The display name for the job
"""
job = batch_models.JobAddParameter(
id=job_id,
pool_info=batch_models.PoolInformation(pool_id=pool_id),
display_name=display_name,
**kwargs,
)
return job
def create_job(self, job: JobAddParameter) -> None:
"""
Creates a job in the pool.
:param job: The job object to create
"""
try:
self.connection.job.add(job)
self.log.info("Job %s created", job.id)
except batch_models.BatchErrorException as err:
if not err.error or err.error.code != "JobExists":
raise
else:
self.log.info("Job %s already exists", job.id)
def configure_task(
self,
task_id: str,
command_line: str,
display_name: str | None = None,
container_settings=None,
**kwargs,
) -> TaskAddParameter:
"""
Creates a task.
:param task_id: A string that identifies the task to create
:param command_line: The command line of the Task.
:param display_name: A display name for the Task
:param container_settings: The settings for the container under which the Task runs.
If the Pool that will run this Task has containerConfiguration set,
this must be set as well. If the Pool that will run this Task doesn't have
containerConfiguration set, this must not be set.
"""
task = batch_models.TaskAddParameter(
id=task_id,
command_line=command_line,
display_name=display_name,
container_settings=container_settings,
**kwargs,
)
self.log.info("Task created: %s", task_id)
return task
def add_single_task_to_job(self, job_id: str, task: TaskAddParameter) -> None:
"""
Add a single task to given job if it doesn't exist.
:param job_id: A string that identifies the given job
:param task: The task to add
"""
try:
self.connection.task.add(job_id=job_id, task=task)
except batch_models.BatchErrorException as err:
if not err.error or err.error.code != "TaskExists":
raise
else:
self.log.info("Task %s already exists", task.id)
def wait_for_job_tasks_to_complete(self, job_id: str, timeout: int) -> list[batch_models.CloudTask]:
"""
Wait for tasks in a particular job to complete.
:param job_id: A string that identifies the job
:param timeout: The amount of time to wait before timing out in minutes
"""
timeout_time = timezone.utcnow() + timedelta(minutes=timeout)
while timezone.utcnow() < timeout_time:
            tasks = list(self.connection.task.list(job_id))  # materialize so the result can be scanned twice
incomplete_tasks = [task for task in tasks if task.state != batch_models.TaskState.completed]
if not incomplete_tasks:
# detect if any task in job has failed
fail_tasks = [
task
for task in tasks
                    if task.execution_info.result == batch_models.TaskExecutionResult.failure
]
return fail_tasks
for task in incomplete_tasks:
self.log.info("Waiting for %s to complete, currently on %s state", task.id, task.state)
time.sleep(15)
raise TimeoutError("Timed out waiting for tasks to complete")
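    # Editor's note: a hedged end-to-end sketch, not part of the original module, tying the
    # job/task helpers above together; all identifiers are illustrative placeholders:
    #
    #   job = hook.configure_job(job_id="example-job", pool_id="example-pool")
    #   hook.create_job(job)  # logged no-op if the job already exists
    #   task = hook.configure_task(task_id="task-1", command_line="/bin/bash -c 'echo hello'")
    #   hook.add_single_task_to_job(job_id="example-job", task=task)
    #   failed = hook.wait_for_job_tasks_to_complete(job_id="example-job", timeout=25)
    #   if failed:
    #       raise RuntimeError(f"{len(failed)} task(s) failed")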
def test_connection(self):
"""Test a configured Azure Batch connection."""
try:
# Attempt to list existing jobs under the configured Batch account and retrieve
# the first in the returned iterator. The Azure Batch API does allow for creation of a
# BatchServiceClient with incorrect values but then will fail properly once items are
# retrieved using the client. We need to _actually_ try to retrieve an object to properly
# test the connection.
next(self.get_conn().job.list(), None)
except Exception as e:
return False, str(e)
return True, "Successfully connected to Azure Batch."
| 15,302 | 38.038265 | 109 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/hooks/synapse.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
from typing import Any, Union
from azure.identity import ClientSecretCredential, DefaultAzureCredential
from azure.synapse.spark import SparkClient
from azure.synapse.spark.models import SparkBatchJobOptions
from airflow.exceptions import AirflowTaskTimeout
from airflow.hooks.base import BaseHook
from airflow.providers.microsoft.azure.utils import get_field
Credentials = Union[ClientSecretCredential, DefaultAzureCredential]
class AzureSynapseSparkBatchRunStatus:
"""Azure Synapse Spark Job operation statuses."""
NOT_STARTED = "not_started"
STARTING = "starting"
RUNNING = "running"
IDLE = "idle"
BUSY = "busy"
SHUTTING_DOWN = "shutting_down"
ERROR = "error"
DEAD = "dead"
KILLED = "killed"
SUCCESS = "success"
TERMINAL_STATUSES = {SUCCESS, DEAD, KILLED, ERROR}
class AzureSynapseHook(BaseHook):
"""
A hook to interact with Azure Synapse.
:param azure_synapse_conn_id: The :ref:`Azure Synapse connection id<howto/connection:synapse>`.
:param spark_pool: The Apache Spark pool used to submit the job
"""
conn_type: str = "azure_synapse"
conn_name_attr: str = "azure_synapse_conn_id"
default_conn_name: str = "azure_synapse_default"
hook_name: str = "Azure Synapse"
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"tenantId": StringField(lazy_gettext("Tenant ID"), widget=BS3TextFieldWidget()),
"subscriptionId": StringField(lazy_gettext("Subscription ID"), widget=BS3TextFieldWidget()),
}
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "extra"],
"relabeling": {"login": "Client ID", "password": "Secret", "host": "Synapse Workspace URL"},
}
def __init__(self, azure_synapse_conn_id: str = default_conn_name, spark_pool: str = ""):
self.job_id: int | None = None
self._conn: SparkClient | None = None
self.conn_id = azure_synapse_conn_id
self.spark_pool = spark_pool
super().__init__()
def _get_field(self, extras, name):
return get_field(
conn_id=self.conn_id,
conn_type=self.conn_type,
extras=extras,
field_name=name,
)
def get_conn(self) -> SparkClient:
if self._conn is not None:
return self._conn
conn = self.get_connection(self.conn_id)
extras = conn.extra_dejson
tenant = self._get_field(extras, "tenantId")
spark_pool = self.spark_pool
livy_api_version = "2022-02-22-preview"
subscription_id = self._get_field(extras, "subscriptionId")
if not subscription_id:
raise ValueError("A Subscription ID is required to connect to Azure Synapse.")
credential: Credentials
if conn.login is not None and conn.password is not None:
if not tenant:
raise ValueError("A Tenant ID is required when authenticating with Client ID and Secret.")
credential = ClientSecretCredential(
client_id=conn.login, client_secret=conn.password, tenant_id=tenant
)
else:
credential = DefaultAzureCredential()
self._conn = self._create_client(credential, conn.host, spark_pool, livy_api_version, subscription_id)
return self._conn
@staticmethod
def _create_client(credential: Credentials, host, spark_pool, livy_api_version, subscription_id: str):
return SparkClient(
credential=credential,
endpoint=host,
spark_pool_name=spark_pool,
livy_api_version=livy_api_version,
subscription_id=subscription_id,
)
def run_spark_job(
self,
payload: SparkBatchJobOptions,
):
"""
Run a job in an Apache Spark pool.
:param payload: Livy compatible payload which represents the spark job that a user wants to submit.
"""
job = self.get_conn().spark_batch.create_spark_batch_job(payload)
self.job_id = job.id
return job
def get_job_run_status(self):
"""Get the job run status."""
job_run_status = self.get_conn().spark_batch.get_spark_batch_job(batch_id=self.job_id).state
return job_run_status
def wait_for_job_run_status(
self,
job_id: int | None,
expected_statuses: str | set[str],
check_interval: int = 60,
timeout: int = 60 * 60 * 24 * 7,
) -> bool:
"""
Waits for a job run to match an expected status.
:param job_id: The job run identifier.
:param expected_statuses: The desired status(es) to check against a job run's current status.
:param check_interval: Time in seconds to check on a job run's status.
:param timeout: Time in seconds to wait for a job to reach a terminal status or the expected
status.
"""
job_run_status = self.get_job_run_status()
start_time = time.monotonic()
while (
job_run_status not in AzureSynapseSparkBatchRunStatus.TERMINAL_STATUSES
and job_run_status not in expected_statuses
):
# Check if the job-run duration has exceeded the ``timeout`` configured.
if start_time + timeout < time.monotonic():
raise AirflowTaskTimeout(
f"Job {job_id} has not reached a terminal status after {timeout} seconds."
)
# Wait to check the status of the job run based on the ``check_interval`` configured.
self.log.info("Sleeping for %s seconds", str(check_interval))
time.sleep(check_interval)
job_run_status = self.get_job_run_status()
self.log.info("Current spark job run status is %s", job_run_status)
return job_run_status in expected_statuses
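    # Editor's note: a hedged usage sketch, not part of the original module, showing how a
    # Spark batch job is typically submitted and awaited with this hook. SparkBatchJobOptions
    # is already imported at the top of this module; the job name and file URI are
    # illustrative placeholders:
    #
    #   hook = AzureSynapseHook(azure_synapse_conn_id="azure_synapse_default", spark_pool="mysparkpool")
    #   job = hook.run_spark_job(
    #       SparkBatchJobOptions(
    #           name="wordcount",
    #           file="abfss://container@account.dfs.core.windows.net/jobs/wordcount.py",
    #       )
    #   )
    #   ok = hook.wait_for_job_run_status(job.id, expected_statuses={AzureSynapseSparkBatchRunStatus.SUCCESS})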
def cancel_job_run(
self,
job_id: int,
) -> None:
"""
Cancel the spark job run.
:param job_id: The synapse spark job identifier.
"""
self.get_conn().spark_batch.cancel_spark_batch_job(job_id)
| 7,320 | 34.887255 | 110 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/hooks/data_lake.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
from azure.core.exceptions import ResourceExistsError, ResourceNotFoundError
from azure.datalake.store import core, lib, multithread
from azure.identity import ClientSecretCredential
from azure.storage.filedatalake import (
DataLakeDirectoryClient,
DataLakeFileClient,
DataLakeServiceClient,
DirectoryProperties,
FileSystemClient,
FileSystemProperties,
)
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.providers.microsoft.azure.utils import _ensure_prefixes, get_field
class AzureDataLakeHook(BaseHook):
"""Integration with Azure Data Lake.
AzureDataLakeHook communicates via a REST API compatible with WebHDFS. Make
sure that a Airflow connection of type ``azure_data_lake`` exists.
Authorization can be done by supplying a *login* (=Client ID), *password*
(=Client Secret), and extra fields *tenant* (Tenant) and *account_name*
(Account Name). See connection ``azure_data_lake_default`` for an example.
Client ID and secret should be in user and password parameters.
Tenant and account name should be extra field as
``{"tenant": "<TENANT>", "account_name": "ACCOUNT_NAME"}``.
:param azure_data_lake_conn_id: Reference to
:ref:`Azure Data Lake connection<howto/connection:adl>`.
"""
conn_name_attr = "azure_data_lake_conn_id"
default_conn_name = "azure_data_lake_default"
conn_type = "azure_data_lake"
hook_name = "Azure Data Lake"
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"tenant": StringField(lazy_gettext("Azure Tenant ID"), widget=BS3TextFieldWidget()),
"account_name": StringField(
lazy_gettext("Azure DataLake Store Name"), widget=BS3TextFieldWidget()
),
}
@staticmethod
@_ensure_prefixes(conn_type="azure_data_lake")
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema", "port", "host", "extra"],
"relabeling": {
"login": "Azure Client ID",
"password": "Azure Client Secret",
},
"placeholders": {
"login": "client id",
"password": "secret",
"tenant": "tenant id",
"account_name": "datalake store",
},
}
def __init__(self, azure_data_lake_conn_id: str = default_conn_name) -> None:
super().__init__()
self.conn_id = azure_data_lake_conn_id
self._conn: core.AzureDLFileSystem | None = None
self.account_name: str | None = None
def _get_field(self, extras, name):
return get_field(
conn_id=self.conn_id,
conn_type=self.conn_type,
extras=extras,
field_name=name,
)
def get_conn(self) -> core.AzureDLFileSystem:
"""Return a AzureDLFileSystem object."""
if not self._conn:
conn = self.get_connection(self.conn_id)
extras = conn.extra_dejson
self.account_name = self._get_field(extras, "account_name")
tenant = self._get_field(extras, "tenant")
adl_creds = lib.auth(tenant_id=tenant, client_secret=conn.password, client_id=conn.login)
self._conn = core.AzureDLFileSystem(adl_creds, store_name=self.account_name)
self._conn.connect()
return self._conn
def check_for_file(self, file_path: str) -> bool:
"""Check if a file exists on Azure Data Lake.
:param file_path: Path and name of the file.
:return: True if the file exists, False otherwise.
"""
try:
files = self.get_conn().glob(file_path, details=False, invalidate_cache=True)
return len(files) == 1
except FileNotFoundError:
return False
def upload_file(
self,
local_path: str,
remote_path: str,
nthreads: int = 64,
overwrite: bool = True,
buffersize: int = 4194304,
blocksize: int = 4194304,
**kwargs,
) -> None:
"""Upload a file to Azure Data Lake.
:param local_path: local path. Can be single file, directory (in which case,
upload recursively) or glob pattern. Recursive glob patterns using `**`
are not supported.
:param remote_path: Remote path to upload to; if multiple files, this is the
directory root to write within.
:param nthreads: Number of threads to use. If None, uses the number of cores.
:param overwrite: Whether to forcibly overwrite existing files/directories.
If False and remote path is a directory, will quit regardless if any files
would be overwritten or not. If True, only matching filenames are actually
overwritten.
:param buffersize: int [2**22]
Number of bytes for internal buffer. This block cannot be bigger than
a chunk and cannot be smaller than a block.
:param blocksize: int [2**22]
Number of bytes for a block. Within each chunk, we write a smaller
block for each API call. This block cannot be bigger than a chunk.
"""
multithread.ADLUploader(
self.get_conn(),
lpath=local_path,
rpath=remote_path,
nthreads=nthreads,
overwrite=overwrite,
buffersize=buffersize,
blocksize=blocksize,
**kwargs,
)
def download_file(
self,
local_path: str,
remote_path: str,
nthreads: int = 64,
overwrite: bool = True,
buffersize: int = 4194304,
blocksize: int = 4194304,
**kwargs,
    ) -> None:
        """Download a file from Azure Data Lake Storage.
:param local_path: local path. If downloading a single file, will write to this
specific file, unless it is an existing directory, in which case a file is
created within it. If downloading multiple files, this is the root
directory to write within. Will create directories as required.
:param remote_path: remote path/globstring to use to find remote files.
Recursive glob patterns using `**` are not supported.
:param nthreads: Number of threads to use. If None, uses the number of cores.
:param overwrite: Whether to forcibly overwrite existing files/directories.
If False and remote path is a directory, will quit regardless if any files
would be overwritten or not. If True, only matching filenames are actually
overwritten.
:param buffersize: int [2**22]
Number of bytes for internal buffer. This block cannot be bigger than
a chunk and cannot be smaller than a block.
:param blocksize: int [2**22]
Number of bytes for a block. Within each chunk, we write a smaller
block for each API call. This block cannot be bigger than a chunk.
"""
multithread.ADLDownloader(
self.get_conn(),
lpath=local_path,
rpath=remote_path,
nthreads=nthreads,
overwrite=overwrite,
buffersize=buffersize,
blocksize=blocksize,
**kwargs,
)
def list(self, path: str) -> list:
"""List files in Azure Data Lake Storage.
:param path: full path/globstring to use to list files in ADLS
"""
if "*" in path:
return self.get_conn().glob(path)
else:
return self.get_conn().walk(path)
def remove(self, path: str, recursive: bool = False, ignore_not_found: bool = True) -> None:
"""Remove files in Azure Data Lake Storage.
:param path: A directory or file to remove in ADLS
:param recursive: Whether to loop into directories in the location and remove the files
:param ignore_not_found: Whether to raise error if file to delete is not found
"""
try:
self.get_conn().remove(path=path, recursive=recursive)
except FileNotFoundError:
if ignore_not_found:
self.log.info("File %s not found", path)
else:
raise AirflowException(f"File {path} not found")
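    # Editor's note: a hedged usage sketch, not part of the original module, for the ADLS Gen1
    # hook above; the local and remote paths are illustrative placeholders:
    #
    #   hook = AzureDataLakeHook(azure_data_lake_conn_id="azure_data_lake_default")
    #   hook.upload_file(local_path="/tmp/report.csv", remote_path="landing/report.csv")
    #   if hook.check_for_file("landing/report.csv"):
    #       print(hook.list("landing/*.csv"))
    #   hook.remove("landing/report.csv", ignore_not_found=True)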
class AzureDataLakeStorageV2Hook(BaseHook):
"""Interact with a ADLS gen2 storage account.
It mainly helps to create and manage directories and files in storage
    accounts that have a hierarchical namespace. It uses the ``adls`` connection
    details to create a ``DataLakeServiceClient`` object.
    Because the WASB integration is marked as legacy and Azure Data Lake Storage Gen1
    is being retired, this hook is provided for interacting with ADLS Gen2 storage accounts.
.. seealso::
https://learn.microsoft.com/en-us/azure/storage/blobs/data-lake-storage-directory-file-acl-python
:param adls_conn_id: Reference to the :ref:`adls connection <howto/connection:adls>`.
    :param public_read: Whether anonymous public read access should be used. Defaults to False.
"""
conn_name_attr = "adls_conn_id"
default_conn_name = "adls_default"
conn_type = "adls"
hook_name = "Azure Date Lake Storage V2"
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3PasswordFieldWidget, BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import PasswordField, StringField
return {
"connection_string": PasswordField(
lazy_gettext("ADLS Gen2 Connection String (optional)"), widget=BS3PasswordFieldWidget()
),
"tenant_id": StringField(
lazy_gettext("Tenant ID (Active Directory)"), widget=BS3TextFieldWidget()
),
}
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema", "port"],
"relabeling": {
"login": "Client ID (Active Directory)",
"password": "ADLS Gen2 Key / Client Secret (Active Directory)",
"host": "ADLS Gen2 Account Name",
},
"placeholders": {
"extra": "additional options for use with FileService and AzureFileVolume",
"login": "client id",
"password": "key / secret",
"host": "storage account name",
"connection_string": "connection string (overrides auth)",
"tenant_id": "tenant id",
},
}
def __init__(self, adls_conn_id: str, public_read: bool = False) -> None:
super().__init__()
self.conn_id = adls_conn_id
self.public_read = public_read
self.service_client = self.get_conn()
def get_conn(self) -> DataLakeServiceClient: # type: ignore[override]
"""Return the DataLakeServiceClient object."""
conn = self.get_connection(self.conn_id)
extra = conn.extra_dejson or {}
connection_string = self._get_field(extra, "connection_string")
if connection_string:
# connection_string auth takes priority
return DataLakeServiceClient.from_connection_string(connection_string, **extra)
tenant = self._get_field(extra, "tenant_id")
if tenant:
# use Active Directory auth
app_id = conn.login
app_secret = conn.password
token_credential = ClientSecretCredential(tenant, app_id, app_secret)
return DataLakeServiceClient(
account_url=f"https://{conn.host}.dfs.core.windows.net", credential=token_credential, **extra
)
# otherwise, use key auth
credential = conn.password
return DataLakeServiceClient(
account_url=f"https://{conn.host}.dfs.core.windows.net", credential=credential, **extra
)
def _get_field(self, extra_dict, field_name):
prefix = "extra__adls__"
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{prefix}' prefix "
f"when using this method."
)
if field_name in extra_dict:
return extra_dict[field_name] or None
return extra_dict.get(f"{prefix}{field_name}") or None
def create_file_system(self, file_system_name: str) -> None:
"""Create a new file system under the specified account.
A container acts as a file system for your files.
        If a file system with the same name already exists, the resulting
        ``ResourceExistsError`` is caught and logged; any other error is re-raised.
"""
try:
file_system_client = self.service_client.create_file_system(file_system=file_system_name)
self.log.info("Created file system: %s", file_system_client.file_system_name)
except ResourceExistsError:
self.log.info("Attempted to create file system %r but it already exists.", file_system_name)
except Exception as e:
self.log.info("Error while attempting to create file system %r: %s", file_system_name, e)
raise
def get_file_system(self, file_system: FileSystemProperties | str) -> FileSystemClient:
"""Get a client to interact with the specified file system.
:param file_system: This can either be the name of the file system
or an instance of FileSystemProperties.
"""
try:
file_system_client = self.service_client.get_file_system_client(file_system=file_system)
return file_system_client
except ResourceNotFoundError:
            self.log.info("file system %r doesn't exist.", file_system)
raise
except Exception as e:
self.log.info("Error while attempting to get file system %r: %s", file_system, e)
raise
def create_directory(
self, file_system_name: FileSystemProperties | str, directory_name: str, **kwargs
) -> DataLakeDirectoryClient:
"""Create a directory under the specified file system.
:param file_system_name: Name of the file system or instance of FileSystemProperties.
:param directory_name: Name of the directory which needs to be created in the file system.
"""
        result = self.get_file_system(file_system_name).create_directory(directory_name, **kwargs)
return result
def get_directory_client(
self,
file_system_name: FileSystemProperties | str,
directory_name: DirectoryProperties | str,
) -> DataLakeDirectoryClient:
"""Get the specific directory under the specified file system.
:param file_system_name: Name of the file system or instance of FileSystemProperties.
:param directory_name: Name of the directory or instance of DirectoryProperties which needs to be
retrieved from the file system.
"""
try:
directory_client = self.get_file_system(file_system_name).get_directory_client(directory_name)
return directory_client
except ResourceNotFoundError:
            self.log.info(
                "Directory %s doesn't exist in the file system %s", directory_name, file_system_name
            )
raise
except Exception as e:
self.log.info(e)
raise
def create_file(self, file_system_name: FileSystemProperties | str, file_name: str) -> DataLakeFileClient:
"""Create a file under the file system.
:param file_system_name: Name of the file system or instance of FileSystemProperties.
:param file_name: Name of the file which needs to be created in the file system.
"""
file_client = self.get_file_system(file_system_name).create_file(file_name)
return file_client
def upload_file(
self,
file_system_name: FileSystemProperties | str,
file_name: str,
file_path: str,
overwrite: bool = False,
**kwargs: Any,
) -> None:
"""Create a file with data in the file system.
:param file_system_name: Name of the file system or instance of FileSystemProperties.
:param file_name: Name of the file to be created with name.
:param file_path: Path to the file to load.
:param overwrite: Boolean flag to overwrite an existing file or not.
"""
file_client = self.create_file(file_system_name, file_name)
with open(file_path, "rb") as data:
            file_client.upload_data(data, overwrite=overwrite, **kwargs)
def upload_file_to_directory(
self,
file_system_name: str,
directory_name: str,
file_name: str,
file_path: str,
overwrite: bool = False,
**kwargs: Any,
) -> None:
"""Upload data to a file.
:param file_system_name: Name of the file system or instance of FileSystemProperties.
:param directory_name: Name of the directory.
:param file_name: Name of the file to be created with name.
:param file_path: Path to the file to load.
:param overwrite: Boolean flag to overwrite an existing file or not.
"""
directory_client = self.get_directory_client(file_system_name, directory_name=directory_name)
        file_client = directory_client.create_file(file_name, **kwargs)
with open(file_path, "rb") as data:
            file_client.upload_data(data, overwrite=overwrite, **kwargs)
def list_files_directory(
self, file_system_name: FileSystemProperties | str, directory_name: str
) -> list[str]:
"""List files or directories under the specified file system.
:param file_system_name: Name of the file system or instance of FileSystemProperties.
:param directory_name: Name of the directory.
"""
paths = self.get_file_system(file_system=file_system_name).get_paths(directory_name)
directory_lists = []
for path in paths:
directory_lists.append(path.name)
return directory_lists
def list_file_system(
self, prefix: str | None = None, include_metadata: bool = False, **kwargs: Any
) -> list[str]:
"""List file systems under the specified account.
:param prefix:
Filters the results to return only file systems whose names
begin with the specified prefix.
:param include_metadata: Specifies that file system metadata be returned in the response.
The default value is `False`.
"""
file_system = self.service_client.list_file_systems(
name_starts_with=prefix, include_metadata=include_metadata
)
file_system_list = []
for fs in file_system:
file_system_list.append(fs.name)
return file_system_list
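    # Editor's note: a hedged usage sketch, not part of the original module, combining the
    # ADLS Gen2 helpers above; container, directory and file names are illustrative placeholders:
    #
    #   hook = AzureDataLakeStorageV2Hook(adls_conn_id="adls_default")
    #   hook.create_file_system("example-container")
    #   hook.create_directory("example-container", "raw")
    #   hook.upload_file_to_directory(
    #       file_system_name="example-container",
    #       directory_name="raw",
    #       file_name="report.csv",
    #       file_path="/tmp/report.csv",
    #       overwrite=True,
    #   )
    #   print(hook.list_files_directory("example-container", "raw"))
    #   print(hook.list_file_system(prefix="example"))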
def delete_file_system(self, file_system_name: FileSystemProperties | str) -> None:
"""Delete the file system.
:param file_system_name: Name of the file system or instance of FileSystemProperties.
"""
try:
self.service_client.delete_file_system(file_system_name)
self.log.info("Deleted file system: %s", file_system_name)
except ResourceNotFoundError:
            self.log.info("file system %r doesn't exist.", file_system_name)
except Exception as e:
            self.log.info("Error while attempting to delete file system %r: %s", file_system_name, e)
raise
def delete_directory(self, file_system_name: FileSystemProperties | str, directory_name: str) -> None:
"""Delete the specified directory in a file system.
:param file_system_name: Name of the file system or instance of FileSystemProperties.
:param directory_name: Name of the directory.
"""
directory_client = self.get_directory_client(file_system_name, directory_name)
directory_client.delete_directory()
def test_connection(self):
"""Test ADLS Gen2 Storage connection."""
try:
# Attempts to list file systems in ADLS Gen2 Storage and retrieves the first
# file_system from the returned iterator. The Azure DataLake Storage allows creation
# of DataLakeServiceClient even if the credentials are incorrect but will fail properly
# if we try to fetch the file_system. We need to _actually_ try to retrieve a
# file_system to properly test the connection
next(self.get_conn().list_file_systems(), None)
return True, "Successfully connected to ADLS Gen2 Storage."
except Exception as e:
return False, str(e)
| 22,133 | 41.16 | 110 | py |
airflow | airflow-main/airflow/providers/microsoft/azure/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |