repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
airflow | airflow-main/airflow/providers/apache/kylin/hooks/kylin.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from kylinpy import exceptions, kylinpy
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
class KylinHook(BaseHook):
"""
Interact with Kylin to run CubeSource commands and get job status.
:param kylin_conn_id: The connection id as configured in Airflow administration.
:param project: project name
:param dsn: dsn of the Kylin server; when provided it is used directly via kylinpy.create_kylin and the Airflow connection settings are ignored
"""
def __init__(
self,
kylin_conn_id: str = "kylin_default",
project: str | None = None,
dsn: str | None = None,
):
super().__init__()
self.kylin_conn_id = kylin_conn_id
self.project = project
self.dsn = dsn
def get_conn(self):
conn = self.get_connection(self.kylin_conn_id)
if self.dsn:
return kylinpy.create_kylin(self.dsn)
self.project = self.project or conn.schema
return kylinpy.Kylin(
conn.host,
username=conn.login,
password=conn.password,
port=conn.port,
project=self.project,
**conn.extra_dejson,
)
def cube_run(self, datasource_name, op, **op_args):
"""
Run a CubeSource command that is listed in CubeSource.support_invoke_command.
:param datasource_name: name of the Kylin datasource (cube) the command runs against
:param op: command
:param op_args: command args
:return: response
"""
cube_source = self.get_conn().get_datasource(datasource_name)
try:
return cube_source.invoke_command(op, **op_args)
except exceptions.KylinError as err:
raise AirflowException(f"Cube operation {op} error , Message: {err}")
def get_job_status(self, job_id):
"""
Get job status.
:param job_id: kylin job id
:return: job status
"""
return self.get_conn().get_job(job_id).status
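# --- Usage sketch (not part of the provider source). A minimal, hypothetical example of
# driving the hook from task code: the connection id, project, cube name, command and its
# arguments are placeholders and follow kylinpy's CubeSource API as an assumption.
def _example_build_and_poll() -> str:
    hook = KylinHook(kylin_conn_id="kylin_default", project="learn_kylin")
    # Trigger a cube build; "build" is one of the commands in CubeSource.support_invoke_command.
    response = hook.cube_run("kylin_sales_cube", "build", start=1325347200000, end=1325433600000)
    # The build response is assumed to carry the job uuid, which can then be polled.
    job_id = response.get("uuid")
    return hook.get_job_status(job_id)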
| 2,670 | 31.180723 | 84 | py |
airflow | airflow-main/airflow/providers/apache/kylin/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/pinot/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.1.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-pinot:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,537 | 35.619048 | 121 | py |
airflow | airflow-main/airflow/providers/apache/pinot/hooks/pinot.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
import subprocess
from typing import Any, Iterable, Mapping
from pinotdb import connect
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection
from airflow.providers.common.sql.hooks.sql import DbApiHook
class PinotAdminHook(BaseHook):
"""
This hook is a wrapper around the pinot-admin.sh script.
For now, only small subset of its subcommands are implemented,
which are required to ingest offline data into Apache Pinot
(i.e., AddSchema, AddTable, CreateSegment, and UploadSegment).
Their command options are based on Pinot v0.1.0.
Unfortunately, as of v0.1.0, pinot-admin.sh always exits with
status code 0. To address this behavior, users can use the
pinot_admin_system_exit flag. If its value is set to false,
this hook evaluates the result based on the output message
instead of the status code. This behavior of Pinot is supposed
to be improved in the next release, which will include the
following PR: https://github.com/apache/incubator-pinot/pull/4110
:param conn_id: The name of the connection to use.
:param cmd_path: Do not modify this parameter. It used to be the filepath to the pinot-admin.sh
executable, but since version 4.0.0 of the apache-pinot provider its value must
remain the default: `pinot-admin.sh`. It is kept only so that positional arguments
do not accidentally override `pinot_admin_system_exit` when initializing the hook.
:param pinot_admin_system_exit: If true, the result is evaluated based on the status code.
Otherwise, the result is evaluated as a failure if "Error" or
"Exception" is in the output message.
"""
def __init__(
self,
conn_id: str = "pinot_admin_default",
cmd_path: str = "pinot-admin.sh",
pinot_admin_system_exit: bool = False,
) -> None:
super().__init__()
conn = self.get_connection(conn_id)
self.host = conn.host
self.port = str(conn.port)
if cmd_path != "pinot-admin.sh":
raise RuntimeError(
"In version 4.0.0 of the PinotAdminHook the cmd_path has been hard-coded to"
" pinot-admin.sh. In order to avoid accidental using of this parameter as"
" positional `pinot_admin_system_exit` the `cmd_parameter`"
" parameter is left here but you should not modify it. Make sure that "
" `pinot-admin.sh` is on your PATH and do not change cmd_path value."
)
self.cmd_path = "pinot-admin.sh"
self.pinot_admin_system_exit = conn.extra_dejson.get(
"pinot_admin_system_exit", pinot_admin_system_exit
)
self.conn = conn
def get_conn(self) -> Any:
return self.conn
def add_schema(self, schema_file: str, with_exec: bool = True) -> Any:
"""
Add a Pinot schema by running the AddSchema command.
:param schema_file: Pinot schema file
:param with_exec: bool
"""
cmd = ["AddSchema"]
cmd += ["-controllerHost", self.host]
cmd += ["-controllerPort", self.port]
cmd += ["-schemaFile", schema_file]
if with_exec:
cmd += ["-exec"]
self.run_cli(cmd)
def add_table(self, file_path: str, with_exec: bool = True) -> Any:
"""
Add a Pinot table by running the AddTable command.
:param file_path: Pinot table configure file
:param with_exec: bool
"""
cmd = ["AddTable"]
cmd += ["-controllerHost", self.host]
cmd += ["-controllerPort", self.port]
cmd += ["-filePath", file_path]
if with_exec:
cmd += ["-exec"]
self.run_cli(cmd)
def create_segment(
self,
generator_config_file: str | None = None,
data_dir: str | None = None,
segment_format: str | None = None,
out_dir: str | None = None,
overwrite: str | None = None,
table_name: str | None = None,
segment_name: str | None = None,
time_column_name: str | None = None,
schema_file: str | None = None,
reader_config_file: str | None = None,
enable_star_tree_index: str | None = None,
star_tree_index_spec_file: str | None = None,
hll_size: str | None = None,
hll_columns: str | None = None,
hll_suffix: str | None = None,
num_threads: str | None = None,
post_creation_verification: str | None = None,
retry: str | None = None,
) -> Any:
"""Create Pinot segment by run CreateSegment command."""
cmd = ["CreateSegment"]
if generator_config_file:
cmd += ["-generatorConfigFile", generator_config_file]
if data_dir:
cmd += ["-dataDir", data_dir]
if segment_format:
cmd += ["-format", segment_format]
if out_dir:
cmd += ["-outDir", out_dir]
if overwrite:
cmd += ["-overwrite", overwrite]
if table_name:
cmd += ["-tableName", table_name]
if segment_name:
cmd += ["-segmentName", segment_name]
if time_column_name:
cmd += ["-timeColumnName", time_column_name]
if schema_file:
cmd += ["-schemaFile", schema_file]
if reader_config_file:
cmd += ["-readerConfigFile", reader_config_file]
if enable_star_tree_index:
cmd += ["-enableStarTreeIndex", enable_star_tree_index]
if star_tree_index_spec_file:
cmd += ["-starTreeIndexSpecFile", star_tree_index_spec_file]
if hll_size:
cmd += ["-hllSize", hll_size]
if hll_columns:
cmd += ["-hllColumns", hll_columns]
if hll_suffix:
cmd += ["-hllSuffix", hll_suffix]
if num_threads:
cmd += ["-numThreads", num_threads]
if post_creation_verification:
cmd += ["-postCreationVerification", post_creation_verification]
if retry:
cmd += ["-retry", retry]
self.run_cli(cmd)
def upload_segment(self, segment_dir: str, table_name: str | None = None) -> Any:
"""
Upload a segment by running the UploadSegment command.
:param segment_dir: local directory containing the segment to upload
:param table_name: name of the table to upload the segment to
:return:
"""
cmd = ["UploadSegment"]
cmd += ["-controllerHost", self.host]
cmd += ["-controllerPort", self.port]
cmd += ["-segmentDir", segment_dir]
if table_name:
cmd += ["-tableName", table_name]
self.run_cli(cmd)
def run_cli(self, cmd: list[str], verbose: bool = True) -> str:
"""
Run command with pinot-admin.sh.
:param cmd: list of command arguments to be run by the pinot-admin.sh script
:param verbose: whether to log the command and its output
"""
command = [self.cmd_path, *cmd]
env = None
if self.pinot_admin_system_exit:
env = os.environ.copy()
java_opts = "-Dpinot.admin.system.exit=true " + os.environ.get("JAVA_OPTS", "")
env.update({"JAVA_OPTS": java_opts})
if verbose:
self.log.info(" ".join(command))
with subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True, env=env
) as sub_process:
stdout = ""
if sub_process.stdout:
for line in iter(sub_process.stdout.readline, b""):
stdout += line.decode("utf-8")
if verbose:
self.log.info(line.decode("utf-8").strip())
sub_process.wait()
# As of Pinot v0.1.0, either of "Error: ..." or "Exception caught: ..."
# is expected to be in the output messages. See:
# https://github.com/apache/incubator-pinot/blob/release-0.1.0/pinot-tools/src/main/java/org/apache/pinot/tools/admin/PinotAdministrator.java#L98-L101
if (self.pinot_admin_system_exit and sub_process.returncode) or (
"Error" in stdout or "Exception" in stdout
):
raise AirflowException(stdout)
return stdout
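# --- Usage sketch (not part of the provider source). An illustrative offline-ingestion
# sequence with PinotAdminHook; the connection id, file paths and table name are
# placeholders, and `pinot-admin.sh` must be on PATH as noted in the class docstring.
def _example_offline_ingestion() -> None:
    admin = PinotAdminHook(conn_id="pinot_admin_default", pinot_admin_system_exit=True)
    admin.add_schema("/tmp/pinot/schema.json")       # AddSchema -schemaFile ... -exec
    admin.add_table("/tmp/pinot/table-config.json")  # AddTable -filePath ... -exec
    admin.create_segment(
        data_dir="/tmp/pinot/rawdata",
        segment_format="CSV",
        out_dir="/tmp/pinot/segments",
        table_name="myTable",
        schema_file="/tmp/pinot/schema.json",
    )
    admin.upload_segment(segment_dir="/tmp/pinot/segments", table_name="myTable")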
class PinotDbApiHook(DbApiHook):
"""
Interact with Pinot Broker Query API.
This hook uses the standard SQL endpoint, since the PQL endpoint is soon to be deprecated.
https://docs.pinot.apache.org/users/api/querying-pinot-using-standard-sql
"""
conn_name_attr = "pinot_broker_conn_id"
default_conn_name = "pinot_broker_default"
supports_autocommit = False
def get_conn(self) -> Any:
"""Establish a connection to pinot broker through pinot dbapi."""
conn = self.get_connection(self.pinot_broker_conn_id) # type: ignore
pinot_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get("endpoint", "/query/sql"),
scheme=conn.extra_dejson.get("schema", "http"),
)
self.log.info("Get the connection to pinot broker on %s", conn.host)
return pinot_broker_conn
def get_uri(self) -> str:
"""
Get the connection URI for the Pinot broker,
e.g. http://localhost:9000/query/sql
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
host = conn.host
if conn.port is not None:
host += f":{conn.port}"
conn_type = conn.conn_type or "http"
endpoint = conn.extra_dejson.get("endpoint", "query/sql")
return f"{conn_type}://{host}/{endpoint}"
def get_records(
self, sql: str | list[str], parameters: Iterable | Mapping | None = None, **kwargs
) -> Any:
"""
Executes the sql and returns a set of records.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchall()
def get_first(self, sql: str | list[str], parameters: Iterable | Mapping | None = None) -> Any:
"""
Executes the sql and returns the first resulting row.
:param sql: the sql statement to be executed (str) or a list of
sql statements to execute
:param parameters: The parameters to render the SQL query with.
"""
with self.get_conn() as cur:
cur.execute(sql)
return cur.fetchone()
def set_autocommit(self, conn: Connection, autocommit: Any) -> Any:
raise NotImplementedError()
def insert_rows(
self,
table: str,
rows: str,
target_fields: str | None = None,
commit_every: int = 1000,
replace: bool = False,
**kwargs: Any,
) -> Any:
raise NotImplementedError()
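# --- Usage sketch (not part of the provider source). Querying the broker through the
# DB-API hook; the connection id, table and SQL are placeholders.
def _example_broker_query():
    hook = PinotDbApiHook(pinot_broker_conn_id="pinot_broker_default")
    return hook.get_first("SELECT COUNT(*) FROM myTable")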
| 11,852 | 35.027356 | 162 | py |
airflow | airflow-main/airflow/providers/apache/pinot/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/impala/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "1.1.2"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-impala:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,538 | 35.642857 | 122 | py |
airflow | airflow-main/airflow/providers/apache/impala/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/impala/hooks/impala.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from impala.dbapi import connect
from impala.interface import Connection
from airflow.providers.common.sql.hooks.sql import DbApiHook
class ImpalaHook(DbApiHook):
"""Interact with Apache Impala through impyla."""
conn_name_attr = "impala_conn_id"
default_conn_name = "impala_default"
conn_type = "impala"
hook_name = "Impala"
def get_conn(self) -> Connection:
connection = self.get_connection(self.impala_conn_id) # pylint: disable=no-member
return connect(
host=connection.host,
port=connection.port,
user=connection.login,
password=connection.password,
database=connection.schema,
**connection.extra_dejson,
)
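# --- Usage sketch (not part of the provider source). Because ImpalaHook is a DbApiHook,
# the inherited helpers can be used directly; the connection id and query are placeholders.
def _example_query():
    hook = ImpalaHook(impala_conn_id="impala_default")
    return hook.get_records("SELECT * FROM default.some_table LIMIT 10")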
| 1,559 | 35.27907 | 90 | py |
airflow | airflow-main/airflow/providers/apache/hive/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "6.1.2"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-hive:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,536 | 35.595238 | 120 | py |
airflow | airflow-main/airflow/providers/apache/hive/macros/hive.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import datetime
def max_partition(
table, schema="default", field=None, filter_map=None, metastore_conn_id="metastore_default"
):
"""
Gets the max partition for a table.
:param schema: The hive schema the table lives in
:param table: The hive table you are interested in, supports the dot
notation as in "my_database.my_table", if a dot is found,
the schema param is disregarded
:param metastore_conn_id: The hive connection you are interested in.
If your default is set you don't need to use this parameter.
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:param field: the field to get the max value from. If there's only
one partition field, this will be inferred
>>> max_partition('airflow.static_babynames_partitioned')
'2015-01-01'
"""
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook
if "." in table:
schema, table = table.split(".")
hive_hook = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)
return hive_hook.max_partition(schema=schema, table_name=table, field=field, filter_map=filter_map)
def _closest_date(target_dt, date_list, before_target=None) -> datetime.date | None:
"""
This function finds the date in a list closest to the target date.
An optional parameter can be given to get the closest before or after.
:param target_dt: The target date
:param date_list: The list of dates to search
:param before_target: closest before or after the target
:returns: The closest date
"""
time_before = lambda d: target_dt - d if d <= target_dt else datetime.timedelta.max
time_after = lambda d: d - target_dt if d >= target_dt else datetime.timedelta.max
any_time = lambda d: target_dt - d if d < target_dt else d - target_dt
if before_target is None:
return min(date_list, key=any_time).date()
if before_target:
return min(date_list, key=time_before).date()
else:
return min(date_list, key=time_after).date()
def closest_ds_partition(
table, ds, before=True, schema="default", metastore_conn_id="metastore_default"
) -> str | None:
"""
This function finds the date in a list closest to the target date.
An optional parameter can be given to get the closest before or after.
:param table: A hive table name
:param ds: A datestamp ``%Y-%m-%d`` e.g. ``yyyy-mm-dd``
:param before: closest before (True), after (False) or either side of ds
:param schema: table schema
:param metastore_conn_id: which metastore connection to use
:returns: The closest date
>>> tbl = 'airflow.static_babynames_partitioned'
>>> closest_ds_partition(tbl, '2015-01-02')
'2015-01-01'
"""
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook
if "." in table:
schema, table = table.split(".")
hive_hook = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)
partitions = hive_hook.get_partitions(schema=schema, table_name=table)
if not partitions:
return None
part_vals = [list(p.values())[0] for p in partitions]
if ds in part_vals:
return ds
else:
parts = [datetime.datetime.strptime(pv, "%Y-%m-%d") for pv in part_vals]
target_dt = datetime.datetime.strptime(ds, "%Y-%m-%d")
closest_ds = _closest_date(target_dt, parts, before_target=before)
if closest_ds is not None:
return closest_ds.isoformat()
return None
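# --- Usage sketch (not part of the provider source). These macros are usually called from
# templated fields; since the provider's HivePlugin registers them under the "hive" plugin
# name, a template would typically reference them as, for example,
#   {{ macros.hive.max_partition('airflow.static_babynames_partitioned') }}
# They can also be called directly from Python, assuming a reachable metastore and the
# illustrative table name below.
def _example_lookups():
    latest = max_partition("airflow.static_babynames_partitioned")
    nearest = closest_ds_partition("airflow.static_babynames_partitioned", "2015-01-02", before=True)
    return latest, nearest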
| 4,583 | 39.928571 | 103 | py |
airflow | airflow-main/airflow/providers/apache/hive/macros/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/hive/plugins/hive.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.plugins_manager import AirflowPlugin
from airflow.providers.apache.hive.macros.hive import closest_ds_partition, max_partition
class HivePlugin(AirflowPlugin):
"""Hive plugin - delivering macros used by users that use the provider."""
name = "hive"
macros = [max_partition, closest_ds_partition]
| 1,146 | 38.551724 | 89 | py |
airflow | airflow-main/airflow/providers/apache/hive/plugins/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/hive/transfers/s3_to_hive.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains an operator to move data from an S3 bucket to Hive."""
from __future__ import annotations
import bz2
import gzip
import os
import tempfile
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import TYPE_CHECKING, Any, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.utils.compression import uncompress_file
if TYPE_CHECKING:
from airflow.utils.context import Context
class S3ToHiveOperator(BaseOperator):
"""
Moves data from S3 to Hive.
The operator downloads a file from S3, stores the file locally
before loading it into a Hive table.
If the ``create`` or ``recreate`` arguments are set to ``True``,
a ``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
Hive data types are inferred from the cursor's metadata.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param s3_key: The key to be retrieved from S3. (templated)
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values
:param hive_table: target Hive table, use dot notation to target a
specific database. (templated)
:param delimiter: field delimiter in the file
:param create: whether to create the table if it doesn't exist
:param recreate: whether to drop and recreate the table at every
execution
:param partition: target partition as a dict of partition columns
and values. (templated)
:param headers: whether the file contains column names on the first
line
:param check_headers: whether the column names on the first line should be
checked against the keys of field_dict
:param wildcard_match: whether the s3_key should be interpreted as a Unix
wildcard pattern
:param aws_conn_id: source s3 connection
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
- ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to uses.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:param hive_cli_conn_id: Reference to the
:ref:`Hive CLI connection id <howto/connection:hive_cli>`.
:param input_compressed: Boolean to determine if file decompression is
required to process headers
:param tblproperties: TBLPROPERTIES of the hive table being created
:param select_expression: S3 Select expression
"""
template_fields: Sequence[str] = ("s3_key", "partition", "hive_table")
template_ext: Sequence[str] = ()
ui_color = "#a0e08c"
def __init__(
self,
*,
s3_key: str,
field_dict: dict,
hive_table: str,
delimiter: str = ",",
create: bool = True,
recreate: bool = False,
partition: dict | None = None,
headers: bool = False,
check_headers: bool = False,
wildcard_match: bool = False,
aws_conn_id: str = "aws_default",
verify: bool | str | None = None,
hive_cli_conn_id: str = "hive_cli_default",
input_compressed: bool = False,
tblproperties: dict | None = None,
select_expression: str | None = None,
hive_auth: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.s3_key = s3_key
self.field_dict = field_dict
self.hive_table = hive_table
self.delimiter = delimiter
self.create = create
self.recreate = recreate
self.partition = partition
self.headers = headers
self.check_headers = check_headers
self.wildcard_match = wildcard_match
self.hive_cli_conn_id = hive_cli_conn_id
self.aws_conn_id = aws_conn_id
self.verify = verify
self.input_compressed = input_compressed
self.tblproperties = tblproperties
self.select_expression = select_expression
self.hive_auth = hive_auth
if self.check_headers and not (self.field_dict is not None and self.headers):
raise AirflowException("To check_headers provide field_dict and headers")
def execute(self, context: Context):
# Downloading file from S3
s3_hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
hive_hook = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id, auth=self.hive_auth)
self.log.info("Downloading S3 file")
if self.wildcard_match:
if not s3_hook.check_for_wildcard_key(self.s3_key):
raise AirflowException(f"No key matches {self.s3_key}")
s3_key_object = s3_hook.get_wildcard_key(self.s3_key)
elif s3_hook.check_for_key(self.s3_key):
s3_key_object = s3_hook.get_key(self.s3_key)
else:
raise AirflowException(f"The key {self.s3_key} does not exists")
if TYPE_CHECKING:
assert s3_key_object
_, file_ext = os.path.splitext(s3_key_object.key)
if self.select_expression and self.input_compressed and file_ext.lower() != ".gz":
raise AirflowException("GZIP is the only compression format Amazon S3 Select supports")
with TemporaryDirectory(prefix="tmps32hive_") as tmp_dir, NamedTemporaryFile(
mode="wb", dir=tmp_dir, suffix=file_ext
) as f:
self.log.info("Dumping S3 key %s contents to local file %s", s3_key_object.key, f.name)
if self.select_expression:
option = {}
if self.headers:
option["FileHeaderInfo"] = "USE"
if self.delimiter:
option["FieldDelimiter"] = self.delimiter
input_serialization: dict[str, Any] = {"CSV": option}
if self.input_compressed:
input_serialization["CompressionType"] = "GZIP"
content = s3_hook.select_key(
bucket_name=s3_key_object.bucket_name,
key=s3_key_object.key,
expression=self.select_expression,
input_serialization=input_serialization,
)
f.write(content.encode("utf-8"))
else:
s3_key_object.download_fileobj(f)
f.flush()
if self.select_expression or not self.headers:
self.log.info("Loading file %s into Hive", f.name)
hive_hook.load_file(
f.name,
self.hive_table,
field_dict=self.field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties,
)
else:
# Decompressing file
if self.input_compressed:
self.log.info("Uncompressing file %s", f.name)
fn_uncompressed = uncompress_file(f.name, file_ext, tmp_dir)
self.log.info("Uncompressed to %s", fn_uncompressed)
# uncompressed file available now so deleting
# compressed file to save disk space
f.close()
else:
fn_uncompressed = f.name
# Testing if header matches field_dict
if self.check_headers:
self.log.info("Matching file header against field_dict")
header_list = self._get_top_row_as_list(fn_uncompressed)
if not self._match_headers(header_list):
raise AirflowException("Header check failed")
# Deleting top header row
self.log.info("Removing header from file %s", fn_uncompressed)
headless_file = self._delete_top_row_and_compress(fn_uncompressed, file_ext, tmp_dir)
self.log.info("Headless file %s", headless_file)
self.log.info("Loading file %s into Hive", headless_file)
hive_hook.load_file(
headless_file,
self.hive_table,
field_dict=self.field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties,
)
def _get_top_row_as_list(self, file_name):
with open(file_name) as file:
header_line = file.readline().strip()
return header_line.split(self.delimiter)
def _match_headers(self, header_list):
if not header_list:
raise AirflowException("Unable to retrieve header row from file")
field_names = self.field_dict.keys()
if len(field_names) != len(header_list):
self.log.warning(
"Headers count mismatch File headers:\n %s\nField names: \n %s\n", header_list, field_names
)
return False
test_field_match = [h1.lower() == h2.lower() for h1, h2 in zip(header_list, field_names)]
if not all(test_field_match):
self.log.warning(
"Headers do not match field names File headers:\n %s\nField names: \n %s\n",
header_list,
field_names,
)
return False
else:
return True
@staticmethod
def _delete_top_row_and_compress(input_file_name, output_file_ext, dest_dir):
# When output_file_ext is not defined, file is not compressed
open_fn = open
if output_file_ext.lower() == ".gz":
open_fn = gzip.GzipFile
elif output_file_ext.lower() == ".bz2":
open_fn = bz2.BZ2File
_, fn_output = tempfile.mkstemp(suffix=output_file_ext, dir=dest_dir)
with open(input_file_name, "rb") as f_in, open_fn(fn_output, "wb") as f_out:
f_in.seek(0)
next(f_in)
for line in f_in:
f_out.write(line)
return fn_output
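# --- Usage sketch (not part of the provider source). One way to wire the operator into a
# DAG; the bucket key, field types, Hive table and connection ids are placeholders.
def _example_dag():
    import pendulum

    from airflow import DAG

    with DAG(
        dag_id="s3_to_hive_example",
        start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
        schedule=None,
    ) as dag:
        S3ToHiveOperator(
            task_id="stage_names",
            s3_key="s3://my-bucket/data/{{ ds }}/names.csv",
            field_dict={"name": "STRING", "count": "INT"},
            hive_table="staging.names",
            partition={"ds": "{{ ds }}"},
            headers=True,
            check_headers=True,
            aws_conn_id="aws_default",
            hive_cli_conn_id="hive_cli_default",
        )
    return dag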
| 11,748 | 41.415162 | 107 | py |
airflow | airflow-main/airflow/providers/apache/hive/transfers/hive_to_mysql.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains an operator to move data from Hive to MySQL."""
from __future__ import annotations
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveServer2Hook
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.utils.operator_helpers import context_to_airflow_vars
if TYPE_CHECKING:
from airflow.utils.context import Context
class HiveToMySqlOperator(BaseOperator):
"""
Moves data from Hive to MySQL.
Note that for now the data is loaded into memory before being pushed
to MySQL, so this operator should be used for a smallish amount of data.
:param sql: SQL query to execute against Hive server. (templated)
:param mysql_table: target MySQL table, use dot notation to target a
specific database. (templated)
:param mysql_conn_id: source mysql connection
:param hiveserver2_conn_id: Reference to the
:ref:`Hive Server2 thrift service connection id <howto/connection:hiveserver2>`.
:param mysql_preoperator: sql statement to run against mysql prior to
import, typically used to truncate or delete in place
of the data coming in, allowing the task to be idempotent (running
the task twice won't double load data). (templated)
:param mysql_postoperator: sql statement to run against mysql after the
import, typically used to move data from staging to
production and issue cleanup commands. (templated)
:param bulk_load: flag to use bulk_load option. This loads mysql directly
from a tab-delimited text file using the LOAD DATA LOCAL INFILE command. The MySQL
server must support loading local files via this command (it is disabled by default).
:param hive_conf: optional Hive configuration (key/value pairs) to pass along with the query
"""
template_fields: Sequence[str] = ("sql", "mysql_table", "mysql_preoperator", "mysql_postoperator")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {
"sql": "hql",
"mysql_preoperator": "mysql",
"mysql_postoperator": "mysql",
}
ui_color = "#a0e08c"
def __init__(
self,
*,
sql: str,
mysql_table: str,
hiveserver2_conn_id: str = "hiveserver2_default",
mysql_conn_id: str = "mysql_default",
mysql_preoperator: str | None = None,
mysql_postoperator: str | None = None,
bulk_load: bool = False,
hive_conf: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.mysql_table = mysql_table
self.mysql_conn_id = mysql_conn_id
self.mysql_preoperator = mysql_preoperator
self.mysql_postoperator = mysql_postoperator
self.hiveserver2_conn_id = hiveserver2_conn_id
self.bulk_load = bulk_load
self.hive_conf = hive_conf
def execute(self, context: Context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
self.log.info("Extracting data from Hive: %s", self.sql)
hive_conf = context_to_airflow_vars(context)
if self.hive_conf:
hive_conf.update(self.hive_conf)
if self.bulk_load:
with NamedTemporaryFile() as tmp_file:
hive.to_csv(
self.sql,
tmp_file.name,
delimiter="\t",
lineterminator="\n",
output_header=False,
hive_conf=hive_conf,
)
mysql = self._call_preoperator(local_infile=self.bulk_load)
mysql.bulk_load(table=self.mysql_table, tmp_file=tmp_file.name)
else:
hive_results = hive.get_records(self.sql, parameters=hive_conf)
mysql = self._call_preoperator()
mysql.insert_rows(table=self.mysql_table, rows=hive_results)
if self.mysql_postoperator:
self.log.info("Running MySQL postoperator")
mysql.run(self.mysql_postoperator)
self.log.info("Done.")
def _call_preoperator(self, local_infile: bool = False) -> MySqlHook:
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id, local_infile=local_infile)
if self.mysql_preoperator:
self.log.info("Running MySQL preoperator")
mysql.run(self.mysql_preoperator)
self.log.info("Inserting rows into MySQL")
return mysql
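# --- Usage sketch (not part of the provider source). A small transfer task; the query,
# target table, pre-operator statement and connection ids are placeholders.
def _example_task():
    return HiveToMySqlOperator(
        task_id="hive_to_mysql",
        sql="SELECT name, num FROM airflow.static_babynames WHERE ds = '{{ ds }}'",
        mysql_table="analytics.babynames",
        mysql_preoperator="TRUNCATE TABLE analytics.babynames",
        hiveserver2_conn_id="hiveserver2_default",
        mysql_conn_id="mysql_default",
    )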
| 5,280 | 39.937984 | 102 | py |
airflow | airflow-main/airflow/providers/apache/hive/transfers/vertica_to_hive.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains an operator to move data from Vertica to Hive."""
from __future__ import annotations
import csv
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.providers.vertica.hooks.vertica import VerticaHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class VerticaToHiveOperator(BaseOperator):
"""
Moves data from Vertica to Hive.
The operator runs your query against Vertica, stores the file
locally before loading it into a Hive table. If the ``create``
or ``recreate`` arguments are set to ``True``,
a ``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
Hive data types are inferred from the cursor's metadata.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param sql: SQL query to execute against the Vertica database. (templated)
:param hive_table: target Hive table, use dot notation to target a
specific database. (templated)
:param create: whether to create the table if it doesn't exist
:param recreate: whether to drop and recreate the table at every execution
:param partition: target partition as a dict of partition columns
and values. (templated)
:param delimiter: field delimiter in the file
:param vertica_conn_id: source Vertica connection
:param hive_cli_conn_id: Reference to the
:ref:`Hive CLI connection id <howto/connection:hive_cli>`.
:param hive_auth: optional authentication option passed for the Hive connection
"""
template_fields: Sequence[str] = ("sql", "partition", "hive_table")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
ui_color = "#b4e0ff"
def __init__(
self,
*,
sql: str,
hive_table: str,
create: bool = True,
recreate: bool = False,
partition: dict | None = None,
delimiter: str = chr(1),
vertica_conn_id: str = "vertica_default",
hive_cli_conn_id: str = "hive_cli_default",
hive_auth: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.hive_table = hive_table
self.partition = partition
self.create = create
self.recreate = recreate
self.delimiter = str(delimiter)
self.vertica_conn_id = vertica_conn_id
self.hive_cli_conn_id = hive_cli_conn_id
self.partition = partition or {}
self.hive_auth = hive_auth
@classmethod
def type_map(cls, vertica_type):
"""Manually hack Vertica-Python type mapping.
The stock datatype.py does not provide the full type mapping access.
Reference:
https://github.com/uber/vertica-python/blob/master/vertica_python/vertica/column.py
"""
type_map = {
5: "BOOLEAN",
6: "INT",
7: "FLOAT",
8: "STRING",
9: "STRING",
16: "FLOAT",
}
return type_map.get(vertica_type, "STRING")
def execute(self, context: Context):
hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id, auth=self.hive_auth)
vertica = VerticaHook(vertica_conn_id=self.vertica_conn_id)
self.log.info("Dumping Vertica query results to local file")
conn = vertica.get_conn()
cursor = conn.cursor()
cursor.execute(self.sql)
with NamedTemporaryFile(mode="w", encoding="utf-8") as f:
csv_writer = csv.writer(f, delimiter=self.delimiter)
field_dict = OrderedDict()
for col_count, field in enumerate(cursor.description, start=1):
col_position = f"Column{col_count}"
field_dict[col_position if field[0] == "" else field[0]] = self.type_map(field[1])
csv_writer.writerows(cursor.iterate())
f.flush()
cursor.close()
conn.close()
self.log.info("Loading file into Hive")
hive.load_file(
f.name,
self.hive_table,
field_dict=field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
)
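# --- Usage sketch (not part of the provider source). Staging a Vertica query result into a
# partitioned Hive table; the query, table names and connection ids are placeholders.
def _example_task():
    return VerticaToHiveOperator(
        task_id="vertica_to_hive",
        sql="SELECT id, amount, created_at FROM public.orders WHERE created_at::DATE = '{{ ds }}'",
        hive_table="staging.orders",
        partition={"ds": "{{ ds }}"},
        vertica_conn_id="vertica_default",
        hive_cli_conn_id="hive_cli_default",
    )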
| 5,565 | 38.197183 | 98 | py |
airflow | airflow-main/airflow/providers/apache/hive/transfers/mysql_to_hive.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains an operator to move data from MySQL to Hive."""
from __future__ import annotations
import csv
from collections import OrderedDict
from contextlib import closing
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
import MySQLdb
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.providers.mysql.hooks.mysql import MySqlHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class MySqlToHiveOperator(BaseOperator):
"""
Moves data from MySql to Hive.
The operator runs your query against MySQL, stores the file locally
before loading it into a Hive table.
If the ``create`` or ``recreate`` arguments are set to ``True``,
a ``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
Hive data types are inferred from the cursor's metadata. Note that the
table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param sql: SQL query to execute against the MySQL database. (templated)
:param hive_table: target Hive table, use dot notation to target a
specific database. (templated)
:param create: whether to create the table if it doesn't exist
:param recreate: whether to drop and recreate the table at every
execution
:param partition: target partition as a dict of partition columns
and values. (templated)
:param delimiter: field delimiter in the file
:param quoting: controls when quotes should be generated by the csv writer.
It can take on any of the csv.QUOTE_* constants.
:param quotechar: one-character string used to quote fields
containing special characters.
:param escapechar: one-character string used by csv writer to escape
the delimiter or quotechar.
:param mysql_conn_id: source mysql connection
:param hive_cli_conn_id: Reference to the
:ref:`Hive CLI connection id <howto/connection:hive_cli>`.
:param hive_auth: optional authentication option passed for the Hive connection
:param tblproperties: TBLPROPERTIES of the hive table being created
"""
template_fields: Sequence[str] = ("sql", "partition", "hive_table")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "mysql"}
ui_color = "#a0e08c"
def __init__(
self,
*,
sql: str,
hive_table: str,
create: bool = True,
recreate: bool = False,
partition: dict | None = None,
delimiter: str = chr(1),
quoting: int | None = None,
quotechar: str = '"',
escapechar: str | None = None,
mysql_conn_id: str = "mysql_default",
hive_cli_conn_id: str = "hive_cli_default",
hive_auth: str | None = None,
tblproperties: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.hive_table = hive_table
self.partition = partition
self.create = create
self.recreate = recreate
self.delimiter = str(delimiter)
self.quoting = quoting or csv.QUOTE_MINIMAL
self.quotechar = quotechar
self.escapechar = escapechar
self.mysql_conn_id = mysql_conn_id
self.hive_cli_conn_id = hive_cli_conn_id
self.partition = partition or {}
self.tblproperties = tblproperties
self.hive_auth = hive_auth
@classmethod
def type_map(cls, mysql_type: int) -> str:
"""Maps MySQL type to Hive type."""
types = MySQLdb.constants.FIELD_TYPE
type_map = {
types.BIT: "INT",
types.DECIMAL: "DOUBLE",
types.NEWDECIMAL: "DOUBLE",
types.DOUBLE: "DOUBLE",
types.FLOAT: "DOUBLE",
types.INT24: "INT",
types.LONG: "BIGINT",
types.LONGLONG: "DECIMAL(38,0)",
types.SHORT: "INT",
types.TINY: "SMALLINT",
types.YEAR: "INT",
types.TIMESTAMP: "TIMESTAMP",
}
return type_map.get(mysql_type, "STRING")
def execute(self, context: Context):
hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id, auth=self.hive_auth)
mysql = MySqlHook(mysql_conn_id=self.mysql_conn_id)
self.log.info("Dumping MySQL query results to local file")
with NamedTemporaryFile(mode="w", encoding="utf-8") as f:
with closing(mysql.get_conn()) as conn:
with closing(conn.cursor()) as cursor:
cursor.execute(self.sql)
csv_writer = csv.writer(
f,
delimiter=self.delimiter,
quoting=self.quoting,
quotechar=self.quotechar if self.quoting != csv.QUOTE_NONE else None,
escapechar=self.escapechar,
)
field_dict = OrderedDict()
if cursor.description is not None:
for field in cursor.description:
field_dict[field[0]] = self.type_map(field[1])
csv_writer.writerows(cursor)
f.flush()
self.log.info("Loading file into Hive")
hive.load_file(
f.name,
self.hive_table,
field_dict=field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties,
)
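# --- Usage sketch (not part of the provider source). Copying a MySQL query result into a
# partitioned Hive table; the query, table names and connection ids are placeholders.
def _example_task():
    return MySqlToHiveOperator(
        task_id="mysql_to_hive",
        sql="SELECT id, name, created_at FROM users WHERE DATE(created_at) = '{{ ds }}'",
        hive_table="staging.users",
        partition={"ds": "{{ ds }}"},
        mysql_conn_id="mysql_default",
        hive_cli_conn_id="hive_cli_default",
    )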
| 6,719 | 39.239521 | 93 | py |
airflow | airflow-main/airflow/providers/apache/hive/transfers/hive_to_samba.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains an operator to move data from Hive to Samba."""
from __future__ import annotations
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveServer2Hook
from airflow.providers.samba.hooks.samba import SambaHook
from airflow.utils.operator_helpers import context_to_airflow_vars
if TYPE_CHECKING:
from airflow.utils.context import Context
class HiveToSambaOperator(BaseOperator):
"""
Execute hql code in a specific Hive database and load the results as a csv to a Samba location.
:param hql: the hql to be exported. (templated)
:param destination_filepath: the file path to where the file will be pushed onto samba
:param samba_conn_id: reference to the samba destination
:param hiveserver2_conn_id: Reference to the
:ref: `Hive Server2 thrift service connection id <howto/connection:hiveserver2>`.
"""
template_fields: Sequence[str] = ("hql", "destination_filepath")
template_ext: Sequence[str] = (
".hql",
".sql",
)
template_fields_renderers = {"hql": "hql"}
def __init__(
self,
*,
hql: str,
destination_filepath: str,
samba_conn_id: str = "samba_default",
hiveserver2_conn_id: str = "hiveserver2_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.hiveserver2_conn_id = hiveserver2_conn_id
self.samba_conn_id = samba_conn_id
self.destination_filepath = destination_filepath
self.hql = hql.strip().rstrip(";")
def execute(self, context: Context):
with NamedTemporaryFile() as tmp_file:
self.log.info("Fetching file from Hive")
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
hive.to_csv(self.hql, csv_filepath=tmp_file.name, hive_conf=context_to_airflow_vars(context))
self.log.info("Pushing to samba")
samba = SambaHook(samba_conn_id=self.samba_conn_id)
samba.push_from_local(self.destination_filepath, tmp_file.name)
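# --- Usage sketch (not part of the provider source). Exporting a Hive query result as a
# csv file onto a Samba share; the hql, destination path and connection ids are placeholders.
def _example_task():
    return HiveToSambaOperator(
        task_id="hive_to_samba",
        hql="SELECT name, num FROM airflow.static_babynames LIMIT 100",
        destination_filepath="reports/babynames_{{ ds }}.csv",
        samba_conn_id="samba_default",
        hiveserver2_conn_id="hiveserver2_default",
    )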
| 2,954 | 38.932432 | 105 | py |
airflow | airflow-main/airflow/providers/apache/hive/transfers/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/hive/transfers/mssql_to_hive.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains an operator to move data from MSSQL to Hive."""
from __future__ import annotations
import csv
from collections import OrderedDict
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Sequence
import pymssql
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class MsSqlToHiveOperator(BaseOperator):
"""
Moves data from Microsoft SQL Server to Hive.
The operator runs your query against Microsoft SQL Server, stores
the file locally before loading it into a Hive table. If the
    ``create`` or ``recreate`` arguments are set to ``True``,
    ``CREATE TABLE`` and ``DROP TABLE`` statements are generated.
Hive data types are inferred from the cursor's metadata.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param sql: SQL query to execute against the Microsoft SQL Server
database. (templated)
:param hive_table: target Hive table, use dot notation to target a specific
database. (templated)
:param create: whether to create the table if it doesn't exist
:param recreate: whether to drop and recreate the table at every execution
:param partition: target partition as a dict of partition columns and
values. (templated)
:param delimiter: field delimiter in the file
:param mssql_conn_id: source Microsoft SQL Server connection
:param hive_cli_conn_id: Reference to the
:ref:`Hive CLI connection id <howto/connection:hive_cli>`.
:param hive_auth: optional authentication option passed for the Hive connection
:param tblproperties: TBLPROPERTIES of the hive table being created
"""
template_fields: Sequence[str] = ("sql", "partition", "hive_table")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "tsql"}
ui_color = "#a0e08c"
def __init__(
self,
*,
sql: str,
hive_table: str,
create: bool = True,
recreate: bool = False,
partition: dict | None = None,
delimiter: str = chr(1),
mssql_conn_id: str = "mssql_default",
hive_cli_conn_id: str = "hive_cli_default",
hive_auth: str | None = None,
tblproperties: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.hive_table = hive_table
self.partition = partition
self.create = create
self.recreate = recreate
self.delimiter = delimiter
self.mssql_conn_id = mssql_conn_id
self.hive_cli_conn_id = hive_cli_conn_id
self.partition = partition or {}
self.tblproperties = tblproperties
self.hive_auth = hive_auth
@classmethod
def type_map(cls, mssql_type: int) -> str:
"""Maps MsSQL type to Hive type."""
map_dict = {
pymssql.BINARY.value: "INT",
pymssql.DECIMAL.value: "FLOAT",
pymssql.NUMBER.value: "INT",
}
return map_dict.get(mssql_type, "STRING")
def execute(self, context: Context):
mssql = MsSqlHook(mssql_conn_id=self.mssql_conn_id)
self.log.info("Dumping Microsoft SQL Server query results to local file")
with mssql.get_conn() as conn:
with conn.cursor() as cursor:
cursor.execute(self.sql)
with NamedTemporaryFile(mode="w", encoding="utf-8") as tmp_file:
csv_writer = csv.writer(tmp_file, delimiter=self.delimiter)
field_dict = OrderedDict()
for col_count, field in enumerate(cursor.description, start=1):
col_position = f"Column{col_count}"
field_dict[col_position if field[0] == "" else field[0]] = self.type_map(field[1])
csv_writer.writerows(cursor)
tmp_file.flush()
hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id, auth=self.hive_auth)
self.log.info("Loading file into Hive")
hive.load_file(
tmp_file.name,
self.hive_table,
field_dict=field_dict,
create=self.create,
partition=self.partition,
delimiter=self.delimiter,
recreate=self.recreate,
tblproperties=self.tblproperties,
)
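# --- Illustrative usage sketch (not part of the provider API) ----------------
# A minimal, hedged example of staging a Microsoft SQL Server query result into
# a Hive table. The dag id, query, target table and connection ids are
# hypothetical placeholders, and Airflow 2.4+ is assumed for ``schedule``.
def _example_mssql_to_hive_dag():
    import pendulum
    from airflow import DAG
    with DAG(
        dag_id="example_mssql_to_hive",  # hypothetical dag id
        start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
        schedule=None,
        catchup=False,
    ) as dag:
        MsSqlToHiveOperator(
            task_id="stage_orders",
            sql="SELECT order_id, amount FROM dbo.orders",  # hypothetical query
            hive_table="staging.orders",  # hypothetical target table
            partition={"ds": "{{ ds }}"},
            mssql_conn_id="mssql_default",
            hive_cli_conn_id="hive_cli_default",
        )
    return dag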
| 5,664 | 39.755396 | 106 | py |
airflow | airflow-main/airflow/providers/apache/hive/operators/hive.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
import re
from typing import TYPE_CHECKING, Any, Sequence
from airflow.configuration import conf
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveCliHook
from airflow.utils import operator_helpers
from airflow.utils.operator_helpers import context_to_airflow_vars
if TYPE_CHECKING:
from airflow.utils.context import Context
class HiveOperator(BaseOperator):
"""
Executes hql code or hive script in a specific Hive database.
:param hql: the hql to be executed. Note that you may also use
a relative path from the dag file of a (template) hive
script. (templated)
:param hive_cli_conn_id: Reference to the
:ref:`Hive CLI connection id <howto/connection:hive_cli>`. (templated)
:param hiveconfs: if defined, these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``
:param hiveconf_jinja_translate: when True, hiveconf-type templating
${var} gets translated into jinja-type templating {{ var }} and
${hiveconf:var} gets translated into jinja-type templating {{ var }}.
Note that you may want to use this along with the
``DAG(user_defined_macros=myargs)`` parameter. View the DAG
object documentation for more details.
:param script_begin_tag: If defined, the operator will get rid of the
part of the script before the first occurrence of `script_begin_tag`
:param run_as_owner: Run HQL code as a DAG's owner.
:param mapred_queue: queue used by the Hadoop CapacityScheduler. (templated)
:param mapred_queue_priority: priority within CapacityScheduler queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
    :param hive_cli_params: parameters passed to the hive CLI
:param auth: optional authentication option passed for the Hive connection
"""
template_fields: Sequence[str] = (
"hql",
"schema",
"hive_cli_conn_id",
"mapred_queue",
"hiveconfs",
"mapred_job_name",
"mapred_queue_priority",
)
template_ext: Sequence[str] = (
".hql",
".sql",
)
template_fields_renderers = {"hql": "hql"}
ui_color = "#f0e4ec"
def __init__(
self,
*,
hql: str,
hive_cli_conn_id: str = "hive_cli_default",
schema: str = "default",
hiveconfs: dict[Any, Any] | None = None,
hiveconf_jinja_translate: bool = False,
script_begin_tag: str | None = None,
run_as_owner: bool = False,
mapred_queue: str | None = None,
mapred_queue_priority: str | None = None,
mapred_job_name: str | None = None,
hive_cli_params: str = "",
auth: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.hql = hql
self.hive_cli_conn_id = hive_cli_conn_id
self.schema = schema
self.hiveconfs = hiveconfs or {}
self.hiveconf_jinja_translate = hiveconf_jinja_translate
self.script_begin_tag = script_begin_tag
self.run_as = None
if run_as_owner:
self.run_as = self.dag.owner
self.mapred_queue = mapred_queue
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
self.hive_cli_params = hive_cli_params
self.auth = auth
job_name_template = conf.get_mandatory_value(
"hive",
"mapred_job_name_template",
fallback="Airflow HiveOperator task for {hostname}.{dag_id}.{task_id}.{execution_date}",
)
self.mapred_job_name_template: str = job_name_template
# assigned lazily - just for consistency we can create the attribute with a
# `None` initial value, later it will be populated by the execute method.
# This also makes `on_kill` implementation consistent since it assumes `self.hook`
# is defined.
self.hook: HiveCliHook | None = None
def get_hook(self) -> HiveCliHook:
"""Get Hive cli hook."""
return HiveCliHook(
hive_cli_conn_id=self.hive_cli_conn_id,
run_as=self.run_as,
mapred_queue=self.mapred_queue,
mapred_queue_priority=self.mapred_queue_priority,
mapred_job_name=self.mapred_job_name,
hive_cli_params=self.hive_cli_params,
auth=self.auth,
)
def prepare_template(self) -> None:
if self.hiveconf_jinja_translate:
self.hql = re.sub(r"(\$\{(hiveconf:)?([ a-zA-Z0-9_]*)\})", r"{{ \g<3> }}", self.hql)
if self.script_begin_tag and self.script_begin_tag in self.hql:
self.hql = "\n".join(self.hql.split(self.script_begin_tag)[1:])
def execute(self, context: Context) -> None:
self.log.info("Executing: %s", self.hql)
self.hook = self.get_hook()
# set the mapred_job_name if it's not set with dag, task, execution time info
if not self.mapred_job_name:
ti = context["ti"]
if ti.execution_date is None:
raise RuntimeError("execution_date is None")
self.hook.mapred_job_name = self.mapred_job_name_template.format(
dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
hostname=ti.hostname.split(".")[0],
)
if self.hiveconf_jinja_translate:
self.hiveconfs = context_to_airflow_vars(context)
else:
self.hiveconfs.update(context_to_airflow_vars(context))
self.log.info("Passing HiveConf: %s", self.hiveconfs)
self.hook.run_cli(hql=self.hql, schema=self.schema, hive_conf=self.hiveconfs)
def dry_run(self) -> None:
# Reset airflow environment variables to prevent
# existing env vars from impacting behavior.
self.clear_airflow_vars()
self.hook = self.get_hook()
self.hook.test_hql(hql=self.hql)
def on_kill(self) -> None:
if self.hook:
self.hook.kill()
def clear_airflow_vars(self) -> None:
"""Reset airflow environment variables to prevent existing ones from impacting behavior."""
blank_env_vars = {
value["env_var_format"]: "" for value in operator_helpers.AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()
}
os.environ.update(blank_env_vars)
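# --- Illustrative usage sketch (not part of the provider API) ----------------
# A minimal, hedged example of running templated HQL with HiveOperator. The dag
# id, HQL, database and queue name are hypothetical placeholders, and Airflow
# 2.4+ is assumed for the ``schedule`` argument.
def _example_hive_operator_dag():
    import pendulum
    from airflow import DAG
    with DAG(
        dag_id="example_hive_operator",  # hypothetical dag id
        start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
        schedule=None,
        catchup=False,
    ) as dag:
        HiveOperator(
            task_id="aggregate_daily",
            hql=(
                "INSERT OVERWRITE TABLE my_db.daily_agg "
                "SELECT ds, COUNT(*) FROM my_db.events WHERE ds = '{{ ds }}' GROUP BY ds"
            ),  # hypothetical HQL
            schema="my_db",  # hypothetical database
            mapred_queue="default",  # hypothetical scheduler queue
            hiveconfs={"hive.exec.dynamic.partition": "true"},
        )
    return dag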
| 7,384 | 38.918919 | 110 | py |
airflow | airflow-main/airflow/providers/apache/hive/operators/hive_stats.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import warnings
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Callable, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook
from airflow.providers.mysql.hooks.mysql import MySqlHook
from airflow.providers.presto.hooks.presto import PrestoHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class HiveStatsCollectionOperator(BaseOperator):
"""Gather partition statistics and insert them into MySQL.
    Statistics are gathered with a dynamically generated Presto query and
    inserted into a MySQL table with the format shown below. Stats overwrite
    themselves if you rerun the same date/partition.
.. code-block:: sql
CREATE TABLE hive_stats (
ds VARCHAR(16),
table_name VARCHAR(500),
metric VARCHAR(200),
value BIGINT
);
:param metastore_conn_id: Reference to the
:ref:`Hive Metastore connection id <howto/connection:hive_metastore>`.
:param table: the source table, in the format ``database.table_name``. (templated)
:param partition: the source partition. (templated)
    :param extra_exprs: dict of expressions to run against the table where
keys are metric names and values are Presto compatible expressions
:param excluded_columns: list of columns to exclude, consider
excluding blobs, large json columns, ...
:param assignment_func: a function that receives a column name and
        a type, and returns a dict of metric names and Presto expressions.
If None is returned, the global defaults are applied. If an
empty dictionary is returned, no stats are computed for that
column.
"""
template_fields: Sequence[str] = ("table", "partition", "ds", "dttm")
ui_color = "#aff7a6"
def __init__(
self,
*,
table: str,
partition: Any,
extra_exprs: dict[str, Any] | None = None,
excluded_columns: list[str] | None = None,
assignment_func: Callable[[str, str], dict[Any, Any] | None] | None = None,
metastore_conn_id: str = "metastore_default",
presto_conn_id: str = "presto_default",
mysql_conn_id: str = "airflow_db",
**kwargs: Any,
) -> None:
if "col_blacklist" in kwargs:
warnings.warn(
f"col_blacklist kwarg passed to {self.__class__.__name__} "
f"(task_id: {kwargs.get('task_id')}) is deprecated, "
f"please rename it to excluded_columns instead",
category=FutureWarning,
stacklevel=2,
)
excluded_columns = kwargs.pop("col_blacklist")
super().__init__(**kwargs)
self.table = table
self.partition = partition
self.extra_exprs = extra_exprs or {}
self.excluded_columns: list[str] = excluded_columns or []
self.metastore_conn_id = metastore_conn_id
self.presto_conn_id = presto_conn_id
self.mysql_conn_id = mysql_conn_id
self.assignment_func = assignment_func
self.ds = "{{ ds }}"
self.dttm = "{{ execution_date.isoformat() }}"
def get_default_exprs(self, col: str, col_type: str) -> dict[Any, Any]:
"""Get default expressions."""
if col in self.excluded_columns:
return {}
exp = {(col, "non_null"): f"COUNT({col})"}
if col_type in {"double", "int", "bigint", "float"}:
exp[(col, "sum")] = f"SUM({col})"
exp[(col, "min")] = f"MIN({col})"
exp[(col, "max")] = f"MAX({col})"
exp[(col, "avg")] = f"AVG({col})"
elif col_type == "boolean":
exp[(col, "true")] = f"SUM(CASE WHEN {col} THEN 1 ELSE 0 END)"
exp[(col, "false")] = f"SUM(CASE WHEN NOT {col} THEN 1 ELSE 0 END)"
elif col_type == "string":
exp[(col, "len")] = f"SUM(CAST(LENGTH({col}) AS BIGINT))"
exp[(col, "approx_distinct")] = f"APPROX_DISTINCT({col})"
return exp
def execute(self, context: Context) -> None:
metastore = HiveMetastoreHook(metastore_conn_id=self.metastore_conn_id)
table = metastore.get_table(table_name=self.table)
field_types = {col.name: col.type for col in table.sd.cols}
exprs: Any = {("", "count"): "COUNT(*)"}
for col, col_type in list(field_types.items()):
if self.assignment_func:
assign_exprs = self.assignment_func(col, col_type)
if assign_exprs is None:
assign_exprs = self.get_default_exprs(col, col_type)
else:
assign_exprs = self.get_default_exprs(col, col_type)
exprs.update(assign_exprs)
exprs.update(self.extra_exprs)
exprs = OrderedDict(exprs)
exprs_str = ",\n ".join(f"{v} AS {k[0]}__{k[1]}" for k, v in exprs.items())
where_clause_ = [f"{k} = '{v}'" for k, v in self.partition.items()]
where_clause = " AND\n ".join(where_clause_)
sql = f"SELECT {exprs_str} FROM {self.table} WHERE {where_clause};"
presto = PrestoHook(presto_conn_id=self.presto_conn_id)
self.log.info("Executing SQL check: %s", sql)
row = presto.get_first(sql)
self.log.info("Record: %s", row)
if not row:
raise AirflowException("The query returned None")
part_json = json.dumps(self.partition, sort_keys=True)
self.log.info("Deleting rows from previous runs if they exist")
mysql = MySqlHook(self.mysql_conn_id)
sql = f"""
SELECT 1 FROM hive_stats
WHERE
table_name='{self.table}' AND
partition_repr='{part_json}' AND
dttm='{self.dttm}'
LIMIT 1;
"""
if mysql.get_records(sql):
sql = f"""
DELETE FROM hive_stats
WHERE
table_name='{self.table}' AND
partition_repr='{part_json}' AND
dttm='{self.dttm}';
"""
mysql.run(sql)
self.log.info("Pivoting and loading cells into the Airflow db")
rows = [
(self.ds, self.dttm, self.table, part_json) + (r[0][0], r[0][1], r[1]) for r in zip(exprs, row)
]
mysql.insert_rows(
table="hive_stats",
rows=rows,
target_fields=[
"ds",
"dttm",
"table_name",
"partition_repr",
"col",
"metric",
"value",
],
)
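# --- Illustrative usage sketch (not part of the provider API) ----------------
# A minimal, hedged example of collecting partition statistics for one table.
# The dag id, table, excluded column and connection ids are hypothetical
# placeholders, and Airflow 2.4+ is assumed for the ``schedule`` argument.
def _example_hive_stats_dag():
    import pendulum
    from airflow import DAG
    with DAG(
        dag_id="example_hive_stats",  # hypothetical dag id
        start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
        schedule="@daily",
        catchup=False,
    ) as dag:
        HiveStatsCollectionOperator(
            task_id="collect_stats",
            table="my_db.events",  # hypothetical table
            partition={"ds": "{{ ds }}"},
            excluded_columns=["payload_json"],  # hypothetical blob-like column
            metastore_conn_id="metastore_default",
            presto_conn_id="presto_default",
            mysql_conn_id="airflow_db",
        )
    return dag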
| 7,502 | 38.489474 | 107 | py |
airflow | airflow-main/airflow/providers/apache/hive/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/hive/hooks/hive.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import contextlib
import os
import re
import socket
import subprocess
import time
import warnings
from collections import OrderedDict
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Any, Iterable, Mapping
from airflow.exceptions import AirflowProviderDeprecationWarning
try:
import pandas
except ImportError as e:
from airflow.exceptions import AirflowOptionalProviderFeatureException
raise AirflowOptionalProviderFeatureException(e)
import csv
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.providers.common.sql.hooks.sql import DbApiHook
from airflow.security import utils
from airflow.utils.helpers import as_flattened_list
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING
HIVE_QUEUE_PRIORITIES = ["VERY_HIGH", "HIGH", "NORMAL", "LOW", "VERY_LOW"]
def get_context_from_env_var() -> dict[Any, Any]:
"""
    Extract context from env variables (dag_id, task_id, etc.) for use in BashOperator and PythonOperator.
:return: The context of interest.
"""
return {
format_map["default"]: os.environ.get(format_map["env_var_format"], "")
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()
}
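# --- Illustrative usage sketch (not part of this module's API) ---------------
# A small, hedged example of how the helper above is typically consumed. The
# values come from the AIRFLOW_CTX_* environment variables set by the task
# runner, so outside of a running task most entries are empty strings; the
# function below is a hypothetical placeholder, not an Airflow API.
def _example_context_from_env_var() -> tuple[str, str]:
    context = get_context_from_env_var()
    dag_id = context.get("airflow.ctx.dag_id", "")
    task_id = context.get("airflow.ctx.task_id", "")
    return dag_id, task_id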
class HiveCliHook(BaseHook):
"""Simple wrapper around the hive CLI.
    It also supports ``beeline``,
    a lighter CLI that runs JDBC and is replacing the heavier
    traditional CLI. To enable ``beeline``, set the use_beeline param in the
    extra field of your connection as in ``{ "use_beeline": true }``.
    Note that you can also set default hive CLI parameters by passing
    ``hive_cli_params``, a space separated list of parameters to add to the hive command.
    The extra connection parameter ``auth`` gets passed into the ``jdbc``
    connection string as is.
:param hive_cli_conn_id: Reference to the
:ref:`Hive CLI connection id <howto/connection:hive_cli>`.
:param mapred_queue: queue used by the Hadoop Scheduler (Capacity or Fair)
:param mapred_queue_priority: priority within the job queue.
Possible settings include: VERY_HIGH, HIGH, NORMAL, LOW, VERY_LOW
:param mapred_job_name: This name will appear in the jobtracker.
This can make monitoring easier.
:param hive_cli_params: Space separated list of hive command parameters to add to the
hive command.
"""
conn_name_attr = "hive_cli_conn_id"
default_conn_name = "hive_cli_default"
conn_type = "hive_cli"
hook_name = "Hive Client Wrapper"
def __init__(
self,
hive_cli_conn_id: str = default_conn_name,
run_as: str | None = None,
mapred_queue: str | None = None,
mapred_queue_priority: str | None = None,
mapred_job_name: str | None = None,
hive_cli_params: str = "",
auth: str | None = None,
) -> None:
super().__init__()
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params: str = hive_cli_params
self.use_beeline: bool = conn.extra_dejson.get("use_beeline", False)
self.auth = auth
self.conn = conn
self.run_as = run_as
self.sub_process: Any = None
if mapred_queue_priority:
mapred_queue_priority = mapred_queue_priority.upper()
if mapred_queue_priority not in HIVE_QUEUE_PRIORITIES:
raise AirflowException(
f"Invalid Mapred Queue Priority. Valid values are: {', '.join(HIVE_QUEUE_PRIORITIES)}"
)
self.mapred_queue = mapred_queue or conf.get("hive", "default_hive_mapred_queue")
self.mapred_queue_priority = mapred_queue_priority
self.mapred_job_name = mapred_job_name
def _get_proxy_user(self) -> str:
"""This function set the proper proxy_user value in case the user overwrite the default."""
conn = self.conn
proxy_user_value: str = conn.extra_dejson.get("proxy_user", "")
if proxy_user_value == "login" and conn.login:
return f"hive.server2.proxy.user={conn.login}"
if proxy_user_value == "owner" and self.run_as:
return f"hive.server2.proxy.user={self.run_as}"
if proxy_user_value != "": # There is a custom proxy user
return f"hive.server2.proxy.user={proxy_user_value}"
return proxy_user_value # The default proxy user (undefined)
def _prepare_cli_cmd(self) -> list[Any]:
"""This function creates the command list from available information."""
conn = self.conn
hive_bin = "hive"
cmd_extra = []
if self.use_beeline:
hive_bin = "beeline"
self._validate_beeline_parameters(conn)
jdbc_url = f"jdbc:hive2://{conn.host}:{conn.port}/{conn.schema}"
if conf.get("core", "security") == "kerberos":
template = conn.extra_dejson.get("principal", "hive/[email protected]")
if "_HOST" in template:
template = utils.replace_hostname_pattern(utils.get_components(template))
proxy_user = self._get_proxy_user()
if ";" in template:
raise RuntimeError("The principal should not contain the ';' character")
if ";" in proxy_user:
raise RuntimeError("The proxy_user should not contain the ';' character")
jdbc_url += f";principal={template};{proxy_user}"
elif self.auth:
jdbc_url += ";auth=" + self.auth
jdbc_url = f'"{jdbc_url}"'
cmd_extra += ["-u", jdbc_url]
if conn.login:
cmd_extra += ["-n", conn.login]
if conn.password:
cmd_extra += ["-p", conn.password]
hive_params_list = self.hive_cli_params.split()
return [hive_bin] + cmd_extra + hive_params_list
def _validate_beeline_parameters(self, conn):
if ":" in conn.host or "/" in conn.host or ";" in conn.host:
raise Exception(
f"The host used in beeline command ({conn.host}) should not contain ':/;' characters)"
)
try:
int_port = int(conn.port)
if int_port <= 0 or int_port > 65535:
raise Exception(f"The port used in beeline command ({conn.port}) should be in range 0-65535)")
except (ValueError, TypeError) as e:
raise Exception(f"The port used in beeline command ({conn.port}) should be a valid integer: {e})")
if ";" in conn.schema:
raise Exception(
f"The schema used in beeline command ({conn.schema}) should not contain ';' character)"
)
@staticmethod
def _prepare_hiveconf(d: dict[Any, Any]) -> list[Any]:
"""
Prepares a list of hiveconf params from a dictionary of key value pairs.
        :param d: dict of key/value pairs to convert into ``-hiveconf`` arguments
>>> hh = HiveCliHook()
>>> hive_conf = {"hive.exec.dynamic.partition": "true",
... "hive.exec.dynamic.partition.mode": "nonstrict"}
>>> hh._prepare_hiveconf(hive_conf)
["-hiveconf", "hive.exec.dynamic.partition=true",\
"-hiveconf", "hive.exec.dynamic.partition.mode=nonstrict"]
"""
if not d:
return []
return as_flattened_list(zip(["-hiveconf"] * len(d), [f"{k}={v}" for k, v in d.items()]))
def run_cli(
self,
hql: str,
schema: str | None = None,
verbose: bool = True,
hive_conf: dict[Any, Any] | None = None,
) -> Any:
"""
Run an hql statement using the hive cli.
If hive_conf is specified it should be a dict and the entries
will be set as key/value pairs in HiveConf.
:param hql: an hql (hive query language) statement to run with hive cli
:param schema: Name of hive schema (database) to use
:param verbose: Provides additional logging. Defaults to True.
:param hive_conf: if specified these key value pairs will be passed
to hive as ``-hiveconf "key"="value"``. Note that they will be
passed after the ``hive_cli_params`` and thus will override
whatever values are specified in the database.
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
invalid_chars_list = re.findall(r"[^a-z0-9_]", schema)
if invalid_chars_list:
invalid_chars = "".join(char for char in invalid_chars_list)
raise RuntimeError(f"The schema `{schema}` contains invalid characters: {invalid_chars}")
if schema:
hql = f"USE {schema};\n{hql}"
with TemporaryDirectory(prefix="airflow_hiveop_") as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
hql += "\n"
f.write(hql.encode("UTF-8"))
f.flush()
hive_cmd = self._prepare_cli_cmd()
env_context = get_context_from_env_var()
# Only extend the hive_conf if it is defined.
if hive_conf:
env_context.update(hive_conf)
hive_conf_params = self._prepare_hiveconf(env_context)
if self.mapred_queue:
hive_conf_params.extend(
[
"-hiveconf",
f"mapreduce.job.queuename={self.mapred_queue}",
"-hiveconf",
f"mapred.job.queue.name={self.mapred_queue}",
"-hiveconf",
f"tez.queue.name={self.mapred_queue}",
]
)
if self.mapred_queue_priority:
hive_conf_params.extend(
["-hiveconf", f"mapreduce.job.priority={self.mapred_queue_priority}"]
)
if self.mapred_job_name:
hive_conf_params.extend(["-hiveconf", f"mapred.job.name={self.mapred_job_name}"])
hive_cmd.extend(hive_conf_params)
hive_cmd.extend(["-f", f.name])
if verbose:
self.log.info("%s", " ".join(hive_cmd))
sub_process: Any = subprocess.Popen(
hive_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmp_dir, close_fds=True
)
self.sub_process = sub_process
stdout = ""
while True:
line = sub_process.stdout.readline()
if not line:
break
stdout += line.decode("UTF-8")
if verbose:
self.log.info(line.decode("UTF-8").strip())
sub_process.wait()
if sub_process.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql: str) -> None:
"""Test an hql statement using the hive cli and EXPLAIN."""
create, insert, other = [], [], []
for query in hql.split(";"): # naive
query_original = query
query = query.lower().strip()
if query.startswith("create table"):
create.append(query_original)
elif query.startswith(("set ", "add jar ", "create temporary function")):
other.append(query_original)
elif query.startswith("insert"):
insert.append(query_original)
other_ = ";".join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = " ".join(query.split())[:50]
self.log.info("Testing HQL [%s (...)]", query_preview)
if query_set == insert:
query = other_ + "; explain " + query
else:
query = "explain " + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split("\n")[-2]
self.log.info(message)
error_loc = re.search(r"(\d+):(\d+)", message)
if error_loc and error_loc.group(1).isdigit():
lst = int(error_loc.group(1))
begin = max(lst - 2, 0)
end = min(lst + 3, len(query.split("\n")))
context = "\n".join(query.split("\n")[begin:end])
self.log.info("Context :\n %s", context)
else:
self.log.info("SUCCESS")
def load_df(
self,
df: pandas.DataFrame,
table: str,
field_dict: dict[Any, Any] | None = None,
delimiter: str = ",",
encoding: str = "utf8",
pandas_kwargs: Any = None,
**kwargs: Any,
) -> None:
"""
Loads a pandas DataFrame into hive.
        Hive data types will be inferred if not passed, but column names will
not be sanitized.
:param df: DataFrame to load into a Hive table
:param table: target Hive table, use dot notation to target a
specific database
:param field_dict: mapping from column name to hive data type.
Note that it must be OrderedDict so as to keep columns' order.
:param delimiter: field delimiter in the file
:param encoding: str encoding to use when writing DataFrame to file
:param pandas_kwargs: passed to DataFrame.to_csv
:param kwargs: passed to self.load_file
"""
def _infer_field_types_from_df(df: pandas.DataFrame) -> dict[Any, Any]:
dtype_kind_hive_type = {
"b": "BOOLEAN", # boolean
"i": "BIGINT", # signed integer
"u": "BIGINT", # unsigned integer
"f": "DOUBLE", # floating-point
"c": "STRING", # complex floating-point
"M": "TIMESTAMP", # datetime
"O": "STRING", # object
"S": "STRING", # (byte-)string
"U": "STRING", # Unicode
"V": "STRING", # void
}
order_type = OrderedDict()
            for col, dtype in df.dtypes.items():
order_type[col] = dtype_kind_hive_type[dtype.kind]
return order_type
if pandas_kwargs is None:
pandas_kwargs = {}
with TemporaryDirectory(prefix="airflow_hiveop_") as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir, mode="w") as f:
if field_dict is None:
field_dict = _infer_field_types_from_df(df)
df.to_csv(
path_or_buf=f,
sep=delimiter,
header=False,
index=False,
encoding=encoding,
date_format="%Y-%m-%d %H:%M:%S",
**pandas_kwargs,
)
f.flush()
return self.load_file(
filepath=f.name, table=table, delimiter=delimiter, field_dict=field_dict, **kwargs
)
def load_file(
self,
filepath: str,
table: str,
delimiter: str = ",",
field_dict: dict[Any, Any] | None = None,
create: bool = True,
overwrite: bool = True,
partition: dict[str, Any] | None = None,
recreate: bool = False,
tblproperties: dict[str, Any] | None = None,
) -> None:
"""
Loads a local file into Hive.
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
        large amount of data is loaded and/or if the table gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param filepath: local filepath of the file to load
:param table: target Hive table, use dot notation to target a
specific database
:param delimiter: field delimiter in the file
:param field_dict: A dictionary of the fields name in the file
as keys and their Hive types as values.
Note that it must be OrderedDict so as to keep columns' order.
:param create: whether to create the table if it doesn't exist
:param overwrite: whether to overwrite the data in table or partition
:param partition: target partition as a dict of partition columns
and values
:param recreate: whether to drop and recreate the table at every
execution
:param tblproperties: TBLPROPERTIES of the hive table being created
"""
hql = ""
if recreate:
hql += f"DROP TABLE IF EXISTS {table};\n"
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(f"`{k.strip('`')}` {v}" for k, v in field_dict.items())
hql += f"CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(p + " STRING" for p in partition)
hql += f"PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += f"FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(f"'{k}'='{v}'" for k, v in tblproperties.items())
hql += f"TBLPROPERTIES({tprops})\n"
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = f"LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += f"INTO TABLE {table} "
if partition:
pvals = ", ".join(f"{k}='{v}'" for k, v in partition.items())
hql += f"PARTITION ({pvals})"
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += ";\n"
self.log.info(hql)
self.run_cli(hql)
def kill(self) -> None:
"""Kill Hive cli command."""
if hasattr(self, "sub_process"):
if self.sub_process.poll() is None:
print("Killing the Hive job")
self.sub_process.terminate()
time.sleep(60)
self.sub_process.kill()
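# --- Illustrative usage sketch (not part of the hook API) --------------------
# A minimal, hedged example of using HiveCliHook to run HQL and to load a small
# pandas DataFrame into a table. The connection id, schema, queue and table
# names are hypothetical placeholders and assume a working Hive CLI connection.
def _example_hive_cli_hook_usage():
    hook = HiveCliHook(hive_cli_conn_id="hive_cli_default")
    # Run a statement against a specific schema, passing extra hive conf.
    hook.run_cli(
        hql="SHOW TABLES;",
        schema="default",
        hive_conf={"tez.queue.name": "default"},  # hypothetical queue
    )
    # Load a tiny DataFrame, letting the hook infer the Hive column types.
    df = pandas.DataFrame({"name": ["a", "b"], "num": [1, 2]})
    hook.load_df(df=df, table="default.tiny_example", recreate=True)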
class HiveMetastoreHook(BaseHook):
"""
Wrapper to interact with the Hive Metastore.
:param metastore_conn_id: reference to the
:ref: `metastore thrift service connection id <howto/connection:hive_metastore>`.
"""
# java short max val
MAX_PART_COUNT = 32767
conn_name_attr = "metastore_conn_id"
default_conn_name = "metastore_default"
conn_type = "hive_metastore"
hook_name = "Hive Metastore Thrift"
def __init__(self, metastore_conn_id: str = default_conn_name) -> None:
super().__init__()
self.conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self) -> dict[str, Any]:
# This is for pickling to work despite the thrift hive client not
# being picklable
state = dict(self.__dict__)
del state["metastore"]
return state
def __setstate__(self, d: dict[str, Any]) -> None:
self.__dict__.update(d)
self.__dict__["metastore"] = self.get_metastore_client()
def get_metastore_client(self) -> Any:
"""Returns a Hive thrift client."""
import hmsclient
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket, TTransport
host = self._find_valid_host()
conn = self.conn
if not host:
raise AirflowException("Failed to locate the valid server.")
if "authMechanism" in conn.extra_dejson:
warnings.warn(
"The 'authMechanism' option is deprecated. Please use 'auth_mechanism'.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
conn.extra_dejson["auth_mechanism"] = conn.extra_dejson["authMechanism"]
del conn.extra_dejson["authMechanism"]
auth_mechanism = conn.extra_dejson.get("auth_mechanism", "NOSASL")
if conf.get("core", "security") == "kerberos":
auth_mechanism = conn.extra_dejson.get("auth_mechanism", "GSSAPI")
kerberos_service_name = conn.extra_dejson.get("kerberos_service_name", "hive")
conn_socket = TSocket.TSocket(host, conn.port)
if conf.get("core", "security") == "kerberos" and auth_mechanism == "GSSAPI":
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory() -> sasl.Client:
sasl_client = sasl.Client()
sasl_client.setAttr("host", host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", conn_socket)
else:
transport = TTransport.TBufferedTransport(conn_socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
def _find_valid_host(self) -> Any:
conn = self.conn
hosts = conn.host.split(",")
for host in hosts:
host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.log.info("Trying to connect to %s:%s", host, conn.port)
if host_socket.connect_ex((host, conn.port)) == 0:
self.log.info("Connected to %s:%s", host, conn.port)
host_socket.close()
return host
else:
self.log.error("Could not connect to %s:%s", host, conn.port)
return None
def get_conn(self) -> Any:
return self.metastore
def check_for_partition(self, schema: str, table: str, partition: str) -> bool:
"""
Checks whether a partition exists.
:param schema: Name of hive schema (database) @table belongs to
:param table: Name of hive table @partition belongs to
:param partition: Expression that matches the partitions to check for
(eg `a = 'b' AND c = 'd'`)
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
"""
with self.metastore as client:
partitions = client.get_partitions_by_filter(
schema, table, partition, HiveMetastoreHook.MAX_PART_COUNT
)
return bool(partitions)
def check_for_named_partition(self, schema: str, table: str, partition_name: str) -> Any:
"""
Checks whether a partition with a given name exists.
:param schema: Name of hive schema (database) @table belongs to
:param table: Name of hive table @partition belongs to
:param partition_name: Name of the partitions to check for (eg `a=b/c=d`)
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_named_partition('airflow', t, "ds=2015-01-01")
True
>>> hh.check_for_named_partition('airflow', t, "ds=xxx")
False
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
def get_table(self, table_name: str, db: str = "default") -> Any:
"""Get a metastore table object.
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
"""
if db == "default" and "." in table_name:
db, table_name = table_name.split(".")[:2]
with self.metastore as client:
return client.get_table(dbname=db, tbl_name=table_name)
def get_tables(self, db: str, pattern: str = "*") -> Any:
"""Get a metastore table object."""
with self.metastore as client:
tables = client.get_tables(db_name=db, pattern=pattern)
return client.get_table_objects_by_name(db, tables)
def get_databases(self, pattern: str = "*") -> Any:
"""Get a metastore table object."""
with self.metastore as client:
return client.get_databases(pattern)
def get_partitions(self, schema: str, table_name: str, partition_filter: str | None = None) -> list[Any]:
"""
Returns a list of all partitions in a table.
        Works only for tables with fewer than 32767 partitions (the java short max val).
        For a subpartitioned table, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if partition_filter:
parts = client.get_partitions_by_filter(
db_name=schema,
tbl_name=table_name,
filter=partition_filter,
max_parts=HiveMetastoreHook.MAX_PART_COUNT,
)
else:
parts = client.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=HiveMetastoreHook.MAX_PART_COUNT
)
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
@staticmethod
def _get_max_partition_from_part_specs(
part_specs: list[Any], partition_key: str | None, filter_map: dict[str, Any] | None
) -> Any:
"""
        Helper method to get the max partition, among partitions with the given partition_key, from part specs.
        key:value pairs in filter_map will be used to filter out partitions.
:param part_specs: list of partition specs.
:param partition_key: partition key name.
:param filter_map: partition_key:partition_value map used for partition filtering,
e.g. {'key1': 'value1', 'key2': 'value2'}.
Only partitions matching all partition_key:partition_value
pairs will be considered as candidates of max partition.
:return: Max partition or None if part_specs is empty.
"""
if not part_specs:
return None
# Assuming all specs have the same keys.
if partition_key not in part_specs[0].keys():
raise AirflowException(f"Provided partition_key {partition_key} is not in part_specs.")
is_subset = None
if filter_map:
is_subset = set(filter_map.keys()).issubset(set(part_specs[0].keys()))
if filter_map and not is_subset:
raise AirflowException(
f"Keys in provided filter_map {', '.join(filter_map.keys())} "
f"are not subset of part_spec keys: {', '.join(part_specs[0].keys())}"
)
candidates = [
p_dict[partition_key]
for p_dict in part_specs
if filter_map is None or all(item in p_dict.items() for item in filter_map.items())
]
if not candidates:
return None
else:
return max(candidates)
def max_partition(
self,
schema: str,
table_name: str,
field: str | None = None,
filter_map: dict[Any, Any] | None = None,
) -> Any:
"""
        Returns the maximum value for all partitions with the given field in a table.
        If only one partition key exists in the table, that key will be used as the field.
filter_map should be a partition_key:partition_value map and will be used to
filter out partitions.
:param schema: schema name.
:param table_name: table name.
:param field: partition key to get max partition from.
:param filter_map: partition_key:partition_value map used for partition filtering.
>>> hh = HiveMetastoreHook()
>>> filter_map = {'ds': '2015-01-01'}
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow',\
... table_name=t, field='ds', filter_map=filter_map)
'2015-01-01'
"""
with self.metastore as client:
table = client.get_table(dbname=schema, tbl_name=table_name)
key_name_set = {key.name for key in table.partitionKeys}
if len(table.partitionKeys) == 1:
field = table.partitionKeys[0].name
elif not field:
raise AirflowException("Please specify the field you want the max value for.")
elif field not in key_name_set:
raise AirflowException("Provided field is not a partition key.")
if filter_map and not set(filter_map.keys()).issubset(key_name_set):
raise AirflowException("Provided filter_map contains keys that are not partition key.")
part_names = client.get_partition_names(
schema, table_name, max_parts=HiveMetastoreHook.MAX_PART_COUNT
)
part_specs = [client.partition_name_to_spec(part_name) for part_name in part_names]
return HiveMetastoreHook._get_max_partition_from_part_specs(part_specs, field, filter_map)
def table_exists(self, table_name: str, db: str = "default") -> bool:
"""
Check if table exists.
>>> hh = HiveMetastoreHook()
>>> hh.table_exists(db='airflow', table_name='static_babynames')
True
>>> hh.table_exists(db='airflow', table_name='does_not_exist')
False
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
def drop_partitions(self, table_name, part_vals, delete_data=False, db="default"):
"""
Drop partitions from the given table matching the part_vals input.
:param table_name: table name.
:param part_vals: list of partition specs.
        :param delete_data: Setting to control whether the underlying data has to be deleted
            in addition to dropping partitions.
:param db: Name of hive schema (database) @table belongs to
>>> hh = HiveMetastoreHook()
        >>> hh.drop_partitions(db='airflow', table_name='static_babynames',
        ... part_vals=['2020-05-01'])
True
"""
if self.table_exists(table_name, db):
with self.metastore as client:
self.log.info(
"Dropping partition of table %s.%s matching the spec: %s", db, table_name, part_vals
)
return client.drop_partition(db, table_name, part_vals, delete_data)
else:
self.log.info("Table %s.%s does not exist!", db, table_name)
return False
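# --- Illustrative usage sketch (not part of the hook API) --------------------
# A minimal, hedged example of common metastore checks. The schema, table,
# partition filter and connection id are hypothetical placeholders and assume
# a reachable metastore thrift service.
def _example_hive_metastore_hook_usage():
    hook = HiveMetastoreHook(metastore_conn_id="metastore_default")
    if not hook.table_exists("events", db="my_db"):  # hypothetical table
        return None
    # Check one concrete partition, then look up the most recent 'ds' value.
    has_partition = hook.check_for_partition("my_db", "events", "ds='2015-01-01'")
    latest_ds = hook.max_partition(schema="my_db", table_name="events", field="ds")
    return has_partition, latest_ds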
class HiveServer2Hook(DbApiHook):
"""
Wrapper around the pyhive library.
Notes:
* the default auth_mechanism is PLAIN, to override it you
can specify it in the ``extra`` of your connection in the UI
* the default for run_set_variable_statements is true, if you
are using impala you may need to set it to false in the
``extra`` of your connection in the UI
:param hiveserver2_conn_id: Reference to the
:ref: `Hive Server2 thrift service connection id <howto/connection:hiveserver2>`.
:param schema: Hive database name.
"""
conn_name_attr = "hiveserver2_conn_id"
default_conn_name = "hiveserver2_default"
conn_type = "hiveserver2"
hook_name = "Hive Server 2 Thrift"
supports_autocommit = False
def get_conn(self, schema: str | None = None) -> Any:
"""Returns a Hive connection object."""
username: str | None = None
password: str | None = None
db = self.get_connection(self.hiveserver2_conn_id) # type: ignore
if "authMechanism" in db.extra_dejson:
warnings.warn(
"The 'authMechanism' option is deprecated. Please use 'auth_mechanism'.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
db.extra_dejson["auth_mechanism"] = db.extra_dejson["authMechanism"]
del db.extra_dejson["authMechanism"]
auth_mechanism = db.extra_dejson.get("auth_mechanism", "NONE")
if auth_mechanism == "NONE" and db.login is None:
# we need to give a username
username = "airflow"
kerberos_service_name = None
if conf.get("core", "security") == "kerberos":
auth_mechanism = db.extra_dejson.get("auth_mechanism", "KERBEROS")
kerberos_service_name = db.extra_dejson.get("kerberos_service_name", "hive")
            # pyhive uses GSSAPI instead of KERBEROS as an auth_mechanism identifier
if auth_mechanism == "GSSAPI":
self.log.warning(
"Detected deprecated 'GSSAPI' for auth_mechanism for %s. Please use 'KERBEROS' instead",
self.hiveserver2_conn_id, # type: ignore
)
auth_mechanism = "KERBEROS"
# Password should be set if and only if in LDAP or CUSTOM mode
if auth_mechanism in ("LDAP", "CUSTOM"):
password = db.password
from pyhive.hive import connect
return connect(
host=db.host,
port=db.port,
auth=auth_mechanism,
kerberos_service_name=kerberos_service_name,
username=db.login or username,
password=password,
database=schema or db.schema or "default",
)
def _get_results(
self,
sql: str | list[str],
schema: str = "default",
fetch_size: int | None = None,
hive_conf: Iterable | Mapping | None = None,
) -> Any:
from pyhive.exc import ProgrammingError
if isinstance(sql, str):
sql = [sql]
previous_description = None
with contextlib.closing(self.get_conn(schema)) as conn, contextlib.closing(conn.cursor()) as cur:
cur.arraysize = fetch_size or 1000
# not all query services (e.g. impala AIRFLOW-4434) support the set command
db = self.get_connection(self.hiveserver2_conn_id) # type: ignore
if db.extra_dejson.get("run_set_variable_statements", True):
env_context = get_context_from_env_var()
if hive_conf:
env_context.update(hive_conf)
for k, v in env_context.items():
cur.execute(f"set {k}={v}")
for statement in sql:
cur.execute(statement)
                # we only get results of statements that return results
lowered_statement = statement.lower().strip()
if (
lowered_statement.startswith("select")
or lowered_statement.startswith("with")
or lowered_statement.startswith("show")
or (lowered_statement.startswith("set") and "=" not in lowered_statement)
):
description = cur.description
if previous_description and previous_description != description:
message = f"""The statements are producing different descriptions:
Current: {repr(description)}
Previous: {repr(previous_description)}"""
raise ValueError(message)
elif not previous_description:
previous_description = description
yield description
try:
# DB API 2 raises when no results are returned
# we're silencing here as some statements in the list
# may be `SET` or DDL
yield from cur
except ProgrammingError:
self.log.debug("get_results returned no records")
def get_results(
self,
sql: str | list[str],
schema: str = "default",
fetch_size: int | None = None,
hive_conf: Iterable | Mapping | None = None,
) -> dict[str, Any]:
"""
Get results of the provided hql in target schema.
:param sql: hql to be executed.
:param schema: target schema, default to 'default'.
:param fetch_size: max size of result to fetch.
        :param hive_conf: hive_conf to execute along with the hql.
:return: results of hql execution, dict with data (list of results) and header
"""
results_iter = self._get_results(sql, schema, fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
results = {"data": list(results_iter), "header": header}
return results
def to_csv(
self,
sql: str,
csv_filepath: str,
schema: str = "default",
delimiter: str = ",",
lineterminator: str = "\r\n",
output_header: bool = True,
fetch_size: int = 1000,
hive_conf: dict[Any, Any] | None = None,
) -> None:
"""
Execute hql in target schema and write results to a csv file.
:param sql: hql to be executed.
:param csv_filepath: filepath of csv to write results into.
:param schema: target schema, default to 'default'.
:param delimiter: delimiter of the csv file, default to ','.
:param lineterminator: lineterminator of the csv file.
:param output_header: header of the csv file, default to True.
:param fetch_size: number of result rows to write into the csv file, default to 1000.
        :param hive_conf: hive_conf to execute along with the hql.
"""
results_iter = self._get_results(sql, schema, fetch_size=fetch_size, hive_conf=hive_conf)
header = next(results_iter)
message = None
i = 0
with open(csv_filepath, "w", encoding="utf-8") as file:
writer = csv.writer(file, delimiter=delimiter, lineterminator=lineterminator)
try:
if output_header:
self.log.debug("Cursor description is %s", header)
writer.writerow([c[0] for c in header])
for i, row in enumerate(results_iter, 1):
writer.writerow(row)
if i % fetch_size == 0:
self.log.info("Written %s rows so far.", i)
except ValueError as exception:
message = str(exception)
if message:
# need to clean up the file first
os.remove(csv_filepath)
raise ValueError(message)
self.log.info("Done. Loaded a total of %s rows.", i)
def get_records(
self, sql: str | list[str], parameters: Iterable | Mapping | None = None, **kwargs
) -> Any:
"""
Get a set of records from a Hive query; optionally pass a 'schema' kwarg to specify target schema.
:param sql: hql to be executed.
:param parameters: optional configuration passed to get_results
:return: result of hive execution
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
"""
schema = kwargs["schema"] if "schema" in kwargs else "default"
return self.get_results(sql, schema=schema, hive_conf=parameters)["data"]
def get_pandas_df( # type: ignore
self,
sql: str,
schema: str = "default",
hive_conf: dict[Any, Any] | None = None,
**kwargs,
) -> pandas.DataFrame:
"""
Get a pandas dataframe from a Hive query.
:param sql: hql to be executed.
:param schema: target schema, default to 'default'.
        :param hive_conf: hive_conf to execute along with the hql.
:param kwargs: (optional) passed into pandas.DataFrame constructor
        :return: pandas.DataFrame with the results of the hql execution
        >>> hh = HiveServer2Hook()
        >>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
        >>> df = hh.get_pandas_df(sql)
        >>> len(df.index)
        100
"""
res = self.get_results(sql, schema=schema, hive_conf=hive_conf)
df = pandas.DataFrame(res["data"], columns=[c[0] for c in res["header"]], **kwargs)
return df
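# --- Illustrative usage sketch (not part of the hook API) --------------------
# A minimal, hedged example of querying HiveServer2 and consuming the results
# both as the raw dict returned by ``get_results`` and as a pandas DataFrame.
# The SQL, schema and connection id are hypothetical placeholders.
def _example_hive_server2_hook_usage():
    hook = HiveServer2Hook(hiveserver2_conn_id="hiveserver2_default")
    sql = "SELECT name, num FROM my_db.tiny_example LIMIT 10"  # hypothetical query
    results = hook.get_results(sql, schema="my_db")
    column_names = [description[0] for description in results["header"]]
    rows = results["data"]
    df = hook.get_pandas_df(sql, schema="my_db")
    return column_names, rows, df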
| 42,460 | 39.019793 | 110 | py |
airflow | airflow-main/airflow/providers/apache/hive/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/hive/sensors/metastore_partition.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.providers.common.sql.sensors.sql import SqlSensor
if TYPE_CHECKING:
from airflow.utils.context import Context
class MetastorePartitionSensor(SqlSensor):
"""
    An alternative to the HivePartitionSensor that talks directly to the MySQL db.
    This was created as a result of observing suboptimal queries generated by the
    Metastore thrift service when hitting subpartitioned tables. The Thrift service's
    queries were written in a way that would not leverage the indexes.
:param schema: the schema
:param table: the table
:param partition_name: the partition name, as defined in the PARTITIONS
table of the Metastore. Order of the fields does matter.
Examples: ``ds=2016-01-01`` or
``ds=2016-01-01/sub=foo`` for a sub partitioned table
:param mysql_conn_id: a reference to the MySQL conn_id for the metastore
"""
template_fields: Sequence[str] = ("partition_name", "table", "schema")
ui_color = "#8da7be"
def __init__(
self,
*,
table: str,
partition_name: str,
schema: str = "default",
mysql_conn_id: str = "metastore_mysql",
**kwargs: Any,
):
self.partition_name = partition_name
self.table = table
self.schema = schema
self.first_poke = True
self.conn_id = mysql_conn_id
# TODO(aoen): We shouldn't be using SqlSensor here but MetastorePartitionSensor.
# The problem is the way apply_defaults works isn't compatible with inheritance.
# The inheritance model needs to be reworked in order to support overriding args/
# kwargs with arguments here, then 'conn_id' and 'sql' can be passed into the
# constructor below and apply_defaults will no longer throw an exception.
super().__init__(**kwargs)
def poke(self, context: Context) -> Any:
if self.first_poke:
self.first_poke = False
if "." in self.table:
self.schema, self.table = self.table.split(".")
self.sql = """
SELECT 'X'
FROM PARTITIONS A0
LEFT OUTER JOIN TBLS B0 ON A0.TBL_ID = B0.TBL_ID
LEFT OUTER JOIN DBS C0 ON B0.DB_ID = C0.DB_ID
WHERE
B0.TBL_NAME = '{self.table}' AND
C0.NAME = '{self.schema}' AND
A0.PART_NAME = '{self.partition_name}';
""".format(
self=self
)
return super().poke(context)
| 3,386 | 37.488636 | 89 | py |
airflow | airflow-main/airflow/providers/apache/hive/sensors/hive_partition.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class HivePartitionSensor(BaseSensorOperator):
"""
Waits for a partition to show up in Hive.
Note: Because ``partition`` supports general logical operators, it
can be inefficient. Consider using NamedHivePartitionSensor instead if
you don't need the full flexibility of HivePartitionSensor.
:param table: The name of the table to wait for, supports the dot
notation (my_database.my_table)
:param partition: The partition clause to wait for. This is passed as
is to the metastore Thrift client ``get_partitions_by_filter`` method,
        and apparently supports SQL-like notation as in ``ds='2015-01-01'
AND type='value'`` and comparison operators as in ``"ds>=2015-01-01"``
:param metastore_conn_id: reference to the
        :ref:`metastore thrift service connection id <howto/connection:hive_metastore>`
"""
template_fields: Sequence[str] = (
"schema",
"table",
"partition",
)
ui_color = "#C5CAE9"
def __init__(
self,
*,
table: str,
partition: str | None = "ds='{{ ds }}'",
metastore_conn_id: str = "metastore_default",
schema: str = "default",
poke_interval: int = 60 * 3,
**kwargs: Any,
):
super().__init__(poke_interval=poke_interval, **kwargs)
if not partition:
partition = "ds='{{ ds }}'"
self.metastore_conn_id = metastore_conn_id
self.table = table
self.partition = partition
self.schema = schema
def poke(self, context: Context) -> bool:
if "." in self.table:
self.schema, self.table = self.table.split(".")
self.log.info("Poking for table %s.%s, partition %s", self.schema, self.table, self.partition)
        if not hasattr(self, "hook"):
            self.hook = HiveMetastoreHook(metastore_conn_id=self.metastore_conn_id)
        return self.hook.check_for_partition(self.schema, self.table, self.partition)
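# --- Illustrative usage (editor's addition): a minimal sketch of how HivePartitionSensor
# could be wired into a DAG. The dag_id, table name and partition filter are placeholder
# assumptions, not values taken from this module.
def _example_hive_partition_sensor_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_hive_partition", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        HivePartitionSensor(
            task_id="wait_for_ds_partition",
            table="my_database.my_table",
            partition="ds='{{ ds }}' AND type='full'",
            metastore_conn_id="metastore_default",
        )
    return dag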
| 3,039 | 37.481013 | 102 | py |
airflow | airflow-main/airflow/providers/apache/hive/sensors/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/hive/sensors/named_hive_partition.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class NamedHivePartitionSensor(BaseSensorOperator):
"""
Waits for a set of partitions to show up in Hive.
:param partition_names: List of fully qualified names of the
partitions to wait for. A fully qualified name is of the
form ``schema.table/pk1=pv1/pk2=pv2``, for example,
default.users/ds=2016-01-01. This is passed as is to the metastore
Thrift client ``get_partitions_by_name`` method. Note that
you cannot use logical or comparison operators as in
HivePartitionSensor.
:param metastore_conn_id: Reference to the
:ref:`metastore thrift service connection id <howto/connection:hive_metastore>`.
"""
template_fields: Sequence[str] = ("partition_names",)
ui_color = "#8d99ae"
def __init__(
self,
*,
partition_names: list[str],
metastore_conn_id: str = "metastore_default",
poke_interval: int = 60 * 3,
hook: Any = None,
**kwargs: Any,
):
super().__init__(poke_interval=poke_interval, **kwargs)
self.next_index_to_poke = 0
if isinstance(partition_names, str):
raise TypeError("partition_names must be an array of strings")
self.metastore_conn_id = metastore_conn_id
self.partition_names = partition_names
self.hook = hook
if self.hook and metastore_conn_id != "metastore_default":
self.log.warning(
"A hook was passed but a non default metastore_conn_id=%s was used", metastore_conn_id
)
@staticmethod
def parse_partition_name(partition: str) -> tuple[Any, ...]:
"""Get schema, table, and partition info."""
first_split = partition.split(".", 1)
if len(first_split) == 1:
schema = "default"
            table_partition = max(first_split)  # poor man's "first element"
else:
schema, table_partition = first_split
second_split = table_partition.split("/", 1)
if len(second_split) == 1:
raise ValueError(f"Could not parse {partition}into table, partition")
else:
table, partition = second_split
return schema, table, partition
def poke_partition(self, partition: str) -> Any:
"""Check for a named partition."""
if not self.hook:
from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook
self.hook = HiveMetastoreHook(metastore_conn_id=self.metastore_conn_id)
schema, table, partition = self.parse_partition_name(partition)
self.log.info("Poking for %s.%s/%s", schema, table, partition)
return self.hook.check_for_named_partition(schema, table, partition)
def poke(self, context: Context) -> bool:
number_of_partitions = len(self.partition_names)
poke_index_start = self.next_index_to_poke
for i in range(number_of_partitions):
self.next_index_to_poke = (poke_index_start + i) % number_of_partitions
if not self.poke_partition(self.partition_names[self.next_index_to_poke]):
return False
self.next_index_to_poke = 0
return True
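# --- Illustrative usage (editor's addition): a minimal sketch showing NamedHivePartitionSensor
# waiting on two fully qualified partition names. The dag_id and partition names are placeholder
# assumptions following the schema.table/pk1=pv1 form described in the docstring above.
def _example_named_hive_partition_sensor_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_named_hive_partitions", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        NamedHivePartitionSensor(
            task_id="wait_for_partitions",
            partition_names=[
                "default.users/ds={{ ds }}",
                "default.events/ds={{ ds }}/type=click",
            ],
            metastore_conn_id="metastore_default",
        )
    return dag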
| 4,159 | 37.518519 | 102 | py |
airflow | airflow-main/airflow/providers/apache/hdfs/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.1.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-hdfs:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,536 | 35.595238 | 120 | py |
airflow | airflow-main/airflow/providers/apache/hdfs/hooks/hdfs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow import AirflowException
from airflow.hooks.base import BaseHook
_EXCEPTION_MESSAGE = """The old HDFS Hooks have been removed in 4.0.0 version of the apache.hdfs provider.
Please convert your DAGs to use the WebHdfsHook or downgrade the provider to below 4.*
if you want to continue using it.
If you want to use earlier provider you can downgrade to latest released 3.* version
using `pip install apache-airflow-providers-hdfs==3.2.1` (no constraints)
"""
class HDFSHookException(AirflowException):
"""
This Exception has been removed and is not functional.
Please convert your DAGs to use the WebHdfsHook or downgrade the provider
    to below 4.* if you want to continue using it. If you want to use an earlier
    provider, you can downgrade to the latest released 3.* version using
    `pip install apache-airflow-providers-hdfs==3.2.1` (no constraints).
"""
def __init__(self, *args, **kwargs):
raise Exception(_EXCEPTION_MESSAGE)
class HDFSHook(BaseHook):
"""
This Hook has been removed and is not functional.
Please convert your DAGs to use the WebHdfsHook or downgrade the provider
    to below 4.* if you want to continue using it. If you want to use an earlier
    provider, you can downgrade to the latest released 3.* version using
`pip install apache-airflow-providers-hdfs==3.2.1` (no constraints).
"""
def __init__(self, *args, **kwargs):
raise Exception(_EXCEPTION_MESSAGE)
| 2,277 | 39.678571 | 106 | py |
airflow | airflow-main/airflow/providers/apache/hdfs/hooks/webhdfs.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for Web HDFS."""
from __future__ import annotations
import logging
import socket
from typing import Any
import requests
from hdfs import HdfsError, InsecureClient
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
log = logging.getLogger(__name__)
_kerberos_security_mode = conf.get("core", "security") == "kerberos"
if _kerberos_security_mode:
try:
from hdfs.ext.kerberos import KerberosClient
except ImportError:
log.error("Could not load the Kerberos extension for the WebHDFSHook.")
raise
class AirflowWebHDFSHookException(AirflowException):
"""Exception specific for WebHDFS hook."""
class WebHDFSHook(BaseHook):
"""
Interact with HDFS. This class is a wrapper around the hdfscli library.
:param webhdfs_conn_id: The connection id for the webhdfs client to connect to.
:param proxy_user: The user used to authenticate.
"""
def __init__(self, webhdfs_conn_id: str = "webhdfs_default", proxy_user: str | None = None):
super().__init__()
self.webhdfs_conn_id = webhdfs_conn_id
self.proxy_user = proxy_user
def get_conn(self) -> Any:
"""
Establishes a connection depending on the security mode set via config or environment variable.
:return: a hdfscli InsecureClient or KerberosClient object.
"""
connection = self._find_valid_server()
if connection is None:
            raise AirflowWebHDFSHookException("Failed to locate a valid server.")
return connection
def _find_valid_server(self) -> Any:
connection = self.get_connection(self.webhdfs_conn_id)
namenodes = connection.host.split(",")
for namenode in namenodes:
host_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.log.info("Trying to connect to %s:%s", namenode, connection.port)
try:
conn_check = host_socket.connect_ex((namenode, connection.port))
if conn_check == 0:
self.log.info("Trying namenode %s", namenode)
client = self._get_client(
namenode,
connection.port,
connection.login,
connection.get_password(),
connection.schema,
connection.extra_dejson,
)
client.status("/")
self.log.info("Using namenode %s for hook", namenode)
host_socket.close()
return client
else:
self.log.warning("Could not connect to %s:%s", namenode, connection.port)
except HdfsError as hdfs_error:
self.log.info("Read operation on namenode %s failed with error: %s", namenode, hdfs_error)
return None
def _get_client(
self, namenode: str, port: int, login: str, password: str | None, schema: str, extra_dejson: dict
) -> Any:
connection_str = f"http://{namenode}"
session = requests.Session()
if password is not None:
session.auth = (login, password)
if extra_dejson.get("use_ssl", "False") == "True" or extra_dejson.get("use_ssl", False):
connection_str = f"https://{namenode}"
session.verify = extra_dejson.get("verify", False)
if port is not None:
connection_str += f":{port}"
if schema is not None:
connection_str += f"/{schema}"
if _kerberos_security_mode:
return KerberosClient(connection_str, session=session)
proxy_user = self.proxy_user or login
return InsecureClient(connection_str, user=proxy_user, session=session)
def check_for_path(self, hdfs_path: str) -> bool:
"""
Check for the existence of a path in HDFS by querying FileStatus.
:param hdfs_path: The path to check.
:return: True if the path exists and False if not.
"""
conn = self.get_conn()
status = conn.status(hdfs_path, strict=False)
return bool(status)
def load_file(
self, source: str, destination: str, overwrite: bool = True, parallelism: int = 1, **kwargs: Any
) -> None:
r"""
Uploads a file to HDFS.
:param source: Local path to file or folder.
If it's a folder, all the files inside of it will be uploaded.
.. note:: This implies that folders empty of files will not be created remotely.
        :param destination: Target HDFS path.
If it already exists and is a directory, files will be uploaded inside.
:param overwrite: Overwrite any existing file or directory.
:param parallelism: Number of threads to use for parallelization.
A value of `0` (or negative) uses as many threads as there are files.
:param kwargs: Keyword arguments forwarded to :meth:`hdfs.client.Client.upload`.
"""
conn = self.get_conn()
conn.upload(
hdfs_path=destination, local_path=source, overwrite=overwrite, n_threads=parallelism, **kwargs
)
self.log.debug("Uploaded file %s to %s", source, destination)
def read_file(self, filename: str) -> bytes:
"""Read a file from HDFS.
:param filename: The path of the file to read.
        :return: File content as raw bytes
"""
conn = self.get_conn()
with conn.read(filename) as reader:
content = reader.read()
return content
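# --- Illustrative usage (editor's addition): a minimal sketch of the hook API above. It
# assumes a configured "webhdfs_default" connection; the local and HDFS paths are placeholders.
def _example_webhdfs_hook_usage() -> bytes:
    hook = WebHDFSHook(webhdfs_conn_id="webhdfs_default")
    if not hook.check_for_path("/data/landing/report.csv"):
        # Upload a local file; parallelism=1 keeps the transfer single-threaded.
        hook.load_file(source="/tmp/report.csv", destination="/data/landing/report.csv", overwrite=True)
    return hook.read_file("/data/landing/report.csv")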
| 6,440 | 37.112426 | 106 | py |
airflow | airflow-main/airflow/providers/apache/hdfs/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/hdfs/log/hdfs_task_handler.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
import pathlib
import shutil
from functools import cached_property
from urllib.parse import urlsplit
from airflow.configuration import conf
from airflow.providers.apache.hdfs.hooks.webhdfs import WebHDFSHook
from airflow.utils.log.file_task_handler import FileTaskHandler
from airflow.utils.log.logging_mixin import LoggingMixin
class HdfsTaskHandler(FileTaskHandler, LoggingMixin):
"""Logging handler to upload and read from HDFS."""
def __init__(
self, base_log_folder: str, hdfs_log_folder: str, filename_template: str | None = None, **kwargs
):
super().__init__(base_log_folder, filename_template)
self.remote_base = urlsplit(hdfs_log_folder).path
self.log_relative_path = ""
self._hook = None
self.closed = False
self.upload_on_close = True
self.delete_local_copy = (
kwargs["delete_local_copy"]
if "delete_local_copy" in kwargs
else conf.getboolean("logging", "delete_local_logs", fallback=False)
)
@cached_property
def hook(self):
"""Returns WebHDFSHook."""
return WebHDFSHook(webhdfs_conn_id=conf.get("logging", "REMOTE_LOG_CONN_ID"))
def set_context(self, ti):
super().set_context(ti)
        # Local location and remote location are needed to open and
# upload local log file to HDFS storage.
full_path = self.handler.baseFilename
self.log_relative_path = pathlib.Path(full_path).relative_to(self.local_base).as_posix()
is_trigger_log_context = getattr(ti, "is_trigger_log_context", False)
self.upload_on_close = is_trigger_log_context or not ti.raw
# Clear the file first so that duplicate data is not uploaded
# when re-using the same path (e.g. with rescheduled sensors)
if self.upload_on_close:
with open(self.handler.baseFilename, "w"):
pass
def close(self):
"""Close and upload local log file to HDFS."""
# When application exit, system shuts down all handlers by
# calling close method. Here we check if logger is already
# closed to prevent uploading the log to remote storage multiple
# times when `logging.shutdown` is called.
if self.closed:
return
super().close()
if not self.upload_on_close:
return
local_loc = os.path.join(self.local_base, self.log_relative_path)
remote_loc = os.path.join(self.remote_base, self.log_relative_path)
if os.path.exists(local_loc) and os.path.isfile(local_loc):
self.hook.load_file(local_loc, remote_loc)
if self.delete_local_copy:
shutil.rmtree(os.path.dirname(local_loc))
# Mark closed so we don't double write if close is called twice
self.closed = True
def _read_remote_logs(self, ti, try_number, metadata=None):
# Explicitly getting log relative path is necessary as the given
# task instance might be different from task instance passed
# in set_context method.
worker_log_rel_path = self._render_filename(ti, try_number)
logs = []
messages = []
file_path = os.path.join(self.remote_base, worker_log_rel_path)
if self.hook.check_for_path(file_path):
logs.append(self.hook.read_file(file_path).decode("utf-8"))
else:
messages.append(f"No logs found on hdfs for ti={ti}")
return messages, logs
def _read(self, ti, try_number, metadata=None):
"""
Read logs of given task instance and try_number from HDFS.
        If that fails, read the log from the task instance host machine.
todo: when min airflow version >= 2.6 then remove this method (``_read``)
:param ti: task instance object
:param try_number: task instance try_number to read logs from
:param metadata: log metadata,
            can be used for streaming log reading and auto-tailing.
"""
# from airflow 2.6 we no longer implement the _read method
if hasattr(super(), "_read_remote_logs"):
return super()._read(ti, try_number, metadata)
# if we get here, we're on airflow < 2.6 and we use this backcompat logic
messages, logs = self._read_remote_logs(ti, try_number, metadata)
if logs:
return "".join(f"*** {x}\n" for x in messages) + "\n".join(logs), {"end_of_log": True}
else:
if metadata and metadata.get("log_pos", 0) > 0:
log_prefix = ""
else:
log_prefix = "*** Falling back to local log\n"
local_log, metadata = super()._read(ti, try_number, metadata)
return f"{log_prefix}{local_log}", metadata
| 5,614 | 40.286765 | 104 | py |
airflow | airflow-main/airflow/providers/apache/hdfs/log/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/hdfs/sensors/hdfs.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from airflow.sensors.base import BaseSensorOperator
_EXCEPTION_MESSAGE = """The old HDFS Sensors have been removed in 4.0.0 version of the apache.hdfs provider.
Please convert your DAGs to use the WebHdfsSensor or downgrade the provider to below 4.*
if you want to continue using it.
If you want to use earlier provider you can downgrade to latest released 3.* version
using `pip install apache-airflow-providers-hdfs==3.2.1` (no constraints)
"""
class HdfsSensor(BaseSensorOperator):
"""
This Sensor has been removed and is not functional.
Please convert your DAGs to use the WebHdfsSensor or downgrade the provider
    to below 4.* if you want to continue using it. If you want to use an earlier
    provider, you can downgrade to the latest released 3.* version using
`pip install apache-airflow-providers-hdfs==3.2.1` (no constraints).
"""
def __init__(self, *args, **kwargs):
raise Exception(_EXCEPTION_MESSAGE)
class HdfsRegexSensor(HdfsSensor): # noqa: D101 Ignore missing docstring
def __init__(self, *args, **kwargs):
raise Exception(_EXCEPTION_MESSAGE)
class HdfsFolderSensor(HdfsSensor): # noqa: D101 Ignore missing docstring
def __init__(self, *args, **kwargs):
raise Exception(_EXCEPTION_MESSAGE)
| 2,090 | 40 | 108 | py |
airflow | airflow-main/airflow/providers/apache/hdfs/sensors/web_hdfs.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class WebHdfsSensor(BaseSensorOperator):
"""Waits for a file or folder to land in HDFS."""
template_fields: Sequence[str] = ("filepath",)
def __init__(self, *, filepath: str, webhdfs_conn_id: str = "webhdfs_default", **kwargs: Any) -> None:
super().__init__(**kwargs)
self.filepath = filepath
self.webhdfs_conn_id = webhdfs_conn_id
def poke(self, context: Context) -> bool:
from airflow.providers.apache.hdfs.hooks.webhdfs import WebHDFSHook
hook = WebHDFSHook(self.webhdfs_conn_id)
self.log.info("Poking for file %s", self.filepath)
return hook.check_for_path(hdfs_path=self.filepath)
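# --- Illustrative usage (editor's addition): a minimal sketch of WebHdfsSensor in a DAG.
# The dag_id and templated HDFS filepath are placeholder assumptions.
def _example_webhdfs_sensor_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_webhdfs_sensor", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        WebHdfsSensor(
            task_id="wait_for_input_file",
            filepath="/data/incoming/{{ ds }}/input.csv",
            webhdfs_conn_id="webhdfs_default",
            poke_interval=60,
        )
    return dag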
| 1,653 | 36.590909 | 106 | py |
airflow | airflow-main/airflow/providers/apache/hdfs/sensors/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/beam/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "5.1.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-beam:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,536 | 35.595238 | 120 | py |
airflow | airflow-main/airflow/providers/apache/beam/operators/beam.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Apache Beam operators."""
from __future__ import annotations
import asyncio
import contextlib
import copy
import os
import stat
import tempfile
from abc import ABC, ABCMeta, abstractmethod
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import ExitStack
from functools import partial
from typing import IO, TYPE_CHECKING, Any, Callable, Sequence
from airflow import AirflowException
from airflow.configuration import conf
from airflow.models import BaseOperator
from airflow.providers.apache.beam.hooks.beam import BeamHook, BeamRunnerType
from airflow.providers.apache.beam.triggers.beam import BeamPipelineTrigger
from airflow.providers.google.cloud.hooks.dataflow import (
DataflowHook,
process_line_and_extract_dataflow_job_id_callback,
)
from airflow.providers.google.cloud.hooks.gcs import GCSHook, _parse_gcs_url
from airflow.providers.google.cloud.links.dataflow import DataflowJobLink
from airflow.providers.google.cloud.operators.dataflow import CheckJobRunning, DataflowConfiguration
from airflow.utils.helpers import convert_camel_to_snake, exactly_one
from airflow.version import version
if TYPE_CHECKING:
from airflow.utils.context import Context
class BeamDataflowMixin(metaclass=ABCMeta):
"""
    Helper class to store common, Dataflow-specific logic shared by
    :class:`~airflow.providers.apache.beam.operators.beam.BeamRunPythonPipelineOperator`,
    :class:`~airflow.providers.apache.beam.operators.beam.BeamRunJavaPipelineOperator` and
    :class:`~airflow.providers.apache.beam.operators.beam.BeamRunGoPipelineOperator`.
"""
dataflow_hook: DataflowHook | None
dataflow_config: DataflowConfiguration
gcp_conn_id: str
dataflow_support_impersonation: bool = True
def _set_dataflow(
self,
pipeline_options: dict,
job_name_variable_key: str | None = None,
) -> tuple[str, dict, Callable[[str], None]]:
self.dataflow_hook = self.__set_dataflow_hook()
self.dataflow_config.project_id = self.dataflow_config.project_id or self.dataflow_hook.project_id
dataflow_job_name = self.__get_dataflow_job_name()
pipeline_options = self.__get_dataflow_pipeline_options(
pipeline_options, dataflow_job_name, job_name_variable_key
)
process_line_callback = self.__get_dataflow_process_callback()
return dataflow_job_name, pipeline_options, process_line_callback
def __set_dataflow_hook(self) -> DataflowHook:
self.dataflow_hook = DataflowHook(
gcp_conn_id=self.dataflow_config.gcp_conn_id or self.gcp_conn_id,
poll_sleep=self.dataflow_config.poll_sleep,
impersonation_chain=self.dataflow_config.impersonation_chain,
drain_pipeline=self.dataflow_config.drain_pipeline,
cancel_timeout=self.dataflow_config.cancel_timeout,
wait_until_finished=self.dataflow_config.wait_until_finished,
)
return self.dataflow_hook
def __get_dataflow_job_name(self) -> str:
return DataflowHook.build_dataflow_job_name(
self.dataflow_config.job_name, self.dataflow_config.append_job_name
)
def __get_dataflow_pipeline_options(
self, pipeline_options: dict, job_name: str, job_name_key: str | None = None
) -> dict:
pipeline_options = copy.deepcopy(pipeline_options)
if job_name_key is not None:
pipeline_options[job_name_key] = job_name
if self.dataflow_config.service_account:
pipeline_options["serviceAccount"] = self.dataflow_config.service_account
if self.dataflow_support_impersonation and self.dataflow_config.impersonation_chain:
if isinstance(self.dataflow_config.impersonation_chain, list):
pipeline_options["impersonateServiceAccount"] = ",".join(
self.dataflow_config.impersonation_chain
)
else:
pipeline_options["impersonateServiceAccount"] = self.dataflow_config.impersonation_chain
pipeline_options["project"] = self.dataflow_config.project_id
pipeline_options["region"] = self.dataflow_config.location
pipeline_options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
return pipeline_options
def __get_dataflow_process_callback(self) -> Callable[[str], None]:
def set_current_dataflow_job_id(job_id):
self.dataflow_job_id = job_id
return process_line_and_extract_dataflow_job_id_callback(
on_new_job_id_callback=set_current_dataflow_job_id
)
class BeamBasePipelineOperator(BaseOperator, BeamDataflowMixin, ABC):
"""
Abstract base class for Beam Pipeline Operators.
    :param runner: Runner on which the pipeline will be run. By default "DirectRunner" is used.
Other possible options: DataflowRunner, SparkRunner, FlinkRunner, PortableRunner.
See: :class:`~providers.apache.beam.hooks.beam.BeamRunnerType`
See: https://beam.apache.org/documentation/runners/capability-matrix/
:param default_pipeline_options: Map of default pipeline options.
    :param pipeline_options: Map of pipeline options. The argument must be a dictionary.
        The values can be of different types:
* If the value is None, the single option - ``--key`` (without value) will be added.
* If the value is False, this option will be skipped
* If the value is True, the single option - ``--key`` (without value) will be added.
        * If the value is a list, the option will be repeated for each element.
          If the value is ``['A', 'B']`` and the key is ``key`` then the ``--key=A --key=B`` options
          will be passed
* Other value types will be replaced with the Python textual representation.
When defining labels (labels option), you can also provide a dictionary.
:param gcp_conn_id: Optional.
The connection ID to use connecting to Google Cloud Storage if python file is on GCS.
:param dataflow_config: Dataflow's configuration, used when runner type is set to DataflowRunner,
(optional) defaults to None.
"""
def __init__(
self,
*,
runner: str = "DirectRunner",
default_pipeline_options: dict | None = None,
pipeline_options: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
dataflow_config: DataflowConfiguration | dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.runner = runner
self.default_pipeline_options = default_pipeline_options or {}
self.pipeline_options = pipeline_options or {}
self.gcp_conn_id = gcp_conn_id
if isinstance(dataflow_config, dict):
self.dataflow_config = DataflowConfiguration(**dataflow_config)
else:
self.dataflow_config = dataflow_config or DataflowConfiguration()
self.beam_hook: BeamHook
self.dataflow_hook: DataflowHook | None = None
self.dataflow_job_id: str | None = None
if self.dataflow_config and self.runner.lower() != BeamRunnerType.DataflowRunner.lower():
self.log.warning(
"dataflow_config is defined but runner is different than DataflowRunner (%s)", self.runner
)
def _init_pipeline_options(
self,
format_pipeline_options: bool = False,
job_name_variable_key: str | None = None,
) -> tuple[bool, str | None, dict, Callable[[str], None] | None]:
self.beam_hook = BeamHook(runner=self.runner)
pipeline_options = self.default_pipeline_options.copy()
process_line_callback: Callable[[str], None] | None = None
is_dataflow = self.runner.lower() == BeamRunnerType.DataflowRunner.lower()
dataflow_job_name: str | None = None
if is_dataflow:
dataflow_job_name, pipeline_options, process_line_callback = self._set_dataflow(
pipeline_options=pipeline_options,
job_name_variable_key=job_name_variable_key,
)
self.log.info(pipeline_options)
pipeline_options.update(self.pipeline_options)
if format_pipeline_options:
snake_case_pipeline_options = {
convert_camel_to_snake(key): pipeline_options[key] for key in pipeline_options
}
return is_dataflow, dataflow_job_name, snake_case_pipeline_options, process_line_callback
return is_dataflow, dataflow_job_name, pipeline_options, process_line_callback
class BeamRunPythonPipelineOperator(BeamBasePipelineOperator):
"""
Launch Apache Beam pipelines written in Python.
Note that both ``default_pipeline_options`` and ``pipeline_options``
    will be merged to specify pipeline execution parameters, and
    ``default_pipeline_options`` is expected to hold high-level options,
    for instance, project and zone information, which apply to all beam
operators in the DAG.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BeamRunPythonPipelineOperator`
.. seealso::
For more detail on Apache Beam have a look at the reference:
https://beam.apache.org/documentation/
:param py_file: Reference to the python Apache Beam pipeline file.py, e.g.,
/some/local/file/path/to/your/python/pipeline/file. (templated)
:param py_options: Additional python options, e.g., ["-m", "-v"].
:param py_interpreter: Python version of the beam pipeline.
        If None, this defaults to python3.
To track python versions supported by beam and related
issues check: https://issues.apache.org/jira/browse/BEAM-1251
:param py_requirements: Additional python package(s) to install.
        If a value is passed to this parameter, a new virtual environment will be created with
additional packages installed.
You could also install the apache_beam package if it is not installed on your system or you want
to use a different version.
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is not None.
:param deferrable: Run operator in the deferrable mode: checks for the state using asynchronous calls.
"""
template_fields: Sequence[str] = (
"py_file",
"runner",
"pipeline_options",
"default_pipeline_options",
"dataflow_config",
)
template_fields_renderers = {"dataflow_config": "json", "pipeline_options": "json"}
operator_extra_links = (DataflowJobLink(),)
def __init__(
self,
*,
py_file: str,
runner: str = "DirectRunner",
default_pipeline_options: dict | None = None,
pipeline_options: dict | None = None,
py_interpreter: str = "python3",
py_options: list[str] | None = None,
py_requirements: list[str] | None = None,
py_system_site_packages: bool = False,
gcp_conn_id: str = "google_cloud_default",
dataflow_config: DataflowConfiguration | dict | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs,
) -> None:
super().__init__(
runner=runner,
default_pipeline_options=default_pipeline_options,
pipeline_options=pipeline_options,
gcp_conn_id=gcp_conn_id,
dataflow_config=dataflow_config,
**kwargs,
)
self.py_file = py_file
self.py_options = py_options or []
self.py_interpreter = py_interpreter
self.py_requirements = py_requirements
self.py_system_site_packages = py_system_site_packages
self.pipeline_options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
self.deferrable = deferrable
def execute(self, context: Context):
"""Execute the Apache Beam Pipeline."""
(
self.is_dataflow,
self.dataflow_job_name,
self.snake_case_pipeline_options,
self.process_line_callback,
) = self._init_pipeline_options(format_pipeline_options=True, job_name_variable_key="job_name")
if not self.beam_hook:
raise AirflowException("Beam hook is not defined.")
# Check deferrable parameter passed to the operator
# to determine type of run - asynchronous or synchronous
if self.deferrable:
asyncio.run(self.execute_async(context))
else:
return self.execute_sync(context)
def execute_sync(self, context: Context):
with ExitStack() as exit_stack:
if self.py_file.lower().startswith("gs://"):
gcs_hook = GCSHook(gcp_conn_id=self.gcp_conn_id)
tmp_gcs_file = exit_stack.enter_context(gcs_hook.provide_file(object_url=self.py_file))
self.py_file = tmp_gcs_file.name
if self.is_dataflow and self.dataflow_hook:
with self.dataflow_hook.provide_authorized_gcloud():
self.beam_hook.start_python_pipeline(
variables=self.snake_case_pipeline_options,
py_file=self.py_file,
py_options=self.py_options,
py_interpreter=self.py_interpreter,
py_requirements=self.py_requirements,
py_system_site_packages=self.py_system_site_packages,
process_line_callback=self.process_line_callback,
)
DataflowJobLink.persist(
self,
context,
self.dataflow_config.project_id,
self.dataflow_config.location,
self.dataflow_job_id,
)
return {"dataflow_job_id": self.dataflow_job_id}
else:
self.beam_hook.start_python_pipeline(
variables=self.snake_case_pipeline_options,
py_file=self.py_file,
py_options=self.py_options,
py_interpreter=self.py_interpreter,
py_requirements=self.py_requirements,
py_system_site_packages=self.py_system_site_packages,
process_line_callback=self.process_line_callback,
)
async def execute_async(self, context: Context):
# Creating a new event loop to manage I/O operations asynchronously
loop = asyncio.get_event_loop()
if self.py_file.lower().startswith("gs://"):
gcs_hook = GCSHook(gcp_conn_id=self.gcp_conn_id)
# Running synchronous `enter_context()` method in a separate
# thread using the default executor `None`. The `run_in_executor()` function returns the
# file object, which is created using gcs function `provide_file()`, asynchronously.
# This means we can perform asynchronous operations with this file.
create_tmp_file_call = gcs_hook.provide_file(object_url=self.py_file)
tmp_gcs_file: IO[str] = await loop.run_in_executor(
None, contextlib.ExitStack().enter_context, create_tmp_file_call
)
self.py_file = tmp_gcs_file.name
if self.is_dataflow and self.dataflow_hook:
DataflowJobLink.persist(
self,
context,
self.dataflow_config.project_id,
self.dataflow_config.location,
self.dataflow_job_id,
)
with self.dataflow_hook.provide_authorized_gcloud():
self.defer(
trigger=BeamPipelineTrigger(
variables=self.snake_case_pipeline_options,
py_file=self.py_file,
py_options=self.py_options,
py_interpreter=self.py_interpreter,
py_requirements=self.py_requirements,
py_system_site_packages=self.py_system_site_packages,
runner=self.runner,
),
method_name="execute_complete",
)
else:
self.defer(
trigger=BeamPipelineTrigger(
variables=self.snake_case_pipeline_options,
py_file=self.py_file,
py_options=self.py_options,
py_interpreter=self.py_interpreter,
py_requirements=self.py_requirements,
py_system_site_packages=self.py_system_site_packages,
runner=self.runner,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any]):
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was
successful.
"""
if event["status"] == "error":
raise AirflowException(event["message"])
self.log.info(
"%s completed with response %s ",
self.task_id,
event["message"],
)
return {"dataflow_job_id": self.dataflow_job_id}
def on_kill(self) -> None:
if self.dataflow_hook and self.dataflow_job_id:
self.log.info("Dataflow job with id: `%s` was requested to be cancelled.", self.dataflow_job_id)
self.dataflow_hook.cancel_job(
job_id=self.dataflow_job_id,
project_id=self.dataflow_config.project_id,
)
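# --- Illustrative usage (editor's addition): a minimal sketch running a Python Beam
# pipeline on the DirectRunner. The dag_id, py_file path, requirements and pipeline
# options are placeholder assumptions, not values taken from this module.
def _example_beam_run_python_pipeline_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_beam_python", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        BeamRunPythonPipelineOperator(
            task_id="run_wordcount",
            py_file="/files/pipelines/wordcount.py",
            runner=BeamRunnerType.DirectRunner,
            pipeline_options={"output": "/tmp/wordcount-output"},
            py_requirements=["apache-beam[gcp]"],
            py_system_site_packages=False,
        )
    return dag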
class BeamRunJavaPipelineOperator(BeamBasePipelineOperator):
"""
    Launch Apache Beam pipelines written in Java.
    Note that both
    ``default_pipeline_options`` and ``pipeline_options`` will be merged to specify pipeline
    execution parameters, and ``default_pipeline_options`` is expected to hold
    high-level pipeline_options, for instance, project and zone information, which
    apply to all Apache Beam operators in the DAG.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BeamRunJavaPipelineOperator`
.. seealso::
For more detail on Apache Beam have a look at the reference:
https://beam.apache.org/documentation/
You need to pass the path to your jar file as a file reference with the ``jar``
parameter, the jar needs to be a self executing jar (see documentation here:
https://beam.apache.org/documentation/runners/dataflow/#self-executing-jar).
Use ``pipeline_options`` to pass on pipeline_options to your job.
:param jar: The reference to a self executing Apache Beam jar (templated).
    :param job_class: The name of the Apache Beam pipeline class to be executed; it
is often not the main class configured in the pipeline jar file.
"""
template_fields: Sequence[str] = (
"jar",
"runner",
"job_class",
"pipeline_options",
"default_pipeline_options",
"dataflow_config",
)
template_fields_renderers = {"dataflow_config": "json", "pipeline_options": "json"}
ui_color = "#0273d4"
operator_extra_links = (DataflowJobLink(),)
def __init__(
self,
*,
jar: str,
runner: str = "DirectRunner",
job_class: str | None = None,
default_pipeline_options: dict | None = None,
pipeline_options: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
dataflow_config: DataflowConfiguration | dict | None = None,
**kwargs,
) -> None:
super().__init__(
runner=runner,
default_pipeline_options=default_pipeline_options,
pipeline_options=pipeline_options,
gcp_conn_id=gcp_conn_id,
dataflow_config=dataflow_config,
**kwargs,
)
self.jar = jar
self.job_class = job_class
def execute(self, context: Context):
"""Execute the Apache Beam Pipeline."""
(
is_dataflow,
dataflow_job_name,
pipeline_options,
process_line_callback,
) = self._init_pipeline_options()
if not self.beam_hook:
raise AirflowException("Beam hook is not defined.")
with ExitStack() as exit_stack:
if self.jar.lower().startswith("gs://"):
gcs_hook = GCSHook(self.gcp_conn_id)
tmp_gcs_file = exit_stack.enter_context(gcs_hook.provide_file(object_url=self.jar))
self.jar = tmp_gcs_file.name
if is_dataflow and self.dataflow_hook:
is_running = False
if self.dataflow_config.check_if_running != CheckJobRunning.IgnoreJob:
is_running = (
# The reason for disable=no-value-for-parameter is that project_id parameter is
# required but here is not passed, moreover it cannot be passed here.
# This method is wrapped by @_fallback_to_project_id_from_variables decorator which
# fallback project_id value from variables and raise error if project_id is
# defined both in variables and as parameter (here is already defined in variables)
self.dataflow_hook.is_job_dataflow_running(
name=self.dataflow_config.job_name,
variables=pipeline_options,
)
)
while is_running and self.dataflow_config.check_if_running == CheckJobRunning.WaitForRun:
# The reason for disable=no-value-for-parameter is that project_id parameter is
# required but here is not passed, moreover it cannot be passed here.
# This method is wrapped by @_fallback_to_project_id_from_variables decorator which
# fallback project_id value from variables and raise error if project_id is
# defined both in variables and as parameter (here is already defined in variables)
is_running = self.dataflow_hook.is_job_dataflow_running(
name=self.dataflow_config.job_name,
variables=pipeline_options,
)
if not is_running:
pipeline_options["jobName"] = dataflow_job_name
with self.dataflow_hook.provide_authorized_gcloud():
self.beam_hook.start_java_pipeline(
variables=pipeline_options,
jar=self.jar,
job_class=self.job_class,
process_line_callback=process_line_callback,
)
if dataflow_job_name and self.dataflow_config.location:
multiple_jobs = self.dataflow_config.multiple_jobs or False
DataflowJobLink.persist(
self,
context,
self.dataflow_config.project_id,
self.dataflow_config.location,
self.dataflow_job_id,
)
self.dataflow_hook.wait_for_done(
job_name=dataflow_job_name,
location=self.dataflow_config.location,
job_id=self.dataflow_job_id,
multiple_jobs=multiple_jobs,
project_id=self.dataflow_config.project_id,
)
return {"dataflow_job_id": self.dataflow_job_id}
else:
self.beam_hook.start_java_pipeline(
variables=pipeline_options,
jar=self.jar,
job_class=self.job_class,
process_line_callback=process_line_callback,
)
def on_kill(self) -> None:
if self.dataflow_hook and self.dataflow_job_id:
self.log.info("Dataflow job with id: `%s` was requested to be cancelled.", self.dataflow_job_id)
self.dataflow_hook.cancel_job(
job_id=self.dataflow_job_id,
project_id=self.dataflow_config.project_id,
)
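# --- Illustrative usage (editor's addition): a minimal sketch for a self-executing Beam
# jar run on the DirectRunner. The jar location, job class and options are placeholder
# assumptions, not values taken from this module.
def _example_beam_run_java_pipeline_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_beam_java", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        BeamRunJavaPipelineOperator(
            task_id="run_java_wordcount",
            jar="gs://my-bucket/binaries/wordcount-bundled.jar",
            job_class="org.example.WordCount",
            runner=BeamRunnerType.DirectRunner,
            pipeline_options={"output": "/tmp/wordcount-java"},
        )
    return dag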
class BeamRunGoPipelineOperator(BeamBasePipelineOperator):
"""
Launch Apache Beam pipelines written in Go.
Note that both ``default_pipeline_options`` and ``pipeline_options``
    will be merged to specify pipeline execution parameters, and
    ``default_pipeline_options`` is expected to hold high-level options,
    for instance, project and zone information, which apply to all beam
operators in the DAG.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:BeamRunGoPipelineOperator`
.. seealso::
For more detail on Apache Beam have a look at the reference:
https://beam.apache.org/documentation/
:param go_file: Reference to the Apache Beam pipeline Go source file,
e.g. /local/path/to/main.go or gs://bucket/path/to/main.go.
Exactly one of go_file and launcher_binary must be provided.
:param launcher_binary: Reference to the Apache Beam pipeline Go binary compiled for the launching
platform, e.g. /local/path/to/launcher-main or gs://bucket/path/to/launcher-main.
Exactly one of go_file and launcher_binary must be provided.
:param worker_binary: Reference to the Apache Beam pipeline Go binary compiled for the worker platform,
e.g. /local/path/to/worker-main or gs://bucket/path/to/worker-main.
Needed if the OS or architecture of the workers running the pipeline is different from that
of the platform launching the pipeline. For more information, see the Apache Beam documentation
for Go cross compilation: https://beam.apache.org/documentation/sdks/go-cross-compilation/.
If launcher_binary is not set, providing a worker_binary will have no effect. If launcher_binary is
set and worker_binary is not, worker_binary will default to the value of launcher_binary.
"""
template_fields = [
"go_file",
"launcher_binary",
"worker_binary",
"runner",
"pipeline_options",
"default_pipeline_options",
"dataflow_config",
]
template_fields_renderers = {"dataflow_config": "json", "pipeline_options": "json"}
operator_extra_links = (DataflowJobLink(),)
def __init__(
self,
*,
go_file: str = "",
launcher_binary: str = "",
worker_binary: str = "",
runner: str = "DirectRunner",
default_pipeline_options: dict | None = None,
pipeline_options: dict | None = None,
gcp_conn_id: str = "google_cloud_default",
dataflow_config: DataflowConfiguration | dict | None = None,
**kwargs,
) -> None:
super().__init__(
runner=runner,
default_pipeline_options=default_pipeline_options,
pipeline_options=pipeline_options,
gcp_conn_id=gcp_conn_id,
dataflow_config=dataflow_config,
**kwargs,
)
if self.dataflow_config.impersonation_chain:
self.log.info(
"Impersonation chain parameter is not supported for Apache Beam GO SDK and will be skipped "
"in the execution"
)
self.dataflow_support_impersonation = False
if not exactly_one(go_file, launcher_binary):
raise ValueError("Exactly one of `go_file` and `launcher_binary` must be set")
self.go_file = go_file
self.launcher_binary = launcher_binary
self.worker_binary = worker_binary or launcher_binary
self.pipeline_options.setdefault("labels", {}).update(
{"airflow-version": "v" + version.replace(".", "-").replace("+", "-")}
)
def execute(self, context: Context):
"""Execute the Apache Beam Pipeline."""
(
is_dataflow,
dataflow_job_name,
snake_case_pipeline_options,
process_line_callback,
) = self._init_pipeline_options(format_pipeline_options=True, job_name_variable_key="job_name")
if not self.beam_hook:
raise AirflowException("Beam hook is not defined.")
go_artifact: _GoArtifact = (
_GoFile(file=self.go_file)
if self.go_file
else _GoBinary(launcher=self.launcher_binary, worker=self.worker_binary)
)
with ExitStack() as exit_stack:
if go_artifact.is_located_on_gcs():
gcs_hook = GCSHook(self.gcp_conn_id)
tmp_dir = exit_stack.enter_context(tempfile.TemporaryDirectory(prefix="apache-beam-go"))
go_artifact.download_from_gcs(gcs_hook=gcs_hook, tmp_dir=tmp_dir)
if is_dataflow and self.dataflow_hook:
with self.dataflow_hook.provide_authorized_gcloud():
go_artifact.start_pipeline(
beam_hook=self.beam_hook,
variables=snake_case_pipeline_options,
process_line_callback=process_line_callback,
)
DataflowJobLink.persist(
self,
context,
self.dataflow_config.project_id,
self.dataflow_config.location,
self.dataflow_job_id,
)
if dataflow_job_name and self.dataflow_config.location:
self.dataflow_hook.wait_for_done(
job_name=dataflow_job_name,
location=self.dataflow_config.location,
job_id=self.dataflow_job_id,
multiple_jobs=False,
project_id=self.dataflow_config.project_id,
)
return {"dataflow_job_id": self.dataflow_job_id}
else:
go_artifact.start_pipeline(
beam_hook=self.beam_hook,
variables=snake_case_pipeline_options,
process_line_callback=process_line_callback,
)
def on_kill(self) -> None:
if self.dataflow_hook and self.dataflow_job_id:
self.log.info("Dataflow job with id: `%s` was requested to be cancelled.", self.dataflow_job_id)
self.dataflow_hook.cancel_job(
job_id=self.dataflow_job_id,
project_id=self.dataflow_config.project_id,
)
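# --- Illustrative usage (editor's addition): a minimal sketch using go_file (exactly one of
# go_file / launcher_binary must be set, as enforced above). The dag_id, source path and
# options are placeholder assumptions.
def _example_beam_run_go_pipeline_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_beam_go", start_date=datetime(2023, 1, 1), schedule=None) as dag:
        BeamRunGoPipelineOperator(
            task_id="run_go_wordcount",
            go_file="/files/pipelines/wordcount/main.go",
            runner=BeamRunnerType.DirectRunner,
            pipeline_options={"output": "/tmp/wordcount-go"},
        )
    return dag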
class _GoArtifact(ABC):
@abstractmethod
def is_located_on_gcs(self) -> bool:
...
@abstractmethod
def download_from_gcs(self, gcs_hook: GCSHook, tmp_dir: str) -> None:
...
@abstractmethod
def start_pipeline(
self,
beam_hook: BeamHook,
variables: dict,
process_line_callback: Callable[[str], None] | None = None,
) -> None:
...
class _GoFile(_GoArtifact):
def __init__(self, file: str) -> None:
self.file = file
self.should_init_go_module = False
def is_located_on_gcs(self) -> bool:
return _object_is_located_on_gcs(self.file)
def download_from_gcs(self, gcs_hook: GCSHook, tmp_dir: str) -> None:
self.file = _download_object_from_gcs(gcs_hook=gcs_hook, uri=self.file, tmp_dir=tmp_dir)
self.should_init_go_module = True
def start_pipeline(
self,
beam_hook: BeamHook,
variables: dict,
process_line_callback: Callable[[str], None] | None = None,
) -> None:
beam_hook.start_go_pipeline(
variables=variables,
go_file=self.file,
process_line_callback=process_line_callback,
should_init_module=self.should_init_go_module,
)
class _GoBinary(_GoArtifact):
def __init__(self, launcher: str, worker: str) -> None:
self.launcher = launcher
self.worker = worker
def is_located_on_gcs(self) -> bool:
return any(_object_is_located_on_gcs(path) for path in (self.launcher, self.worker))
def download_from_gcs(self, gcs_hook: GCSHook, tmp_dir: str) -> None:
binaries_are_equal = self.launcher == self.worker
binaries_to_download = []
if _object_is_located_on_gcs(self.launcher):
binaries_to_download.append("launcher")
if not binaries_are_equal and _object_is_located_on_gcs(self.worker):
binaries_to_download.append("worker")
download_fn = partial(_download_object_from_gcs, gcs_hook=gcs_hook, tmp_dir=tmp_dir)
with ThreadPoolExecutor(max_workers=len(binaries_to_download)) as executor:
futures = {
executor.submit(download_fn, uri=getattr(self, binary), tmp_prefix=f"{binary}-"): binary
for binary in binaries_to_download
}
for future in as_completed(futures):
binary = futures[future]
tmp_path = future.result()
_make_executable(tmp_path)
setattr(self, binary, tmp_path)
if binaries_are_equal:
self.worker = self.launcher
def start_pipeline(
self,
beam_hook: BeamHook,
variables: dict,
process_line_callback: Callable[[str], None] | None = None,
) -> None:
beam_hook.start_go_pipeline_with_binary(
variables=variables,
launcher_binary=self.launcher,
worker_binary=self.worker,
process_line_callback=process_line_callback,
)
def _object_is_located_on_gcs(path: str) -> bool:
return path.lower().startswith("gs://")
def _download_object_from_gcs(gcs_hook: GCSHook, uri: str, tmp_dir: str, tmp_prefix: str = "") -> str:
tmp_name = f"{tmp_prefix}{os.path.basename(uri)}"
tmp_path = os.path.join(tmp_dir, tmp_name)
bucket, prefix = _parse_gcs_url(uri)
gcs_hook.download(bucket_name=bucket, object_name=prefix, filename=tmp_path)
return tmp_path
def _make_executable(path: str) -> None:
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IEXEC)
| 35,757 | 41.8753 | 109 | py |
airflow | airflow-main/airflow/providers/apache/beam/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/beam/triggers/beam.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, AsyncIterator
from airflow.providers.apache.beam.hooks.beam import BeamAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class BeamPipelineTrigger(BaseTrigger):
"""
    Trigger that checks the pipeline status until it reaches a terminal state.
:param variables: Variables passed to the pipeline.
:param py_file: Path to the python file to execute.
:param py_options: Additional options.
    :param py_interpreter: Python version of the Apache Beam pipeline. If `None`, this defaults to
        python3. To track python versions supported by beam and related issues
        check: https://issues.apache.org/jira/browse/BEAM-1251
    :param py_requirements: Additional python package(s) to install.
        If a value is passed to this parameter, a new virtual environment will be created with the
        additional packages installed.
        You can also install the apache-beam package if it is not installed on your system, or if you want
        to use a different version.
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is not None.
:param runner: Runner on which pipeline will be run. By default, "DirectRunner" is being used.
Other possible options: DataflowRunner, SparkRunner, FlinkRunner, PortableRunner.
See: :class:`~providers.apache.beam.hooks.beam.BeamRunnerType`
See: https://beam.apache.org/documentation/runners/capability-matrix/
"""
def __init__(
self,
variables: dict,
py_file: str,
py_options: list[str] | None = None,
py_interpreter: str = "python3",
py_requirements: list[str] | None = None,
py_system_site_packages: bool = False,
runner: str = "DirectRunner",
):
super().__init__()
self.variables = variables
self.py_file = py_file
self.py_options = py_options
self.py_interpreter = py_interpreter
self.py_requirements = py_requirements
self.py_system_site_packages = py_system_site_packages
self.runner = runner
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes BeamPipelineTrigger arguments and classpath."""
return (
"airflow.providers.apache.beam.triggers.beam.BeamPipelineTrigger",
{
"variables": self.variables,
"py_file": self.py_file,
"py_options": self.py_options,
"py_interpreter": self.py_interpreter,
"py_requirements": self.py_requirements,
"py_system_site_packages": self.py_system_site_packages,
"runner": self.runner,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]: # type: ignore[override]
"""Gets current pipeline status and yields a TriggerEvent."""
hook = self._get_async_hook()
while True:
try:
return_code = await hook.start_python_pipeline_async(
variables=self.variables,
py_file=self.py_file,
py_options=self.py_options,
py_interpreter=self.py_interpreter,
py_requirements=self.py_requirements,
py_system_site_packages=self.py_system_site_packages,
)
if return_code == 0:
yield TriggerEvent(
{
"status": "success",
"message": "Pipeline has finished SUCCESSFULLY",
}
)
return
else:
yield TriggerEvent({"status": "error", "message": "Operation failed"})
return
except Exception as e:
self.log.exception("Exception occurred while checking for pipeline state")
yield TriggerEvent({"status": "error", "message": str(e)})
return
def _get_async_hook(self) -> BeamAsyncHook:
return BeamAsyncHook(runner=self.runner)
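# A minimal sketch of constructing the trigger directly (illustrative only; the file path,
# variables and requirements below are placeholders, and in practice the trigger is created
# by a deferrable Beam operator rather than by hand):
#
#     trigger = BeamPipelineTrigger(
#         variables={"output": "/tmp/beam_output"},
#         py_file="/files/wordcount.py",
#         py_requirements=["apache-beam"],
#         runner="DirectRunner",
#     )
#     # trigger.serialize() returns the classpath plus the keyword arguments listed above.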
| 5,107 | 42.65812 | 105 | py |
airflow | airflow-main/airflow/providers/apache/beam/triggers/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/beam/hooks/beam.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a Apache Beam Hook."""
from __future__ import annotations
import asyncio
import contextlib
import copy
import functools
import json
import logging
import os
import select
import shlex
import shutil
import subprocess
import tempfile
import textwrap
from typing import Callable
from packaging.version import Version
from airflow.exceptions import AirflowConfigException, AirflowException
from airflow.hooks.base import BaseHook
from airflow.providers.google.go_module_utils import init_module, install_dependencies
from airflow.utils.python_virtualenv import prepare_virtualenv
class BeamRunnerType:
"""
Helper class for listing runner types.
For more information about runners see: https://beam.apache.org/documentation/
"""
DataflowRunner = "DataflowRunner"
DirectRunner = "DirectRunner"
SparkRunner = "SparkRunner"
FlinkRunner = "FlinkRunner"
SamzaRunner = "SamzaRunner"
NemoRunner = "NemoRunner"
JetRunner = "JetRunner"
Twister2Runner = "Twister2Runner"
def beam_options_to_args(options: dict) -> list[str]:
"""
    Return formatted pipeline options built from a dictionary of arguments.
The logic of this method should be compatible with Apache Beam:
https://github.com/apache/beam/blob/b56740f0e8cd80c2873412847d0b336837429fb9/sdks/python/
apache_beam/options/pipeline_options.py#L230-L251
:param options: Dictionary with options
:return: List of arguments
"""
if not options:
return []
args: list[str] = []
for attr, value in options.items():
if value is None or (isinstance(value, bool) and value):
args.append(f"--{attr}")
elif isinstance(value, list):
args.extend([f"--{attr}={v}" for v in value])
else:
args.append(f"--{attr}={value}")
return args
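# Illustrative example (not executed) of the conversion above; the option names are arbitrary
# placeholders rather than a required set:
#
#     beam_options_to_args({"project": "example-project", "streaming": True, "labels": ["k1=v1", "k2=v2"]})
#     # -> ["--project=example-project", "--streaming", "--labels=k1=v1", "--labels=k2=v2"]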
def process_fd(
proc,
fd,
log: logging.Logger,
process_line_callback: Callable[[str], None] | None = None,
):
"""
Prints output to logs.
:param proc: subprocess.
:param fd: File descriptor.
:param process_line_callback: Optional callback which can be used to process
stdout and stderr to detect job id.
:param log: logger.
"""
if fd not in (proc.stdout, proc.stderr):
raise Exception("No data in stderr or in stdout.")
fd_to_log = {proc.stderr: log.warning, proc.stdout: log.info}
func_log = fd_to_log[fd]
while True:
line = fd.readline().decode()
if not line:
return
if process_line_callback:
process_line_callback(line)
func_log(line.rstrip("\n"))
def run_beam_command(
cmd: list[str],
log: logging.Logger,
process_line_callback: Callable[[str], None] | None = None,
working_directory: str | None = None,
) -> None:
"""
Function responsible for running pipeline command in subprocess.
:param cmd: Parts of the command to be run in subprocess
:param process_line_callback: Optional callback which can be used to process
stdout and stderr to detect job id
:param working_directory: Working directory
:param log: logger.
"""
log.info("Running command: %s", " ".join(shlex.quote(c) for c in cmd))
proc = subprocess.Popen(
cmd,
cwd=working_directory,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
)
# Waits for Apache Beam pipeline to complete.
log.info("Start waiting for Apache Beam process to complete.")
reads = [proc.stderr, proc.stdout]
while True:
# Wait for at least one available fd.
readable_fds, _, _ = select.select(reads, [], [], 5)
        if not readable_fds:
log.info("Waiting for Apache Beam process to complete.")
continue
for readable_fd in readable_fds:
process_fd(proc, readable_fd, log, process_line_callback)
if proc.poll() is not None:
break
# Corner case: check if more output was created between the last read and the process termination
for readable_fd in reads:
process_fd(proc, readable_fd, log, process_line_callback)
log.info("Process exited with return code: %s", proc.returncode)
if proc.returncode != 0:
raise AirflowException(f"Apache Beam process failed with return code {proc.returncode}")
class BeamHook(BaseHook):
"""
Hook for Apache Beam.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param runner: Runner type
"""
def __init__(
self,
runner: str,
) -> None:
self.runner = runner
super().__init__()
def _start_pipeline(
self,
variables: dict,
command_prefix: list[str],
process_line_callback: Callable[[str], None] | None = None,
working_directory: str | None = None,
) -> None:
cmd = command_prefix + [
f"--runner={self.runner}",
]
if variables:
cmd.extend(beam_options_to_args(variables))
run_beam_command(
cmd=cmd,
process_line_callback=process_line_callback,
working_directory=working_directory,
log=self.log,
)
def start_python_pipeline(
self,
variables: dict,
py_file: str,
py_options: list[str],
py_interpreter: str = "python3",
py_requirements: list[str] | None = None,
py_system_site_packages: bool = False,
process_line_callback: Callable[[str], None] | None = None,
):
"""
Starts Apache Beam python pipeline.
:param variables: Variables passed to the pipeline.
:param py_file: Path to the python file to execute.
:param py_options: Additional options.
        :param py_interpreter: Python version of the Apache Beam pipeline.
            If None, this defaults to python3.
            To track python versions supported by beam and related
            issues check: https://issues.apache.org/jira/browse/BEAM-1251
        :param py_requirements: Additional python package(s) to install.
            If a value is passed to this parameter, a new virtual environment will be created with the
            additional packages installed.
            You can also install the apache-beam package if it is not installed on your system, or if you
            want to use a different version.
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is not None.
:param process_line_callback: (optional) Callback that can be used to process each line of
the stdout and stderr file descriptors.
"""
if "labels" in variables:
variables["labels"] = [f"{key}={value}" for key, value in variables["labels"].items()]
with contextlib.ExitStack() as exit_stack:
if py_requirements is not None:
if not py_requirements and not py_system_site_packages:
warning_invalid_environment = textwrap.dedent(
"""\
Invalid method invocation. You have disabled inclusion of system packages and empty
list required for installation, so it is not possible to create a valid virtual
environment. In the virtual environment, apache-beam package must be installed for
your job to be executed.
To fix this problem:
* install apache-beam on the system, then set parameter py_system_site_packages
to True,
* add apache-beam to the list of required packages in parameter py_requirements.
"""
)
raise AirflowException(warning_invalid_environment)
tmp_dir = exit_stack.enter_context(tempfile.TemporaryDirectory(prefix="apache-beam-venv"))
py_interpreter = prepare_virtualenv(
venv_directory=tmp_dir,
python_bin=py_interpreter,
system_site_packages=py_system_site_packages,
requirements=py_requirements,
)
command_prefix = [py_interpreter] + py_options + [py_file]
beam_version = (
subprocess.check_output(
[py_interpreter, "-c", "import apache_beam; print(apache_beam.__version__)"]
)
.decode()
.strip()
)
self.log.info("Beam version: %s", beam_version)
impersonate_service_account = variables.get("impersonate_service_account")
if impersonate_service_account:
if Version(beam_version) < Version("2.39.0") or True:
raise AirflowException(
"The impersonateServiceAccount option requires Apache Beam 2.39.0 or newer."
)
self._start_pipeline(
variables=variables,
command_prefix=command_prefix,
process_line_callback=process_line_callback,
)
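    # A minimal usage sketch for this hook (illustrative only; the pipeline file, options and
    # requirements below are assumptions, not values shipped with the provider):
    #
    #     hook = BeamHook(runner=BeamRunnerType.DirectRunner)
    #     hook.start_python_pipeline(
    #         variables={"output": "/tmp/beam_output"},
    #         py_file="/files/wordcount.py",
    #         py_options=[],
    #         py_requirements=["apache-beam"],
    #     )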
def start_java_pipeline(
self,
variables: dict,
jar: str,
job_class: str | None = None,
process_line_callback: Callable[[str], None] | None = None,
) -> None:
"""
Starts Apache Beam Java pipeline.
:param variables: Variables passed to the job.
:param jar: Name of the jar for the pipeline
:param job_class: Name of the java class for the pipeline.
:param process_line_callback: (optional) Callback that can be used to process each line of
the stdout and stderr file descriptors.
"""
if "labels" in variables:
variables["labels"] = json.dumps(variables["labels"], separators=(",", ":"))
command_prefix = ["java", "-cp", jar, job_class] if job_class else ["java", "-jar", jar]
self._start_pipeline(
variables=variables,
command_prefix=command_prefix,
process_line_callback=process_line_callback,
)
def start_go_pipeline(
self,
variables: dict,
go_file: str,
process_line_callback: Callable[[str], None] | None = None,
should_init_module: bool = False,
) -> None:
"""
Starts Apache Beam Go pipeline with a source file.
:param variables: Variables passed to the job.
:param go_file: Path to the Go file with your beam pipeline.
:param process_line_callback: (optional) Callback that can be used to process each line of
the stdout and stderr file descriptors.
:param should_init_module: If False (default), will just execute a `go run` command. If True, will
init a module and dependencies with a ``go mod init`` and ``go mod tidy``, useful when pulling
source with GCSHook.
:return:
"""
if shutil.which("go") is None:
raise AirflowConfigException(
"You need to have Go installed to run beam go pipeline. See https://go.dev/doc/install "
"installation guide. If you are running airflow in Docker see more info at "
"'https://airflow.apache.org/docs/docker-stack/recipes.html'."
)
if "labels" in variables:
variables["labels"] = json.dumps(variables["labels"], separators=(",", ":"))
working_directory = os.path.dirname(go_file)
basename = os.path.basename(go_file)
if should_init_module:
init_module("main", working_directory)
install_dependencies(working_directory)
command_prefix = ["go", "run", basename]
self._start_pipeline(
variables=variables,
command_prefix=command_prefix,
process_line_callback=process_line_callback,
working_directory=working_directory,
)
def start_go_pipeline_with_binary(
self,
variables: dict,
launcher_binary: str,
worker_binary: str,
process_line_callback: Callable[[str], None] | None = None,
) -> None:
"""
Starts Apache Beam Go pipeline with an executable binary.
:param variables: Variables passed to the job.
:param launcher_binary: Path to the binary compiled for the launching platform.
:param worker_binary: Path to the binary compiled for the worker platform.
:param process_line_callback: (optional) Callback that can be used to process each line of
the stdout and stderr file descriptors.
"""
job_variables = copy.deepcopy(variables)
if "labels" in job_variables:
job_variables["labels"] = json.dumps(job_variables["labels"], separators=(",", ":"))
job_variables["worker_binary"] = worker_binary
command_prefix = [launcher_binary]
self._start_pipeline(
variables=job_variables,
command_prefix=command_prefix,
process_line_callback=process_line_callback,
)
class BeamAsyncHook(BeamHook):
"""
Asynchronous hook for Apache Beam.
:param runner: Runner type.
"""
def __init__(
self,
runner: str,
) -> None:
self.runner = runner
super().__init__(runner=self.runner)
@staticmethod
async def _create_tmp_dir(prefix: str) -> str:
"""Helper method to create temporary directory."""
# Creating separate thread to create temporary directory
loop = asyncio.get_running_loop()
partial_func = functools.partial(tempfile.mkdtemp, prefix=prefix)
tmp_dir = await loop.run_in_executor(None, partial_func)
return tmp_dir
@staticmethod
async def _cleanup_tmp_dir(tmp_dir: str) -> None:
"""
Helper method to delete temporary directory after finishing work with it.
        It uses the `rmtree` method to recursively remove the temporary directory.
"""
shutil.rmtree(tmp_dir)
async def start_python_pipeline_async(
self,
variables: dict,
py_file: str,
py_options: list[str] | None = None,
py_interpreter: str = "python3",
py_requirements: list[str] | None = None,
py_system_site_packages: bool = False,
):
"""
Starts Apache Beam python pipeline.
:param variables: Variables passed to the pipeline.
:param py_file: Path to the python file to execute.
:param py_options: Additional options.
        :param py_interpreter: Python version of the Apache Beam pipeline.
            If None, this defaults to python3.
            To track python versions supported by beam and related
            issues check: https://issues.apache.org/jira/browse/BEAM-1251
        :param py_requirements: Additional python package(s) to install.
            If a value is passed to this parameter, a new virtual environment will be created with the
            additional packages installed.
            You can also install the apache-beam package if it is not installed on your system, or if you
            want to use a different version.
:param py_system_site_packages: Whether to include system_site_packages in your virtualenv.
See virtualenv documentation for more information.
This option is only relevant if the ``py_requirements`` parameter is not None.
"""
if "labels" in variables:
variables["labels"] = [f"{key}={value}" for key, value in variables["labels"].items()]
# Creating temporary directory
tmp_dir = await self._create_tmp_dir(prefix="apache-beam-venv")
async with contextlib.AsyncExitStack() as exit_stack:
if py_requirements is not None:
if not py_requirements and not py_system_site_packages:
warning_invalid_environment = textwrap.dedent(
"""\
Invalid method invocation. You have disabled inclusion of system packages and empty
list required for installation, so it is not possible to create a valid virtual
environment. In the virtual environment, apache-beam package must be installed for
your job to be executed.
To fix this problem:
* install apache-beam on the system, then set parameter py_system_site_packages
to True,
* add apache-beam to the list of required packages in parameter py_requirements.
"""
)
raise AirflowException(warning_invalid_environment)
# Pushing asynchronous callback to ensure the cleanup of the temporary
# directory when the asynchronous context is exited
exit_stack.push_async_callback(self._cleanup_tmp_dir, tmp_dir)
py_interpreter = prepare_virtualenv(
venv_directory=tmp_dir,
python_bin=py_interpreter,
system_site_packages=py_system_site_packages,
requirements=py_requirements,
)
command_prefix: list[str] = [py_interpreter] + (py_options or []) + [py_file]
beam_version = (
subprocess.check_output(
[py_interpreter, "-c", "import apache_beam; print(apache_beam.__version__)"]
)
.decode()
.strip()
)
self.log.info("Beam version: %s", beam_version)
impersonate_service_account = variables.get("impersonate_service_account")
if impersonate_service_account:
if Version(beam_version) < Version("2.39.0") or True:
raise AirflowException(
"The impersonateServiceAccount option requires Apache Beam 2.39.0 or newer."
)
return_code = await self.start_pipeline_async(
variables=variables,
command_prefix=command_prefix,
)
return return_code
async def start_pipeline_async(
self,
variables: dict,
command_prefix: list[str],
working_directory: str | None = None,
) -> int:
cmd = command_prefix + [
f"--runner={self.runner}",
]
if variables:
cmd.extend(beam_options_to_args(variables))
return await self.run_beam_command_async(
cmd=cmd,
working_directory=working_directory,
log=self.log,
)
async def run_beam_command_async(
self,
cmd: list[str],
log: logging.Logger,
working_directory: str | None = None,
) -> int:
"""
Function responsible for running pipeline command in subprocess.
:param cmd: Parts of the command to be run in subprocess
:param working_directory: Working directory
:param log: logger.
"""
cmd_str_representation = " ".join(shlex.quote(c) for c in cmd)
log.info("Running command: %s", cmd_str_representation)
# Creating a separate asynchronous process
process = await asyncio.create_subprocess_shell(
cmd_str_representation,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True,
cwd=working_directory,
)
# Waits for Apache Beam pipeline to complete.
log.info("Start waiting for Apache Beam process to complete.")
# Creating separate threads for stdout and stderr
stdout_task = asyncio.create_task(self.read_logs(process.stdout))
stderr_task = asyncio.create_task(self.read_logs(process.stderr))
# Waiting for the both tasks to complete
await asyncio.gather(stdout_task, stderr_task)
# Wait for the process to complete and return return_code
return_code = await process.wait()
log.info("Process exited with return code: %s", return_code)
if return_code != 0:
raise AirflowException(f"Apache Beam process failed with return code {return_code}")
return return_code
async def read_logs(self, stream_reader):
while True:
line = await stream_reader.readline()
if not line:
break
decoded_line = line.decode().strip()
self.log.info(decoded_line)
| 21,721 | 37.242958 | 109 | py |
airflow | airflow-main/airflow/providers/apache/beam/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/opsgenie/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "5.1.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-opsgenie:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,533 | 35.52381 | 117 | py |
airflow | airflow-main/airflow/providers/opsgenie/operators/opsgenie.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.opsgenie.hooks.opsgenie import OpsgenieAlertHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class OpsgenieCreateAlertOperator(BaseOperator):
"""
This operator allows you to post alerts to Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This operator sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpsgenieCreateAlertOperator`
:param opsgenie_conn_id: The name of the Opsgenie connection to use
:param message: The Message of the Opsgenie alert (templated)
:param alias: Client-defined identifier of the alert (templated)
:param description: Description field of the alert (templated)
:param responders: Teams, users, escalations and schedules that
the alert will be routed to send notifications.
:param visible_to: Teams and users that the alert will become visible
to without sending any notification.
:param actions: Custom actions that will be available for the alert.
:param tags: Tags of the alert.
:param details: Map of key-value pairs to use as custom properties of the alert.
:param entity: Entity field of the alert that is
generally used to specify which domain alert is related to. (templated)
:param source: Source field of the alert. Default value is
IP address of the incoming request.
:param priority: Priority level of the alert. Default value is P3. (templated)
:param user: Display name of the request owner.
:param note: Additional note that will be added while creating the alert. (templated)
"""
template_fields: Sequence[str] = ("message", "alias", "description", "entity", "priority", "note")
def __init__(
self,
*,
message: str,
opsgenie_conn_id: str = "opsgenie_default",
alias: str | None = None,
description: str | None = None,
responders: list[dict] | None = None,
visible_to: list[dict] | None = None,
actions: list[str] | None = None,
tags: list[str] | None = None,
details: dict | None = None,
entity: str | None = None,
source: str | None = None,
priority: str | None = None,
user: str | None = None,
note: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.message = message
self.opsgenie_conn_id = opsgenie_conn_id
self.alias = alias
self.description = description
self.responders = responders
self.visible_to = visible_to
self.actions = actions
self.tags = tags
self.details = details
self.entity = entity
self.source = source
self.priority = priority
self.user = user
self.note = note
self.hook: OpsgenieAlertHook | None = None
def _build_opsgenie_payload(self) -> dict[str, Any]:
"""
Construct the Opsgenie JSON payload.
        All relevant parameters are combined here into a valid Opsgenie JSON payload.
:return: Opsgenie payload (dict) to send
"""
payload = {}
for key in [
"message",
"alias",
"description",
"responders",
"visible_to",
"actions",
"tags",
"details",
"entity",
"source",
"priority",
"user",
"note",
]:
val = getattr(self, key)
if val:
payload[key] = val
return payload
def execute(self, context: Context) -> None:
"""Call the OpsgenieAlertHook to post message."""
self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
self.hook.create_alert(self._build_opsgenie_payload())
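# A minimal usage sketch inside a DAG (illustrative only; the task_id, message, priority and
# tags are placeholders, and an `opsgenie_default` connection holding the API key is assumed):
#
#     create_alert = OpsgenieCreateAlertOperator(
#         task_id="create_opsgenie_alert",
#         message="Example alert raised from Airflow",
#         priority="P3",
#         tags=["airflow", "example"],
#     )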
class OpsgenieCloseAlertOperator(BaseOperator):
"""
This operator allows you to close alerts to Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This operator sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpsgenieCloseAlertOperator`
:param opsgenie_conn_id: The name of the Opsgenie connection to use
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param user: display name of the request owner
:param note: additional note that will be added while creating the alert
:param source: source field of the alert. Default value is IP address of the incoming request
:param close_alert_kwargs: additional params to pass
"""
def __init__(
self,
*,
identifier: str,
opsgenie_conn_id: str = "opsgenie_default",
identifier_type: str | None = None,
user: str | None = None,
note: str | None = None,
source: str | None = None,
close_alert_kwargs: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.opsgenie_conn_id = opsgenie_conn_id
self.identifier = identifier
self.identifier_type = identifier_type
self.user = user
self.note = note
self.source = source
self.close_alert_kwargs = close_alert_kwargs
self.hook: OpsgenieAlertHook | None = None
def _build_opsgenie_close_alert_payload(self) -> dict[str, Any]:
"""
Construct the Opsgenie JSON payload.
        All relevant parameters are combined here into a valid Opsgenie JSON payload.
:return: Opsgenie close alert payload (dict) to send
"""
payload = {}
for key in [
"user",
"note",
"source",
]:
val = getattr(self, key)
if val:
payload[key] = val
return payload
def execute(self, context: Context) -> None:
"""Call the OpsgenieAlertHook to close alert."""
self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
self.hook.close_alert(
identifier=self.identifier,
identifier_type=self.identifier_type,
payload=self._build_opsgenie_close_alert_payload(),
**(self.close_alert_kwargs or {}),
)
class OpsgenieDeleteAlertOperator(BaseOperator):
"""
This operator allows you to delete alerts in Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This operator sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this operator.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:OpsgenieDeleteAlertOperator`
:param opsgenie_conn_id: The name of the Opsgenie connection to use
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param user: Display name of the request owner
:param source: Display name of the request source
"""
template_fields: Sequence[str] = ("identifier",)
def __init__(
self,
*,
identifier: str,
opsgenie_conn_id: str = "opsgenie_default",
identifier_type: str | None = None,
user: str | None = None,
source: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.opsgenie_conn_id = opsgenie_conn_id
self.identifier = identifier
self.identifier_type = identifier_type
self.user = user
self.source = source
def execute(self, context: Context) -> None:
"""Call the OpsgenieAlertHook to delete alert."""
hook = OpsgenieAlertHook(self.opsgenie_conn_id)
hook.delete_alert(
identifier=self.identifier,
identifier_type=self.identifier_type,
user=self.user,
source=self.source,
)
| 9,793 | 35.274074 | 102 | py |
airflow | airflow-main/airflow/providers/opsgenie/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/opsgenie/hooks/opsgenie.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from opsgenie_sdk import (
AlertApi,
ApiClient,
CloseAlertPayload,
Configuration,
CreateAlertPayload,
OpenApiException,
SuccessResponse,
)
from airflow.hooks.base import BaseHook
class OpsgenieAlertHook(BaseHook):
"""
This hook allows you to post alerts to Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This hook sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this hook.
:param opsgenie_conn_id: The name of the Opsgenie connection to use
"""
conn_name_attr = "opsgenie_conn_id"
default_conn_name = "opsgenie_default"
conn_type = "opsgenie"
hook_name = "Opsgenie"
def __init__(self, opsgenie_conn_id: str = "opsgenie_default") -> None:
super().__init__() # type: ignore[misc]
self.conn_id = opsgenie_conn_id
configuration = Configuration()
conn = self.get_connection(self.conn_id)
configuration.api_key["Authorization"] = conn.password
configuration.host = conn.host or "https://api.opsgenie.com"
self.alert_api_instance = AlertApi(ApiClient(configuration))
def _get_api_key(self) -> str:
"""
Get the API key from the connection.
:return: API key
"""
conn = self.get_connection(self.conn_id)
return conn.password
def get_conn(self) -> AlertApi:
"""
Get the underlying AlertApi client.
:return: AlertApi client
"""
return self.alert_api_instance
def create_alert(self, payload: dict | None = None) -> SuccessResponse:
"""
Create an alert on Opsgenie.
:param payload: Opsgenie API Create Alert payload values
See https://docs.opsgenie.com/docs/alert-api#section-create-alert
:return: api response
"""
payload = payload or {}
try:
create_alert_payload = CreateAlertPayload(**payload)
api_response = self.alert_api_instance.create_alert(create_alert_payload)
return api_response
except OpenApiException as e:
self.log.exception("Exception when sending alert to opsgenie with payload: %s", payload)
raise e
def close_alert(
self,
identifier: str,
identifier_type: str | None = "id",
payload: dict | None = None,
**kwargs: dict | None,
) -> SuccessResponse:
"""
Close an alert in Opsgenie.
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param payload: Request payload of closing alert action.
see https://github.com/opsgenie/opsgenie-python-sdk/blob/master/docs/AlertApi.md#close_alert
:param kwargs: params to pass to the function
:return: SuccessResponse
If the method is called asynchronously,
returns the request thread.
"""
payload = payload or {}
try:
close_alert_payload = CloseAlertPayload(**payload)
api_response = self.alert_api_instance.close_alert(
identifier=identifier,
identifier_type=identifier_type,
close_alert_payload=close_alert_payload,
**kwargs,
)
return api_response
except OpenApiException as e:
self.log.exception("Exception when closing alert in opsgenie with payload: %s", payload)
raise e
def delete_alert(
self,
identifier: str,
identifier_type: str | None = None,
user: str | None = None,
source: str | None = None,
) -> SuccessResponse:
"""
Delete an alert in Opsgenie.
:param identifier: Identifier of alert which could be alert id, tiny id or alert alias.
:param identifier_type: Type of the identifier that is provided as an in-line parameter.
Possible values are 'id', 'alias' or 'tiny'
:param user: Display name of the request owner.
:param source: Display name of the request source
:return: SuccessResponse
"""
try:
api_response = self.alert_api_instance.delete_alert(
identifier=identifier,
identifier_type=identifier_type,
user=user,
source=source,
)
return api_response
except OpenApiException as e:
self.log.exception("Exception when calling AlertApi->delete_alert: %s\n", e)
raise e
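# A minimal usage sketch for this hook (illustrative only; the message and alias values are
# placeholders, and an `opsgenie_default` connection holding the API key is assumed):
#
#     hook = OpsgenieAlertHook(opsgenie_conn_id="opsgenie_default")
#     hook.create_alert({"message": "Example alert", "alias": "example-alias"})
#     hook.close_alert(identifier="example-alias", identifier_type="alias")
#     hook.delete_alert(identifier="example-alias", identifier_type="alias")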
| 5,701 | 34.861635 | 104 | py |
airflow | airflow-main/airflow/providers/opsgenie/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/papermill/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-papermill:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,534 | 35.547619 | 118 | py |
airflow | airflow-main/airflow/providers/papermill/operators/papermill.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, ClassVar, Collection, Optional, Sequence
import attr
import papermill as pm
from airflow.lineage.entities import File
from airflow.models import BaseOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
@attr.s(auto_attribs=True)
class NoteBook(File):
"""Jupyter notebook."""
# For compatibility with Airflow 2.3:
# 1. Use predefined set because `File.template_fields` introduced in Airflow 2.4
# 2. Use old styled annotations because `cattrs` doesn't work well with PEP 604.
template_fields: ClassVar[Collection[str]] = {
"parameters",
*(File.template_fields if hasattr(File, "template_fields") else {"url"}),
}
type_hint: Optional[str] = "jupyter_notebook" # noqa: UP007
parameters: Optional[dict] = {} # noqa: UP007
meta_schema: str = __name__ + ".NoteBook"
class PapermillOperator(BaseOperator):
"""
Executes a jupyter notebook through papermill that is annotated with parameters.
:param input_nb: input notebook, either path or NoteBook inlet.
:param output_nb: output notebook, either path or NoteBook outlet.
:param parameters: the notebook parameters to set
:param kernel_name: (optional) name of kernel to execute the notebook against
(ignores kernel name in the notebook document metadata)
"""
supports_lineage = True
template_fields: Sequence[str] = ("input_nb", "output_nb", "parameters", "kernel_name", "language_name")
def __init__(
self,
*,
input_nb: str | NoteBook | None = None,
output_nb: str | NoteBook | None = None,
parameters: dict | None = None,
kernel_name: str | None = None,
language_name: str | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.parameters = parameters
if not input_nb:
raise ValueError("Input notebook is not specified")
elif not isinstance(input_nb, NoteBook):
self.input_nb = NoteBook(url=input_nb, parameters=self.parameters)
else:
self.input_nb = input_nb
if not output_nb:
raise ValueError("Output notebook is not specified")
elif not isinstance(output_nb, NoteBook):
self.output_nb = NoteBook(url=output_nb)
else:
self.output_nb = output_nb
self.kernel_name = kernel_name
self.language_name = language_name
self.inlets.append(self.input_nb)
self.outlets.append(self.output_nb)
def execute(self, context: Context):
pm.execute_notebook(
self.input_nb.url,
self.output_nb.url,
parameters=self.input_nb.parameters,
progress_bar=False,
report_mode=True,
kernel_name=self.kernel_name,
language=self.language_name,
)
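# A minimal usage sketch inside a DAG (illustrative only; the notebook paths and parameter
# values are placeholders):
#
#     run_notebook = PapermillOperator(
#         task_id="run_example_notebook",
#         input_nb="/tmp/hello_world.ipynb",
#         output_nb="/tmp/out-{{ execution_date }}.ipynb",
#         parameters={"msgs": "Ran from Airflow at {{ execution_date }}!"},
#     )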
| 3,714 | 33.082569 | 108 | py |
airflow | airflow-main/airflow/providers/papermill/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/influxdb/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "2.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-influxdb:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,533 | 35.52381 | 117 | py |
airflow | airflow-main/airflow/providers/influxdb/operators/influxdb.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.influxdb.hooks.influxdb import InfluxDBHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class InfluxDBOperator(BaseOperator):
"""
Executes sql code in a specific InfluxDB database.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:InfluxDBOperator`
:param sql: the sql code to be executed. Can receive a str representing a
sql statement
:param influxdb_conn_id: Reference to :ref:`Influxdb connection id <howto/connection:influxdb>`.
"""
template_fields: Sequence[str] = ("sql",)
def __init__(
self,
*,
sql: str,
influxdb_conn_id: str = "influxdb_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.influxdb_conn_id = influxdb_conn_id
self.sql = sql
def execute(self, context: Context) -> None:
self.log.info("Executing: %s", self.sql)
self.hook = InfluxDBHook(conn_id=self.influxdb_conn_id)
self.hook.query(self.sql)
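# A minimal usage sketch inside a DAG (illustrative only; the bucket name and Flux query are
# placeholders, and an `influxdb_default` connection is assumed):
#
#     query_influxdb = InfluxDBOperator(
#         task_id="query_influxdb",
#         influxdb_conn_id="influxdb_default",
#         sql='from(bucket: "example-bucket") |> range(start: -10m)',
#     )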
| 1,999 | 32.898305 | 100 | py |
airflow | airflow-main/airflow/providers/influxdb/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/influxdb/hooks/influxdb.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module allows connecting to an InfluxDB database.
.. spelling:word-list::
FluxTable
"""
from __future__ import annotations
import pandas as pd
from influxdb_client import InfluxDBClient
from influxdb_client.client.flux_table import FluxTable
from influxdb_client.client.write.point import Point
from influxdb_client.client.write_api import SYNCHRONOUS
from airflow.hooks.base import BaseHook
from airflow.models import Connection
class InfluxDBHook(BaseHook):
"""Interact with InfluxDB.
    Performs a connection to InfluxDB and retrieves the client.
:param influxdb_conn_id: Reference to :ref:`Influxdb connection id <howto/connection:influxdb>`.
"""
conn_name_attr = "influxdb_conn_id"
default_conn_name = "influxdb_default"
conn_type = "influxdb"
hook_name = "Influxdb"
def __init__(self, conn_id: str = default_conn_name, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.influxdb_conn_id = conn_id
self.connection = kwargs.pop("connection", None)
self.client = None
self.extras: dict = {}
self.uri = None
self.org_name = None
def get_client(self, uri, token, org_name):
return InfluxDBClient(url=uri, token=token, org=org_name)
def get_uri(self, conn: Connection):
"""Add additional parameters to the URI based on InfluxDB host requirements."""
conn_scheme = "https" if conn.schema is None else conn.schema
        conn_port = 8086 if conn.port is None else conn.port
return f"{conn_scheme}://{conn.host}:{conn_port}"
def get_conn(self) -> InfluxDBClient:
"""Initiate a new InfluxDB connection with token and organization name."""
self.connection = self.get_connection(self.influxdb_conn_id)
self.extras = self.connection.extra_dejson.copy()
self.uri = self.get_uri(self.connection)
self.log.info("URI: %s", self.uri)
if self.client is not None:
return self.client
token = self.connection.extra_dejson.get("token")
self.org_name = self.connection.extra_dejson.get("org_name")
self.log.info("URI: %s", self.uri)
self.log.info("Organization: %s", self.org_name)
self.client = self.get_client(self.uri, token, self.org_name)
return self.client
def query(self, query) -> list[FluxTable]:
"""Run the query.
Note: The bucket name should be included in the query.
:param query: InfluxDB query
:return: List
"""
client = self.get_conn()
query_api = client.query_api()
return query_api.query(query)
def query_to_df(self, query) -> pd.DataFrame:
"""Run the query and return a pandas dataframe.
Note: The bucket name should be included in the query.
:param query: InfluxDB query
:return: pd.DataFrame
"""
client = self.get_conn()
query_api = client.query_api()
return query_api.query_data_frame(query)
def write(self, bucket_name, point_name, tag_name, tag_value, field_name, field_value, synchronous=False):
"""Write a Point to the bucket specified.
Example: ``Point("my_measurement").tag("location", "Prague").field("temperature", 25.3)``
"""
# By defaults its Batching
if synchronous:
write_api = self.client.write_api(write_options=SYNCHRONOUS)
else:
write_api = self.client.write_api()
p = Point(point_name).tag(tag_name, tag_value).field(field_name, field_value)
write_api.write(bucket=bucket_name, record=p)
def create_organization(self, name):
"""Create a new organization."""
return self.client.organizations_api().create_organization(name=name)
def delete_organization(self, org_id):
"""Delete an organization by ID."""
return self.client.organizations_api().delete_organization(org_id=org_id)
def create_bucket(self, bucket_name, description, org_id, retention_rules=None):
"""Create a bucket for an organization."""
return self.client.buckets_api().create_bucket(
            bucket_name=bucket_name, description=description, org_id=org_id, retention_rules=retention_rules
)
def find_bucket_id_by_name(self, bucket_name):
"""Get bucket ID by name."""
bucket = self.client.buckets_api().find_bucket_by_name(bucket_name)
return "" if bucket is None else bucket.id
def delete_bucket(self, bucket_name):
"""Delete bucket by name."""
bucket = self.find_bucket_id_by_name(bucket_name)
return self.client.buckets_api().delete_bucket(bucket)
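# A minimal usage sketch for this hook (illustrative only; the bucket, measurement, tag and
# field names are placeholders, and an `influxdb_default` connection is assumed):
#
#     hook = InfluxDBHook(conn_id="influxdb_default")
#     hook.get_conn()  # initialises the underlying client before the write/bucket helpers are used
#     hook.write("example-bucket", "temperature", "location", "Prague", "value", 25.3, synchronous=True)
#     tables = hook.query('from(bucket: "example-bucket") |> range(start: -1h)')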
| 5,468 | 34.512987 | 110 | py |
airflow | airflow-main/airflow/providers/influxdb/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/presto/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "5.1.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-presto:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,531 | 35.47619 | 115 | py |
airflow | airflow-main/airflow/providers/presto/transfers/gcs_to_presto.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage to Presto operator."""
from __future__ import annotations
import csv
import json
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Iterable, Sequence
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.presto.hooks.presto import PrestoHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToPrestoOperator(BaseOperator):
"""
Loads a csv file from Google Cloud Storage into a Presto table.
Assumptions:
1. CSV file should not have headers
2. Presto table with requisite columns is already created
3. Optionally, a separate JSON file with headers or list of headers can be provided
:param source_bucket: Source GCS bucket that contains the csv
:param source_object: csv file including the path
:param presto_table: presto table to upload the data
:param presto_conn_id: destination presto connection
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud and
interact with the Google Cloud Storage service.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
template_fields: Sequence[str] = (
"source_bucket",
"source_object",
"presto_table",
)
def __init__(
self,
*,
source_bucket: str,
source_object: str,
presto_table: str,
presto_conn_id: str = "presto_default",
gcp_conn_id: str = "google_cloud_default",
schema_fields: Iterable[str] | None = None,
schema_object: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.presto_table = presto_table
self.presto_conn_id = presto_conn_id
self.gcp_conn_id = gcp_conn_id
self.schema_fields = schema_fields
self.schema_object = schema_object
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
presto_hook = PrestoHook(presto_conn_id=self.presto_conn_id)
with NamedTemporaryFile("w+") as temp_file:
self.log.info("Downloading data from %s", self.source_object)
gcs_hook.download(
bucket_name=self.source_bucket,
object_name=self.source_object,
filename=temp_file.name,
)
data = csv.reader(temp_file)
rows = (tuple(row) for row in data)
self.log.info("Inserting data into %s", self.presto_table)
if self.schema_fields:
presto_hook.insert_rows(table=self.presto_table, rows=rows, target_fields=self.schema_fields)
elif self.schema_object:
blob = gcs_hook.download(
bucket_name=self.source_bucket,
object_name=self.schema_object,
)
schema_fields = json.loads(blob.decode("utf-8"))
presto_hook.insert_rows(table=self.presto_table, rows=rows, target_fields=schema_fields)
else:
presto_hook.insert_rows(table=self.presto_table, rows=rows)
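# --- Editor's usage sketch: appended illustration, not part of the upstream operator. ---
# A minimal DAG wiring for GCSToPrestoOperator; bucket, object, table and connection ids
# are placeholder assumptions, and the CSV is headerless per the assumptions above.
def _example_gcs_to_presto_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(
        dag_id="example_gcs_to_presto",
        start_date=datetime(2023, 1, 1),
        schedule=None,
        catchup=False,
    ) as dag:
        GCSToPrestoOperator(
            task_id="gcs_to_presto",
            source_bucket="my-gcs-bucket",
            source_object="data/babynames.csv",
            presto_table="airflow.static_babynames",
            presto_conn_id="presto_default",
            gcp_conn_id="google_cloud_default",
        )
    return dag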
| 4,836 | 39.647059 | 109 | py |
airflow | airflow-main/airflow/providers/presto/transfers/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/presto/hooks/presto.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import os
from typing import Any, Callable, Iterable, Mapping
import prestodb
from prestodb.exceptions import DatabaseError
from prestodb.transaction import IsolationLevel
from airflow import AirflowException
from airflow.configuration import conf
from airflow.models import Connection
from airflow.providers.common.sql.hooks.sql import DbApiHook
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING, DEFAULT_FORMAT_PREFIX
def generate_presto_client_info() -> str:
"""Return json string with dag_id, task_id, execution_date and try_number."""
context_var = {
format_map["default"].replace(DEFAULT_FORMAT_PREFIX, ""): os.environ.get(
format_map["env_var_format"], ""
)
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()
}
task_info = {
"dag_id": context_var["dag_id"],
"task_id": context_var["task_id"],
"execution_date": context_var["execution_date"],
"try_number": context_var["try_number"],
"dag_run_id": context_var["dag_run_id"],
"dag_owner": context_var["dag_owner"],
}
return json.dumps(task_info, sort_keys=True)
class PrestoException(Exception):
"""Presto exception."""
def _boolify(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == "false":
return False
elif value.lower() == "true":
return True
return value
class PrestoHook(DbApiHook):
"""
Interact with Presto through prestodb.
>>> ph = PrestoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = "presto_conn_id"
default_conn_name = "presto_default"
conn_type = "presto"
hook_name = "Presto"
placeholder = "?"
def get_conn(self) -> Connection:
"""Returns a connection object."""
db = self.get_connection(self.presto_conn_id) # type: ignore[attr-defined]
extra = db.extra_dejson
auth = None
if db.password and extra.get("auth") == "kerberos":
raise AirflowException("Kerberos authorization doesn't support password.")
elif db.password:
auth = prestodb.auth.BasicAuthentication(db.login, db.password)
elif extra.get("auth") == "kerberos":
auth = prestodb.auth.KerberosAuthentication(
config=extra.get("kerberos__config", os.environ.get("KRB5_CONFIG")),
service_name=extra.get("kerberos__service_name"),
mutual_authentication=_boolify(extra.get("kerberos__mutual_authentication", False)),
force_preemptive=_boolify(extra.get("kerberos__force_preemptive", False)),
hostname_override=extra.get("kerberos__hostname_override"),
sanitize_mutual_error_response=_boolify(
extra.get("kerberos__sanitize_mutual_error_response", True)
),
principal=extra.get("kerberos__principal", conf.get("kerberos", "principal")),
delegate=_boolify(extra.get("kerberos__delegate", False)),
ca_bundle=extra.get("kerberos__ca_bundle"),
)
http_headers = {"X-Presto-Client-Info": generate_presto_client_info()}
presto_conn = prestodb.dbapi.connect(
host=db.host,
port=db.port,
user=db.login,
source=db.extra_dejson.get("source", "airflow"),
http_headers=http_headers,
http_scheme=db.extra_dejson.get("protocol", "http"),
catalog=db.extra_dejson.get("catalog", "hive"),
schema=db.schema,
auth=auth,
isolation_level=self.get_isolation_level(), # type: ignore[func-returns-value]
)
if extra.get("verify") is not None:
            # Unfortunately the verify parameter is not available via the public API yet.
# The PR is merged in the presto library, but has not been released.
# See: https://github.com/prestosql/presto-python-client/pull/31
presto_conn._http_session.verify = _boolify(extra["verify"])
return presto_conn
def get_isolation_level(self) -> Any:
"""Returns an isolation level."""
db = self.get_connection(self.presto_conn_id) # type: ignore[attr-defined]
isolation_level = db.extra_dejson.get("isolation_level", "AUTOCOMMIT").upper()
return getattr(IsolationLevel, isolation_level, IsolationLevel.AUTOCOMMIT)
def get_records(
self,
sql: str | list[str] = "",
parameters: Iterable | Mapping | None = None,
) -> Any:
if not isinstance(sql, str):
raise ValueError(f"The sql in Presto Hook must be a string and is {sql}!")
try:
return super().get_records(self.strip_sql_string(sql), parameters)
except DatabaseError as e:
raise PrestoException(e)
def get_first(self, sql: str | list[str] = "", parameters: Iterable | Mapping | None = None) -> Any:
if not isinstance(sql, str):
raise ValueError(f"The sql in Presto Hook must be a string and is {sql}!")
try:
return super().get_first(self.strip_sql_string(sql), parameters)
except DatabaseError as e:
raise PrestoException(e)
def get_pandas_df(self, sql: str = "", parameters=None, **kwargs):
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self.strip_sql_string(sql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise PrestoException(e)
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data, **kwargs)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame(**kwargs)
return df
def run(
self,
sql: str | Iterable[str],
autocommit: bool = False,
parameters: Iterable | Mapping | None = None,
handler: Callable | None = None,
split_statements: bool = False,
return_last: bool = True,
) -> Any | list[Any] | None:
return super().run(
sql=sql,
autocommit=autocommit,
parameters=parameters,
handler=handler,
split_statements=split_statements,
return_last=return_last,
)
def insert_rows(
self,
table: str,
rows: Iterable[tuple],
target_fields: Iterable[str] | None = None,
commit_every: int = 0,
replace: bool = False,
**kwargs,
) -> None:
"""
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:param rows: The rows to insert into the table
:param target_fields: The names of the columns to fill in the table
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:param replace: Whether to replace instead of insert
"""
if self.get_isolation_level() == IsolationLevel.AUTOCOMMIT:
            self.log.info(
                "Transactions are not enabled in the presto connection. "
                "Please use the isolation_level property to enable them. "
                "Falling back to inserting all rows in one transaction."
            )
commit_every = 0
super().insert_rows(table, rows, target_fields, commit_every)
@staticmethod
def _serialize_cell(cell: Any, conn: Connection | None = None) -> Any:
"""
Presto will adapt all execute() args internally, hence we return cell without any conversion.
:param cell: The cell to insert into the table
:param conn: The database connection
:return: The cell
"""
return cell
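# --- Editor's usage sketch: appended illustration, not part of the upstream hook. ---
# Typical calls against the hook defined above; the connection id and SQL are placeholders.
def _example_presto_usage():
    hook = PrestoHook(presto_conn_id="presto_default")
    records = hook.get_records("SELECT count(1) FROM airflow.static_babynames")
    df = hook.get_pandas_df("SELECT name, num FROM airflow.static_babynames LIMIT 10")
    return records, df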
| 8,805 | 37.286957 | 104 | py |
airflow | airflow-main/airflow/providers/presto/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/atlassian/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/atlassian/jira/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "2.1.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-atlassian-jira:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,539 | 35.666667 | 123 | py |
airflow | airflow-main/airflow/providers/atlassian/jira/operators/jira.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Sequence
from airflow.models import BaseOperator
from airflow.providers.atlassian.jira.hooks.jira import JiraHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class JiraOperator(BaseOperator):
"""JiraOperator to interact and perform action on Jira issue tracking system.
This operator is designed to use Atlassian Jira SDK. For more information:
https://atlassian-python-api.readthedocs.io/jira.html
:param jira_conn_id: Reference to a pre-defined Jira Connection.
:param jira_method: Method name from Atlassian Jira Python SDK to be called.
:param jira_method_args: Method parameters for the jira_method. (templated)
:param result_processor: Function to further process the response from Jira.
:param get_jira_resource_method: Function or operator to get Jira resource on which the provided
jira_method will be executed.
"""
template_fields: Sequence[str] = ("jira_method_args",)
def __init__(
self,
*,
jira_method: str,
jira_conn_id: str = "jira_default",
jira_method_args: dict | None = None,
result_processor: Callable | None = None,
get_jira_resource_method: Callable | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.jira_conn_id = jira_conn_id
self.method_name = jira_method
self.jira_method_args = jira_method_args or {}
self.result_processor = result_processor
self.get_jira_resource_method = get_jira_resource_method
def execute(self, context: Context) -> Any:
if self.get_jira_resource_method is not None:
# if get_jira_resource_method is provided, jira_method will be executed on
# resource returned by executing the get_jira_resource_method.
# This makes all the provided methods of atlassian-python-api JIRA sdk accessible and usable
# directly at the JiraOperator without additional wrappers.
# ref: https://atlassian-python-api.readthedocs.io/jira.html
if isinstance(self.get_jira_resource_method, JiraOperator):
resource = self.get_jira_resource_method.execute(**context)
else:
resource = self.get_jira_resource_method(**context)
else:
# Default method execution is on the top level jira client resource
hook = JiraHook(jira_conn_id=self.jira_conn_id)
resource = hook.client
jira_result: Any = getattr(resource, self.method_name)(**self.jira_method_args)
output = jira_result.get("id", None) if isinstance(jira_result, dict) else None
self.xcom_push(context, key="id", value=output)
if self.result_processor:
return self.result_processor(context, jira_result)
return jira_result
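# --- Editor's usage sketch: appended illustration, not part of the upstream operator. ---
# Creates an issue by delegating to the SDK's ``issue_create`` method (an
# atlassian-python-api call); project key, summary and connection id are placeholders.
def _example_jira_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id="example_jira", start_date=datetime(2023, 1, 1), schedule=None, catchup=False) as dag:
        JiraOperator(
            task_id="create_jira_ticket",
            jira_conn_id="jira_default",
            jira_method="issue_create",
            jira_method_args={
                "fields": {
                    "project": {"key": "PROJ"},
                    "summary": "Ticket created from Airflow",
                    "issuetype": {"name": "Task"},
                }
            },
        )
    return dag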
| 3,710 | 41.655172 | 104 | py |
airflow | airflow-main/airflow/providers/atlassian/jira/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/atlassian/jira/hooks/jira.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hook for JIRA."""
from __future__ import annotations
from typing import Any
from atlassian import Jira
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
class JiraHook(BaseHook):
"""
    Jira interaction hook, a wrapper around the Atlassian Jira Python SDK.
:param jira_conn_id: reference to a pre-defined Jira Connection
"""
default_conn_name = "jira_default"
conn_type = "jira"
conn_name_attr = "jira_conn_id"
hook_name = "JIRA"
def __init__(self, jira_conn_id: str = default_conn_name, proxies: Any | None = None) -> None:
super().__init__()
self.jira_conn_id = jira_conn_id
self.proxies = proxies
self.client: Jira | None = None
self.get_conn()
def get_conn(self) -> Jira:
if not self.client:
self.log.debug("Creating Jira client for conn_id: %s", self.jira_conn_id)
verify = True
if not self.jira_conn_id:
raise AirflowException("Failed to create jira client. no jira_conn_id provided")
conn = self.get_connection(self.jira_conn_id)
if conn.extra is not None:
extra_options = conn.extra_dejson
# only required attributes are taken for now,
# more can be added ex: timeout, cloud, session
# verify
if "verify" in extra_options and extra_options["verify"].lower() == "false":
verify = False
self.client = Jira(
url=conn.host,
username=conn.login,
password=conn.password,
verify_ssl=verify,
proxies=self.proxies,
)
return self.client
| 2,546 | 32.96 | 98 | py |
airflow | airflow-main/airflow/providers/atlassian/jira/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/atlassian/jira/sensors/jira.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Sequence
from airflow.providers.atlassian.jira.hooks.jira import JiraHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class JiraSensor(BaseSensorOperator):
"""
Monitors a jira ticket for any change.
:param jira_conn_id: reference to a pre-defined Jira Connection
:param method_name: method name from atlassian-python-api JIRA sdk to execute
:param method_params: parameters for the method method_name
    :param result_processor: function that returns a boolean and acts as the sensor response
"""
def __init__(
self,
*,
method_name: str,
jira_conn_id: str = "jira_default",
method_params: dict | None = None,
result_processor: Callable | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.jira_conn_id = jira_conn_id
self.result_processor = None
if result_processor is not None:
self.result_processor = result_processor
self.method_name = method_name
self.method_params = method_params
def poke(self, context: Context) -> Any:
hook = JiraHook(jira_conn_id=self.jira_conn_id)
resource = hook.get_conn()
jira_result = getattr(resource, self.method_name)(**self.method_params)
if self.result_processor is None:
return jira_result
return self.result_processor(jira_result)
class JiraTicketSensor(JiraSensor):
"""
    Monitors a jira ticket for a given change in a field, evaluated by a checker function.
:param jira_conn_id: reference to a pre-defined Jira Connection
:param ticket_id: id of the ticket to be monitored
:param field: field of the ticket to be monitored
:param expected_value: expected value of the field
    :param field_checker_func: function that returns a boolean and acts as the sensor response
"""
template_fields: Sequence[str] = ("ticket_id",)
def __init__(
self,
*,
jira_conn_id: str = "jira_default",
ticket_id: str | None = None,
field: str | None = None,
expected_value: str | None = None,
field_checker_func: Callable | None = None,
**kwargs,
) -> None:
self.jira_conn_id = jira_conn_id
self.ticket_id = ticket_id
self.field = field
self.expected_value = expected_value
if field_checker_func is None:
field_checker_func = self.issue_field_checker
super().__init__(
jira_conn_id=jira_conn_id, method_name="issue", result_processor=field_checker_func, **kwargs
)
def poke(self, context: Context) -> Any:
self.log.info("Jira Sensor checking for change in ticket: %s", self.ticket_id)
self.method_name = "issue"
self.method_params = {"key": self.ticket_id, "fields": self.field}
return JiraSensor.poke(self, context=context)
def issue_field_checker(self, jira_result: dict) -> bool | None:
"""Check issue using different conditions to prepare to evaluate sensor."""
result = None
if jira_result is not None and self.field is not None and self.expected_value is not None:
field_val = jira_result.get("fields", {}).get(self.field, None)
if field_val is not None:
if isinstance(field_val, list):
result = self.expected_value in field_val
elif isinstance(field_val, str):
result = self.expected_value.lower() == field_val.lower()
elif isinstance(field_val, dict) and field_val.get("name", None):
result = self.expected_value.lower() == field_val.get("name", "").lower()
else:
                    self.log.warning(
                        "No checker implemented for issue field %s, which "
                        "is neither a string, a list, nor a Jira resource",
                        self.field,
                    )
if result is True:
self.log.info(
"Issue field %s has expected value %s, returning success", self.field, self.expected_value
)
else:
self.log.info("Issue field %s don't have expected value %s yet.", self.field, self.expected_value)
return result
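# --- Editor's usage sketch: appended illustration, not part of the upstream sensors. ---
# Waits until a (placeholder) ticket carries an expected label; ticket id, field name and
# expected value are assumptions for illustration.
def _example_jira_ticket_sensor():
    return JiraTicketSensor(
        task_id="wait_for_approval_label",
        jira_conn_id="jira_default",
        ticket_id="PROJ-123",
        field="labels",
        expected_value="approved",
        poke_interval=60,
        timeout=60 * 60,
    )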
| 5,194 | 37.768657 | 110 | py |
airflow | airflow-main/airflow/providers/atlassian/jira/sensors/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/trino/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "5.2.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-trino:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,530 | 35.452381 | 114 | py |
airflow | airflow-main/airflow/providers/trino/transfers/gcs_to_trino.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage to Trino operator."""
from __future__ import annotations
import csv
import json
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Iterable, Sequence
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.trino.hooks.trino import TrinoHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToTrinoOperator(BaseOperator):
"""
Loads a csv file from Google Cloud Storage into a Trino table.
Assumptions:
1. CSV file should not have headers
2. Trino table with requisite columns is already created
3. Optionally, a separate JSON file with headers can be provided
:param source_bucket: Source GCS bucket that contains the csv
:param source_object: csv file including the path
:param trino_table: trino table to upload the data
:param trino_conn_id: destination trino connection
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud and
interact with the Google Cloud Storage service.
    :param schema_fields: The names of the columns to fill in the table. If schema_fields is
        provided, any schema_object provided will be ignored.
:param schema_object: JSON file with schema fields
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
"""
template_fields: Sequence[str] = (
"source_bucket",
"source_object",
"trino_table",
)
def __init__(
self,
*,
source_bucket: str,
source_object: str,
trino_table: str,
trino_conn_id: str = "trino_default",
gcp_conn_id: str = "google_cloud_default",
schema_fields: Iterable[str] | None = None,
schema_object: str | None = None,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.trino_table = trino_table
self.trino_conn_id = trino_conn_id
self.gcp_conn_id = gcp_conn_id
self.schema_fields = schema_fields
self.schema_object = schema_object
self.impersonation_chain = impersonation_chain
def execute(self, context: Context) -> None:
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
trino_hook = TrinoHook(trino_conn_id=self.trino_conn_id)
with NamedTemporaryFile("w+") as temp_file:
self.log.info("Downloading data from %s", self.source_object)
gcs_hook.download(
bucket_name=self.source_bucket,
object_name=self.source_object,
filename=temp_file.name,
)
data = csv.reader(temp_file)
rows = (tuple(row) for row in data)
self.log.info("Inserting data into %s", self.trino_table)
if self.schema_fields:
trino_hook.insert_rows(table=self.trino_table, rows=rows, target_fields=self.schema_fields)
elif self.schema_object:
blob = gcs_hook.download(
bucket_name=self.source_bucket,
object_name=self.schema_object,
)
schema_fields = json.loads(blob.decode("utf-8"))
trino_hook.insert_rows(table=self.trino_table, rows=rows, target_fields=schema_fields)
else:
trino_hook.insert_rows(table=self.trino_table, rows=rows)
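# --- Editor's usage sketch: appended illustration, not part of the upstream operator. ---
# Same pattern as the Presto transfer, here exercising the schema_object branch: column
# names are read from a JSON file in the bucket. All names below are placeholder assumptions.
def _example_gcs_to_trino_task():
    return GCSToTrinoOperator(
        task_id="gcs_to_trino",
        source_bucket="my-gcs-bucket",
        source_object="data/babynames.csv",
        schema_object="data/babynames_schema.json",
        trino_table="airflow.static_babynames",
        trino_conn_id="trino_default",
        gcp_conn_id="google_cloud_default",
    )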
| 4,999 | 40.322314 | 107 | py |
airflow | airflow-main/airflow/providers/trino/transfers/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/trino/operators/trino.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains the Trino operator."""
from __future__ import annotations
import warnings
from typing import Any, Sequence
from trino.exceptions import TrinoQueryError
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.common.sql.operators.sql import SQLExecuteQueryOperator
from airflow.providers.trino.hooks.trino import TrinoHook
class TrinoOperator(SQLExecuteQueryOperator):
"""
Executes sql code using a specific Trino query Engine.
This class is deprecated.
Please use :class:`airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator`.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:TrinoOperator`
:param sql: the SQL code to be executed as a single string, or
a list of str (sql statements), or a reference to a template file.
:param trino_conn_id: id of the connection config for the target Trino
environment
:param autocommit: What to set the connection's autocommit setting to
before executing the query
:param handler: The result handler which is called with the result of each statement.
:param parameters: (optional) the parameters to render the SQL query with.
"""
template_fields: Sequence[str] = ("sql",)
template_fields_renderers = {"sql": "sql"}
template_ext: Sequence[str] = (".sql",)
ui_color = "#ededed"
def __init__(self, *, trino_conn_id: str = "trino_default", **kwargs: Any) -> None:
super().__init__(conn_id=trino_conn_id, **kwargs)
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator`.""",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
def on_kill(self) -> None:
if self._hook is not None and isinstance(self._hook, TrinoHook):
query_id = "'" + self._hook.query_id + "'"
try:
self.log.info("Stopping query run with queryId - %s", self._hook.query_id)
self._hook.run(
sql=f"CALL system.runtime.kill_query(query_id => {query_id},message => 'Job "
f"killed by "
f"user');",
handler=list,
)
except TrinoQueryError as e:
self.log.info(str(e))
self.log.info("Trino query (%s) terminated", query_id)
| 3,293 | 39.666667 | 97 | py |
airflow | airflow-main/airflow/providers/trino/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/trino/hooks/trino.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import os
from typing import Any, Callable, Iterable, Mapping
import trino
from trino.exceptions import DatabaseError
from trino.transaction import IsolationLevel
from airflow import AirflowException
from airflow.configuration import conf
from airflow.models import Connection
from airflow.providers.common.sql.hooks.sql import DbApiHook
from airflow.utils.operator_helpers import AIRFLOW_VAR_NAME_FORMAT_MAPPING, DEFAULT_FORMAT_PREFIX
def generate_trino_client_info() -> str:
"""Return json string with dag_id, task_id, execution_date and try_number."""
context_var = {
format_map["default"].replace(DEFAULT_FORMAT_PREFIX, ""): os.environ.get(
format_map["env_var_format"], ""
)
for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values()
}
task_info = {
"dag_id": context_var["dag_id"],
"task_id": context_var["task_id"],
"execution_date": context_var["execution_date"],
"try_number": context_var["try_number"],
"dag_run_id": context_var["dag_run_id"],
"dag_owner": context_var["dag_owner"],
}
return json.dumps(task_info, sort_keys=True)
class TrinoException(Exception):
"""Trino exception."""
def _boolify(value):
if isinstance(value, bool):
return value
if isinstance(value, str):
if value.lower() == "false":
return False
elif value.lower() == "true":
return True
return value
class TrinoHook(DbApiHook):
"""
Interact with Trino through trino package.
>>> ph = TrinoHook()
>>> sql = "SELECT count(1) AS num FROM airflow.static_babynames"
>>> ph.get_records(sql)
[[340698]]
"""
conn_name_attr = "trino_conn_id"
default_conn_name = "trino_default"
conn_type = "trino"
hook_name = "Trino"
query_id = ""
placeholder = "?"
_test_connection_sql = "select 1"
def get_conn(self) -> Connection:
"""Returns a connection object."""
db = self.get_connection(self.trino_conn_id) # type: ignore[attr-defined]
extra = db.extra_dejson
auth = None
user = db.login
if db.password and extra.get("auth") in ("kerberos", "certs"):
raise AirflowException(f"The {extra.get('auth')!r} authorization type doesn't support password.")
elif db.password:
auth = trino.auth.BasicAuthentication(db.login, db.password) # type: ignore[attr-defined]
elif extra.get("auth") == "jwt":
if "jwt__file" in extra:
with open(extra.get("jwt__file")) as jwt_file:
token = jwt_file.read()
else:
token = extra.get("jwt__token")
auth = trino.auth.JWTAuthentication(token=token)
elif extra.get("auth") == "certs":
auth = trino.auth.CertificateAuthentication(
extra.get("certs__client_cert_path"),
extra.get("certs__client_key_path"),
)
elif extra.get("auth") == "kerberos":
auth = trino.auth.KerberosAuthentication( # type: ignore[attr-defined]
config=extra.get("kerberos__config", os.environ.get("KRB5_CONFIG")),
service_name=extra.get("kerberos__service_name"),
mutual_authentication=_boolify(extra.get("kerberos__mutual_authentication", False)),
force_preemptive=_boolify(extra.get("kerberos__force_preemptive", False)),
hostname_override=extra.get("kerberos__hostname_override"),
sanitize_mutual_error_response=_boolify(
extra.get("kerberos__sanitize_mutual_error_response", True)
),
principal=extra.get("kerberos__principal", conf.get("kerberos", "principal")),
delegate=_boolify(extra.get("kerberos__delegate", False)),
ca_bundle=extra.get("kerberos__ca_bundle"),
)
if _boolify(extra.get("impersonate_as_owner", False)):
user = os.getenv("AIRFLOW_CTX_DAG_OWNER", None)
if user is None:
user = db.login
http_headers = {"X-Trino-Client-Info": generate_trino_client_info()}
trino_conn = trino.dbapi.connect(
host=db.host,
port=db.port,
user=user,
source=extra.get("source", "airflow"),
http_scheme=extra.get("protocol", "http"),
http_headers=http_headers,
catalog=extra.get("catalog", "hive"),
schema=db.schema,
auth=auth,
            isolation_level=self.get_isolation_level(),  # type: ignore[func-returns-value]
verify=_boolify(extra.get("verify", True)),
session_properties=extra.get("session_properties") or None,
client_tags=extra.get("client_tags") or None,
)
return trino_conn
def get_isolation_level(self) -> Any:
"""Returns an isolation level."""
db = self.get_connection(self.trino_conn_id) # type: ignore[attr-defined]
isolation_level = db.extra_dejson.get("isolation_level", "AUTOCOMMIT").upper()
return getattr(IsolationLevel, isolation_level, IsolationLevel.AUTOCOMMIT)
def get_records(
self,
sql: str | list[str] = "",
parameters: Iterable | Mapping | None = None,
) -> Any:
if not isinstance(sql, str):
raise ValueError(f"The sql in Trino Hook must be a string and is {sql}!")
try:
return super().get_records(self.strip_sql_string(sql), parameters)
except DatabaseError as e:
raise TrinoException(e)
def get_first(self, sql: str | list[str] = "", parameters: Iterable | Mapping | None = None) -> Any:
if not isinstance(sql, str):
raise ValueError(f"The sql in Trino Hook must be a string and is {sql}!")
try:
return super().get_first(self.strip_sql_string(sql), parameters)
except DatabaseError as e:
raise TrinoException(e)
def get_pandas_df(
self, sql: str = "", parameters: Iterable | Mapping | None = None, **kwargs
): # type: ignore[override]
import pandas
cursor = self.get_cursor()
try:
cursor.execute(self.strip_sql_string(sql), parameters)
data = cursor.fetchall()
except DatabaseError as e:
raise TrinoException(e)
column_descriptions = cursor.description
if data:
df = pandas.DataFrame(data, **kwargs)
df.columns = [c[0] for c in column_descriptions]
else:
df = pandas.DataFrame(**kwargs)
return df
def run(
self,
sql: str | Iterable[str],
autocommit: bool = False,
parameters: Iterable | Mapping | None = None,
handler: Callable | None = None,
split_statements: bool = False,
return_last: bool = True,
) -> Any | list[Any] | None:
return super().run(
sql=sql,
autocommit=autocommit,
parameters=parameters,
handler=handler,
split_statements=split_statements,
return_last=return_last,
)
def insert_rows(
self,
table: str,
rows: Iterable[tuple],
target_fields: Iterable[str] | None = None,
commit_every: int = 0,
replace: bool = False,
**kwargs,
) -> None:
"""
A generic way to insert a set of tuples into a table.
:param table: Name of the target table
:param rows: The rows to insert into the table
:param target_fields: The names of the columns to fill in the table
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:param replace: Whether to replace instead of insert
"""
if self.get_isolation_level() == IsolationLevel.AUTOCOMMIT:
            self.log.info(
                "Transactions are not enabled in the trino connection. "
                "Please use the isolation_level property to enable them. "
                "Falling back to inserting all rows in one transaction."
            )
commit_every = 0
super().insert_rows(table, rows, target_fields, commit_every, replace)
@staticmethod
def _serialize_cell(cell: Any, conn: Connection | None = None) -> Any:
"""
Trino will adapt all execute() args internally, hence we return cell without any conversion.
:param cell: The cell to insert into the table
:param conn: The database connection
:return: The cell
"""
return cell
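# --- Editor's usage sketch: appended illustration, not part of the upstream hook. ---
# Inserts a couple of rows through the hook above; table, columns and connection id are
# placeholder assumptions. With the default AUTOCOMMIT isolation level, insert_rows()
# forces commit_every to 0, as logged in the method above.
def _example_trino_usage():
    hook = TrinoHook(trino_conn_id="trino_default")
    hook.insert_rows(
        table="airflow.static_babynames",
        rows=[("Alice", 42), ("Bob", 7)],
        target_fields=["name", "num"],
    )
    return hook.get_first("SELECT count(1) FROM airflow.static_babynames")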
| 9,552 | 37.212 | 109 | py |
airflow | airflow-main/airflow/providers/trino/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/jdbc/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.0.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-jdbc:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,529 | 35.428571 | 113 | py |
airflow | airflow-main/airflow/providers/jdbc/operators/jdbc.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import Sequence
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.common.sql.operators.sql import SQLExecuteQueryOperator
class JdbcOperator(SQLExecuteQueryOperator):
"""
Executes sql code in a database using jdbc driver.
Requires jaydebeapi.
This class is deprecated.
Please use :class:`airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator` instead.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:JdbcOperator`
:param sql: the SQL code to be executed as a single string, or
a list of str (sql statements), or a reference to a template file.
Template references are recognized by str ending in '.sql'
:param jdbc_conn_id: reference to a predefined database
:param autocommit: if True, each command is automatically committed.
(default value: False)
:param parameters: (optional) the parameters to render the SQL query with.
"""
template_fields: Sequence[str] = ("sql",)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
ui_color = "#ededed"
def __init__(self, *, jdbc_conn_id: str = "jdbc_default", **kwargs) -> None:
super().__init__(conn_id=jdbc_conn_id, **kwargs)
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator`.""",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
| 2,432 | 37.619048 | 99 | py |
airflow | airflow-main/airflow/providers/jdbc/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/jdbc/hooks/jdbc.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any
import jaydebeapi
from airflow.models.connection import Connection
from airflow.providers.common.sql.hooks.sql import DbApiHook
class JdbcHook(DbApiHook):
"""General hook for JDBC access.
JDBC URL, username and password will be taken from the predefined connection.
Note that the whole JDBC URL must be specified in the "host" field in the DB.
Raises an airflow error if the given connection id doesn't exist.
To configure driver parameters, you can use the following methods:
1. Supply them as constructor arguments when instantiating the hook.
2. Set the "driver_path" and/or "driver_class" parameters in the "hook_params" dictionary when
creating the hook using SQL operators.
3. Set the "driver_path" and/or "driver_class" extra in the connection and correspondingly enable
the "allow_driver_path_in_extra" and/or "allow_driver_class_in_extra" options in the
"providers.jdbc" section of the Airflow configuration. If you're enabling these options in Airflow
configuration, you should make sure that you trust the users who can edit connections in the UI
to not use it maliciously.
4. Patch the ``JdbcHook.default_driver_path`` and/or ``JdbcHook.default_driver_class`` values in the
"local_settings.py" file.
See :doc:`/connections/jdbc` for full documentation.
:param args: passed to DbApiHook
:param driver_path: path to the JDBC driver jar file. See above for more info
:param driver_class: name of the JDBC driver class. See above for more info
:param kwargs: passed to DbApiHook
"""
conn_name_attr = "jdbc_conn_id"
default_conn_name = "jdbc_default"
conn_type = "jdbc"
hook_name = "JDBC Connection"
supports_autocommit = True
default_driver_path: str | None = None
default_driver_class: str | None = None
def __init__(
self,
*args,
driver_path: str | None = None,
driver_class: str | None = None,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self._driver_path = driver_path
self._driver_class = driver_class
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Get custom field behaviour."""
return {
"hidden_fields": ["port", "schema"],
"relabeling": {"host": "Connection URL"},
}
@property
def connection_extra_lower(self) -> dict:
"""
``connection.extra_dejson`` but where keys are converted to lower case.
This is used internally for case-insensitive access of jdbc params.
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
return {k.lower(): v for k, v in conn.extra_dejson.items()}
@property
def driver_path(self) -> str | None:
from airflow.configuration import conf
extra_driver_path = self.connection_extra_lower.get("driver_path")
if extra_driver_path:
if conf.getboolean("providers.jdbc", "allow_driver_path_in_extra", fallback=False):
self._driver_path = extra_driver_path
else:
self.log.warning(
"You have supplied 'driver_path' via connection extra but it will not be used. In order "
"to use 'driver_path' from extra you must set airflow config setting "
"`allow_driver_path_in_extra = True` in section `providers.jdbc`. Alternatively you may "
"specify it via 'driver_path' parameter of the hook constructor or via 'hook_params' "
"dictionary with key 'driver_path' if using SQL operators."
)
if not self._driver_path:
self._driver_path = self.default_driver_path
return self._driver_path
@property
def driver_class(self) -> str | None:
from airflow.configuration import conf
extra_driver_class = self.connection_extra_lower.get("driver_class")
if extra_driver_class:
if conf.getboolean("providers.jdbc", "allow_driver_class_in_extra", fallback=False):
self._driver_class = extra_driver_class
else:
self.log.warning(
"You have supplied 'driver_class' via connection extra but it will not be used. In order "
"to use 'driver_class' from extra you must set airflow config setting "
"`allow_driver_class_in_extra = True` in section `providers.jdbc`. Alternatively you may "
"specify it via 'driver_class' parameter of the hook constructor or via 'hook_params' "
"dictionary with key 'driver_class' if using SQL operators."
)
if not self._driver_class:
self._driver_class = self.default_driver_class
return self._driver_class
def get_conn(self) -> jaydebeapi.Connection:
conn: Connection = self.get_connection(getattr(self, self.conn_name_attr))
host: str = conn.host
login: str = conn.login
psw: str = conn.password
conn = jaydebeapi.connect(
jclassname=self.driver_class,
url=str(host),
driver_args=[str(login), str(psw)],
jars=self.driver_path.split(",") if self.driver_path else None,
)
return conn
def set_autocommit(self, conn: jaydebeapi.Connection, autocommit: bool) -> None:
"""Set autocommit for the given connection.
:param conn: The connection.
:param autocommit: The connection's autocommit setting.
"""
conn.jconn.setAutoCommit(autocommit)
def get_autocommit(self, conn: jaydebeapi.Connection) -> bool:
"""Get autocommit setting for the provided connection.
:param conn: Connection to get autocommit setting from.
:return: connection autocommit setting. True if ``autocommit`` is set
to True on the connection. False if it is either not set, set to
False, or the connection does not support auto-commit.
"""
return conn.jconn.getAutoCommit()
| 7,023 | 41.829268 | 110 | py |
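A minimal usage sketch for JdbcHook, assuming a JDBC connection named "jdbc_default" exists and that the driver jar path and driver class shown are available on the worker; these names are illustrative assumptions, not part of the provider.
# Hypothetical usage sketch; the connection id, jar path and driver class are assumptions.
from airflow.providers.jdbc.hooks.jdbc import JdbcHook
hook = JdbcHook(
    jdbc_conn_id="jdbc_default",             # a configured JDBC connection; its "host" holds the full JDBC URL
    driver_path="/opt/java/postgresql.jar",  # assumption: path to the driver jar on the worker
    driver_class="org.postgresql.Driver",    # assumption: driver class contained in that jar
)
rows = hook.get_records("SELECT 1")          # the usual DbApiHook helpers work on top of jaydebeapi
The same driver_path/driver_class pair can instead be supplied through "hook_params" when the hook is created by the common SQL operators.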
airflow | airflow-main/airflow/providers/jdbc/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/docker/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.7.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-docker:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,531 | 35.47619 | 115 | py |
airflow | airflow-main/airflow/providers/docker/decorators/docker.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import base64
import inspect
import os
import pickle
from tempfile import TemporaryDirectory
from textwrap import dedent
from typing import TYPE_CHECKING, Callable, Sequence
import dill
from airflow.decorators.base import DecoratedOperator, task_decorator_factory
from airflow.providers.docker.operators.docker import DockerOperator
try:
from airflow.utils.decorators import remove_task_decorator
# This can be removed after we move to Airflow 2.4+
except ImportError:
from airflow.utils.python_virtualenv import remove_task_decorator
from airflow.utils.python_virtualenv import write_python_script
if TYPE_CHECKING:
from airflow.decorators.base import TaskDecorator
from airflow.utils.context import Context
def _generate_decode_command(env_var, file, python_command):
# We don't need `f.close()` as the interpreter is about to exit anyway
return (
f'{python_command} -c "import base64, os;'
rf"x = base64.b64decode(os.environ[\"{env_var}\"]);"
rf'f = open(\"{file}\", \"wb\"); f.write(x);"'
)
def _b64_encode_file(filename):
with open(filename, "rb") as file_to_encode:
return base64.b64encode(file_to_encode.read())
class _DockerDecoratedOperator(DecoratedOperator, DockerOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
    :param python_command: Python binary name to use
    :param use_dill: Whether dill should be used to serialize the callable
    :param expect_airflow: whether to expect Airflow to be installed in the Docker environment. If set,
        the script that runs the callable will attempt to load Airflow macros.
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
    :param multiple_outputs: if set, the function's return value will be
        unrolled to multiple XCom values. A dict will unroll to XCom values with its keys as XCom keys.
        Defaults to False.
"""
custom_operator_name = "@task.docker"
template_fields: Sequence[str] = (*DockerOperator.template_fields, "op_args", "op_kwargs")
def __init__(
self,
use_dill=False,
python_command="python3",
expect_airflow: bool = True,
**kwargs,
) -> None:
command = "placeholder command"
self.python_command = python_command
self.expect_airflow = expect_airflow
self.pickling_library = dill if use_dill else pickle
super().__init__(
command=command, retrieve_output=True, retrieve_output_path="/tmp/script.out", **kwargs
)
def generate_command(self):
return (
f"""bash -cx '{_generate_decode_command("__PYTHON_SCRIPT", "/tmp/script.py",
self.python_command)} &&"""
f'{_generate_decode_command("__PYTHON_INPUT", "/tmp/script.in", self.python_command)} &&'
f"{self.python_command} /tmp/script.py /tmp/script.in /tmp/script.out'"
)
def execute(self, context: Context):
with TemporaryDirectory(prefix="venv") as tmp_dir:
input_filename = os.path.join(tmp_dir, "script.in")
script_filename = os.path.join(tmp_dir, "script.py")
with open(input_filename, "wb") as file:
if self.op_args or self.op_kwargs:
self.pickling_library.dump({"args": self.op_args, "kwargs": self.op_kwargs}, file)
py_source = self.get_python_source()
write_python_script(
jinja_context=dict(
op_args=self.op_args,
op_kwargs=self.op_kwargs,
pickling_library=self.pickling_library.__name__,
python_callable=self.python_callable.__name__,
python_callable_source=py_source,
expect_airflow=self.expect_airflow,
string_args_global=False,
),
filename=script_filename,
)
# Pass the python script to be executed, and the input args, via environment variables. This is
# more than slightly hacky, but it means it can work when Airflow itself is in the same Docker
# engine where this task is going to run (unlike say trying to mount a file in)
self.environment["__PYTHON_SCRIPT"] = _b64_encode_file(script_filename)
if self.op_args or self.op_kwargs:
self.environment["__PYTHON_INPUT"] = _b64_encode_file(input_filename)
else:
self.environment["__PYTHON_INPUT"] = ""
self.command = self.generate_command()
return super().execute(context)
# TODO: Remove me once this provider min supported Airflow version is 2.6
def get_python_source(self):
raw_source = inspect.getsource(self.python_callable)
res = dedent(raw_source)
res = remove_task_decorator(res, self.custom_operator_name)
return res
def docker_task(
python_callable: Callable | None = None,
multiple_outputs: bool | None = None,
**kwargs,
) -> TaskDecorator:
"""
Python operator decorator; wraps a function into an Airflow operator.
    Also accepts any argument that DockerOperator takes via ``kwargs``. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: If set, function return value will be unrolled to multiple XCom values.
Dict will unroll to XCom values with keys as XCom keys. Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_DockerDecoratedOperator,
**kwargs,
)
| 6,806 | 39.760479 | 107 | py |
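A hedged sketch of how the decorator above is typically wired into a TaskFlow DAG; the image name, DAG settings and the reachability of the Docker socket from the Airflow worker are illustrative assumptions.
# Hypothetical DAG; image, dag settings and socket availability are assumptions.
import pendulum
from airflow.decorators import dag, task
@dag(schedule=None, start_date=pendulum.datetime(2023, 1, 1, tz="UTC"), catchup=False)
def docker_taskflow_sketch():
    @task.docker(image="python:3.9-slim", auto_remove="force")
    def add(x: int, y: int) -> int:
        # Runs inside the container; the callable is shipped as a base64-encoded script.
        return x + y
    add(1, 2)
docker_taskflow_sketch()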
airflow | airflow-main/airflow/providers/docker/decorators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/docker/operators/docker_swarm.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Run ephemeral Docker Swarm services."""
from __future__ import annotations
from typing import TYPE_CHECKING
from docker import types
from airflow.exceptions import AirflowException
from airflow.providers.docker.operators.docker import DockerOperator
from airflow.utils.strings import get_random_string
if TYPE_CHECKING:
from airflow.utils.context import Context
class DockerSwarmOperator(DockerOperator):
"""
Execute a command as an ephemeral docker swarm service.
Example use-case - Using Docker Swarm orchestration to make one-time
scripts highly available.
A temporary directory is created on the host and
mounted into a container to allow storing files
that together exceed the default disk size of 10GB in a container.
The path to the mounted directory can be accessed
via the environment variable ``AIRFLOW_TMP_DIR``.
If a login to a private registry is required prior to pulling the image, a
Docker connection needs to be configured in Airflow and the connection ID
be provided with the parameter ``docker_conn_id``.
:param image: Docker image from which to create the container.
If image tag is omitted, "latest" will be used.
:param api_version: Remote API version. Set to ``auto`` to automatically
detect the server's version.
:param auto_remove: Auto-removal of the container on daemon side when the
container's process exits.
The default is False.
:param command: Command to be run in the container. (templated)
:param docker_url: URL of the host running the docker daemon.
Default is unix://var/run/docker.sock
:param environment: Environment variables to set in the container. (templated)
:param force_pull: Pull the docker image on every run. Default is False.
:param mem_limit: Maximum amount of memory the container can use.
Either a float value, which represents the limit in bytes,
or a string like ``128m`` or ``1g``.
:param tls_ca_cert: Path to a PEM-encoded certificate authority
to secure the docker connection.
:param tls_client_cert: Path to the PEM-encoded certificate
used to authenticate docker client.
:param tls_client_key: Path to the PEM-encoded key used to authenticate docker client.
:param tls_hostname: Hostname to match against
the docker server certificate or False to disable the check.
:param tls_ssl_version: Version of SSL to use when communicating with docker daemon.
:param tmp_dir: Mount point inside the container to
a temporary directory created on the host by the operator.
The path is also made available via the environment variable
``AIRFLOW_TMP_DIR`` inside the container.
:param user: Default user inside the docker container.
:param docker_conn_id: The :ref:`Docker connection id <howto/connection:docker>`
    :param tty: Allocate pseudo-TTY to the container of this service.
        This needs to be set to see logs of the Docker container / service.
:param enable_logging: Show the application's logs in operator's logs.
Supported only if the Docker engine is using json-file or journald logging drivers.
The `tty` parameter should be set to use this with Python applications.
:param configs: List of docker configs to be exposed to the containers of the swarm service.
The configs are ConfigReference objects as per the docker api
[https://docker-py.readthedocs.io/en/stable/services.html#docker.models.services.ServiceCollection.create]_
:param secrets: List of docker secrets to be exposed to the containers of the swarm service.
The secrets are SecretReference objects as per the docker create_service api.
[https://docker-py.readthedocs.io/en/stable/services.html#docker.models.services.ServiceCollection.create]_
:param mode: Indicate whether a service should be deployed as a replicated or global service,
and associated parameters
:param networks: List of network names or IDs or NetworkAttachmentConfig to attach the service to.
:param placement: Placement instructions for the scheduler. If a list is passed instead,
it is assumed to be a list of constraints as part of a Placement object.
"""
def __init__(
self,
*,
image: str,
enable_logging: bool = True,
configs: list[types.ConfigReference] | None = None,
secrets: list[types.SecretReference] | None = None,
mode: types.ServiceMode | None = None,
networks: list[str | types.NetworkAttachmentConfig] | None = None,
placement: types.Placement | list[types.Placement] | None = None,
**kwargs,
) -> None:
super().__init__(image=image, **kwargs)
self.enable_logging = enable_logging
self.service = None
self.configs = configs
self.secrets = secrets
self.mode = mode
self.networks = networks
self.placement = placement
def execute(self, context: Context) -> None:
self.environment["AIRFLOW_TMP_DIR"] = self.tmp_dir
return self._run_service()
def _run_service(self) -> None:
self.log.info("Starting docker service from image %s", self.image)
self.service = self.cli.create_service(
types.TaskTemplate(
container_spec=types.ContainerSpec(
image=self.image,
command=self.format_command(self.command),
mounts=self.mounts,
env=self.environment,
user=self.user,
tty=self.tty,
configs=self.configs,
secrets=self.secrets,
),
restart_policy=types.RestartPolicy(condition="none"),
resources=types.Resources(mem_limit=self.mem_limit),
networks=self.networks,
placement=self.placement,
),
name=f"airflow-{get_random_string()}",
labels={"name": f"airflow__{self.dag_id}__{self.task_id}"},
mode=self.mode,
)
if self.service is None:
raise Exception("Service should be set here")
self.log.info("Service started: %s", str(self.service))
# wait for the service to start the task
while not self.cli.tasks(filters={"service": self.service["ID"]}):
continue
if self.enable_logging:
self._stream_logs_to_output()
while True:
if self._has_service_terminated():
self.log.info("Service status before exiting: %s", self._service_status())
break
self.log.info("auto_removeauto_removeauto_removeauto_removeauto_remove : %s", str(self.auto_remove))
if self.service and self._service_status() != "complete":
if self.auto_remove == "success":
self.cli.remove_service(self.service["ID"])
raise AirflowException("Service did not complete: " + repr(self.service))
elif self.auto_remove == "success":
if not self.service:
raise Exception("The 'service' should be initialized before!")
self.cli.remove_service(self.service["ID"])
def _service_status(self) -> str | None:
if not self.service:
raise Exception("The 'service' should be initialized before!")
return self.cli.tasks(filters={"service": self.service["ID"]})[0]["Status"]["State"]
def _has_service_terminated(self) -> bool:
status = self._service_status()
return status in ["complete", "failed", "shutdown", "rejected", "orphaned", "remove"]
def _stream_logs_to_output(self) -> None:
if not self.service:
raise Exception("The 'service' should be initialized before!")
logs = self.cli.service_logs(
self.service["ID"], follow=True, stdout=True, stderr=True, is_tty=self.tty
)
line = ""
while True:
try:
log = next(logs)
except StopIteration:
# If the service log stream terminated, stop fetching logs further.
break
else:
try:
log = log.decode()
except UnicodeDecodeError:
continue
if log == "\n":
self.log.info(line)
line = ""
else:
line += log
# flush any remaining log stream
if line:
self.log.info(line)
def on_kill(self) -> None:
if self.hook.client_created and self.service is not None:
self.log.info("Removing docker service: %s", self.service["ID"])
self.cli.remove_service(self.service["ID"])
| 9,637 | 44.895238 | 115 | py |
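A hedged sketch of a one-off Swarm service declared with the operator above; the DAG settings, image and command are illustrative assumptions, and the default Docker socket is assumed to point at a swarm manager.
# Hypothetical DAG; dag settings, image and command are assumptions.
import pendulum
from airflow import DAG
from airflow.providers.docker.operators.docker_swarm import DockerSwarmOperator
with DAG(
    dag_id="docker_swarm_sketch",
    schedule=None,
    start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
    catchup=False,
):
    DockerSwarmOperator(
        task_id="one_off_job",
        image="python:3.9-slim",                  # assumption: image pullable by the swarm nodes
        command="python -c \"print('hello from a swarm service')\"",
        auto_remove="success",                    # remove the service only if it completed
        tty=True,                                 # required so service logs can be streamed into the task log
        enable_logging=True,
    )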
airflow | airflow-main/airflow/providers/docker/operators/docker.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Implements Docker operator."""
from __future__ import annotations
import ast
import pickle
import tarfile
import warnings
from collections.abc import Container
from functools import cached_property
from io import BytesIO, StringIO
from tempfile import TemporaryDirectory
from typing import TYPE_CHECKING, Iterable, Sequence
from docker.constants import DEFAULT_TIMEOUT_SECONDS
from docker.errors import APIError
from docker.types import LogConfig, Mount
from dotenv import dotenv_values
from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning, AirflowSkipException
from airflow.models import BaseOperator
from airflow.providers.docker.hooks.docker import DockerHook
if TYPE_CHECKING:
from docker import APIClient
from docker.types import DeviceRequest
from airflow.utils.context import Context
def stringify(line: str | bytes):
"""Make sure string is returned even if bytes are passed. Docker stream can return bytes."""
decode_method = getattr(line, "decode", None)
if decode_method:
return decode_method(encoding="utf-8", errors="surrogateescape")
else:
return line
class DockerOperator(BaseOperator):
"""Execute a command inside a docker container.
By default, a temporary directory is
created on the host and mounted into a container to allow storing files
that together exceed the default disk size of 10GB in a container.
    In this case the path to the mounted directory can be accessed
    via the environment variable ``AIRFLOW_TMP_DIR``.
    If the volume cannot be mounted, a warning is printed and an attempt is made to execute the docker
    command without the temporary folder mounted. This is to make it work by default with a remote docker
    engine or when you run a docker-in-docker solution and the temporary directory is not shared with the
    docker engine. A warning is printed in the logs in this case.
    If you know you run DockerOperator with a remote engine or via docker-in-docker
    you should set the ``mount_tmp_dir`` parameter to False. In this case, you can still use the
    ``mounts`` parameter to mount already existing named volumes in your Docker Engine
    to achieve a similar capability where you can store files exceeding the default disk size
    of the container.
If a login to a private registry is required prior to pulling the image, a
Docker connection needs to be configured in Airflow and the connection ID
be provided with the parameter ``docker_conn_id``.
:param image: Docker image from which to create the container.
If image tag is omitted, "latest" will be used. (templated)
:param api_version: Remote API version. Set to ``auto`` to automatically
detect the server's version.
:param command: Command to be run in the container. (templated)
:param container_name: Name of the container. Optional (templated)
:param cpus: Number of CPUs to assign to the container.
This value gets multiplied with 1024. See
https://docs.docker.com/engine/reference/run/#cpu-share-constraint
:param docker_url: URL of the host running the docker daemon.
Default is unix://var/run/docker.sock
:param environment: Environment variables to set in the container. (templated)
:param private_environment: Private environment variables to set in the container.
These are not templated, and hidden from the website.
:param env_file: Relative path to the .env file with environment variables to set in the container.
Overridden by variables in the environment parameter. (templated)
:param force_pull: Pull the docker image on every run. Default is False.
:param mem_limit: Maximum amount of memory the container can use.
Either a float value, which represents the limit in bytes,
or a string like ``128m`` or ``1g``.
:param host_tmp_dir: Specify the location of the temporary directory on the host which will
be mapped to tmp_dir. If not provided defaults to using the standard system temp directory.
:param network_mode: Network mode for the container.
It can be one of the following:
bridge - Create new network stack for the container with default docker bridge network
None - No networking for this container
container:<name|id> - Use the network stack of another container specified via <name|id>
host - Use the host network stack. Incompatible with `port_bindings`
'<network-name>|<network-id>' - Connects the container to user created network
(using `docker network create` command)
:param tls_ca_cert: Path to a PEM-encoded certificate authority
to secure the docker connection.
:param tls_client_cert: Path to the PEM-encoded certificate
used to authenticate docker client.
:param tls_client_key: Path to the PEM-encoded key used to authenticate docker client.
:param tls_verify: Set ``True`` to verify the validity of the provided certificate.
:param tls_hostname: Hostname to match against
the docker server certificate or False to disable the check.
:param tls_ssl_version: Version of SSL to use when communicating with docker daemon.
:param mount_tmp_dir: Specify whether the temporary directory should be bind-mounted
from the host to the container. Defaults to True
:param tmp_dir: Mount point inside the container to
a temporary directory created on the host by the operator.
The path is also made available via the environment variable
``AIRFLOW_TMP_DIR`` inside the container.
:param user: Default user inside the docker container.
:param mounts: List of volumes to mount into the container. Each item should
be a :py:class:`docker.types.Mount` instance.
:param entrypoint: Overwrite the default ENTRYPOINT of the image
:param working_dir: Working directory to
set on the container (equivalent to the -w switch the docker client)
:param xcom_all: Push all the stdout or just the last line.
The default is False (last line).
:param docker_conn_id: The :ref:`Docker connection id <howto/connection:docker>`
:param dns: Docker custom DNS servers
:param dns_search: Docker custom DNS search domain
:param auto_remove: Auto-removal of the container on daemon side when the
container's process exits.
The default is never.
:param shm_size: Size of ``/dev/shm`` in bytes. The size must be
greater than 0. If omitted uses system default.
    :param tty: Allocate pseudo-TTY to the container.
        This needs to be set to see logs of the Docker container.
:param hostname: Optional hostname for the container.
:param privileged: Give extended privileges to this container.
:param cap_add: Include container capabilities
    :param retrieve_output: Whether to attempt to retrieve and return a serialized output file from the
        container before it is shut down. Useful for cases where users want a pickle serialized
        output that is not posted to logs
:param retrieve_output_path: path for output file that will be retrieved and passed to xcom
:param device_requests: Expose host resources such as GPUs to the container.
:param log_opts_max_size: The maximum size of the log before it is rolled.
A positive integer plus a modifier representing the unit of measure (k, m, or g).
Eg: 10m or 1g Defaults to -1 (unlimited).
:param log_opts_max_file: The maximum number of log files that can be present.
If rolling the logs creates excess files, the oldest file is removed.
Only effective when max-size is also set. A positive integer. Defaults to 1.
:param ipc_mode: Set the IPC mode for the container.
:param skip_on_exit_code: If task exits with this exit code, leave the task
in ``skipped`` state (default: None). If set to ``None``, any non-zero
exit code will be treated as a failure.
:param port_bindings: Publish a container's port(s) to the host. It is a
dictionary of value where the key indicates the port to open inside the container
and value indicates the host port that binds to the container port.
Incompatible with ``host`` in ``network_mode``.
"""
template_fields: Sequence[str] = ("image", "command", "environment", "env_file", "container_name")
template_fields_renderers = {"env_file": "yaml"}
template_ext: Sequence[str] = (
".sh",
".bash",
".env",
)
def __init__(
self,
*,
image: str,
api_version: str | None = None,
command: str | list[str] | None = None,
container_name: str | None = None,
cpus: float = 1.0,
docker_url: str = "unix://var/run/docker.sock",
environment: dict | None = None,
private_environment: dict | None = None,
env_file: str | None = None,
force_pull: bool = False,
mem_limit: float | str | None = None,
host_tmp_dir: str | None = None,
network_mode: str | None = None,
tls_ca_cert: str | None = None,
tls_client_cert: str | None = None,
tls_client_key: str | None = None,
tls_verify: bool = True,
tls_hostname: str | bool | None = None,
tls_ssl_version: str | None = None,
mount_tmp_dir: bool = True,
tmp_dir: str = "/tmp/airflow",
user: str | int | None = None,
mounts: list[Mount] | None = None,
entrypoint: str | list[str] | None = None,
working_dir: str | None = None,
xcom_all: bool = False,
docker_conn_id: str | None = None,
dns: list[str] | None = None,
dns_search: list[str] | None = None,
auto_remove: str = "never",
shm_size: int | None = None,
tty: bool = False,
hostname: str | None = None,
privileged: bool = False,
cap_add: Iterable[str] | None = None,
extra_hosts: dict[str, str] | None = None,
retrieve_output: bool = False,
retrieve_output_path: str | None = None,
timeout: int = DEFAULT_TIMEOUT_SECONDS,
device_requests: list[DeviceRequest] | None = None,
log_opts_max_size: str | None = None,
log_opts_max_file: str | None = None,
ipc_mode: str | None = None,
skip_exit_code: int | None = None,
skip_on_exit_code: int | Container[int] | None = None,
port_bindings: dict | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.api_version = api_version
        if isinstance(auto_remove, bool):
warnings.warn(
"bool value for auto_remove is deprecated, please use 'never', 'success', or 'force' instead",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
if str(auto_remove) == "False":
self.auto_remove = "never"
elif str(auto_remove) == "True":
self.auto_remove = "success"
elif str(auto_remove) in ("never", "success", "force"):
self.auto_remove = auto_remove
else:
raise ValueError("unsupported auto_remove option, use 'never', 'success', or 'force' instead")
self.command = command
self.container_name = container_name
self.cpus = cpus
self.dns = dns
self.dns_search = dns_search
self.docker_url = docker_url
self.environment = environment or {}
self._private_environment = private_environment or {}
self.env_file = env_file
self.force_pull = force_pull
self.image = image
self.mem_limit = mem_limit
self.host_tmp_dir = host_tmp_dir
self.network_mode = network_mode
self.tls_ca_cert = tls_ca_cert
self.tls_client_cert = tls_client_cert
self.tls_client_key = tls_client_key
self.tls_verify = tls_verify
self.tls_hostname = tls_hostname
self.tls_ssl_version = tls_ssl_version
self.mount_tmp_dir = mount_tmp_dir
self.tmp_dir = tmp_dir
self.user = user
self.mounts = mounts or []
self.entrypoint = entrypoint
self.working_dir = working_dir
self.xcom_all = xcom_all
self.docker_conn_id = docker_conn_id
self.shm_size = shm_size
self.tty = tty
self.hostname = hostname
self.privileged = privileged
self.cap_add = cap_add
self.extra_hosts = extra_hosts
self.container: dict = None # type: ignore[assignment]
self.retrieve_output = retrieve_output
self.retrieve_output_path = retrieve_output_path
self.timeout = timeout
self.device_requests = device_requests
self.log_opts_max_size = log_opts_max_size
self.log_opts_max_file = log_opts_max_file
self.ipc_mode = ipc_mode
if skip_exit_code is not None:
warnings.warn(
"skip_exit_code is deprecated. Please use skip_on_exit_code",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
skip_on_exit_code = skip_exit_code
self.skip_on_exit_code = (
skip_on_exit_code
if isinstance(skip_on_exit_code, Container)
else [skip_on_exit_code]
if skip_on_exit_code
else []
)
self.port_bindings = port_bindings or {}
if self.port_bindings and self.network_mode == "host":
raise ValueError("Port bindings is not supported in the host network mode")
@cached_property
def hook(self) -> DockerHook:
"""Create and return an DockerHook (cached)."""
tls_config = DockerHook.construct_tls_config(
ca_cert=self.tls_ca_cert,
client_cert=self.tls_client_cert,
client_key=self.tls_client_key,
verify=self.tls_verify,
assert_hostname=self.tls_hostname,
ssl_version=self.tls_ssl_version,
)
return DockerHook(
docker_conn_id=self.docker_conn_id,
base_url=self.docker_url,
version=self.api_version,
tls=tls_config,
timeout=self.timeout,
)
def get_hook(self) -> DockerHook:
"""Create and return an DockerHook (cached)."""
return self.hook
@property
def cli(self) -> APIClient:
return self.hook.api_client
def _run_image(self) -> list[str] | str | None:
"""Run a Docker container with the provided image."""
self.log.info("Starting docker container from image %s", self.image)
if self.mount_tmp_dir:
with TemporaryDirectory(prefix="airflowtmp", dir=self.host_tmp_dir) as host_tmp_dir_generated:
tmp_mount = Mount(self.tmp_dir, host_tmp_dir_generated, "bind")
try:
return self._run_image_with_mounts(self.mounts + [tmp_mount], add_tmp_variable=True)
except APIError as e:
if host_tmp_dir_generated in str(e):
self.log.warning(
"Using remote engine or docker-in-docker and mounting temporary "
"volume from host is not supported. Falling back to "
"`mount_tmp_dir=False` mode. You can set `mount_tmp_dir` parameter"
" to False to disable mounting and remove the warning"
)
return self._run_image_with_mounts(self.mounts, add_tmp_variable=False)
raise
else:
return self._run_image_with_mounts(self.mounts, add_tmp_variable=False)
def _run_image_with_mounts(self, target_mounts, add_tmp_variable: bool) -> list[str] | str | None:
if add_tmp_variable:
self.environment["AIRFLOW_TMP_DIR"] = self.tmp_dir
else:
self.environment.pop("AIRFLOW_TMP_DIR", None)
docker_log_config = {}
if self.log_opts_max_size is not None:
docker_log_config["max-size"] = self.log_opts_max_size
if self.log_opts_max_file is not None:
docker_log_config["max-file"] = self.log_opts_max_file
env_file_vars = {}
if self.env_file is not None:
env_file_vars = self.unpack_environment_variables(self.env_file)
self.container = self.cli.create_container(
command=self.format_command(self.command),
name=self.container_name,
environment={**env_file_vars, **self.environment, **self._private_environment},
ports=list(self.port_bindings),
host_config=self.cli.create_host_config(
auto_remove=False,
mounts=target_mounts,
network_mode=self.network_mode,
shm_size=self.shm_size,
dns=self.dns,
dns_search=self.dns_search,
cpu_shares=int(round(self.cpus * 1024)),
port_bindings=self.port_bindings,
mem_limit=self.mem_limit,
cap_add=self.cap_add,
extra_hosts=self.extra_hosts,
privileged=self.privileged,
device_requests=self.device_requests,
log_config=LogConfig(config=docker_log_config),
ipc_mode=self.ipc_mode,
),
image=self.image,
user=self.user,
entrypoint=self.format_command(self.entrypoint),
working_dir=self.working_dir,
tty=self.tty,
hostname=self.hostname,
)
logstream = self.cli.attach(container=self.container["Id"], stdout=True, stderr=True, stream=True)
try:
self.cli.start(self.container["Id"])
log_lines = []
for log_chunk in logstream:
log_chunk = stringify(log_chunk).strip()
log_lines.append(log_chunk)
self.log.info("%s", log_chunk)
result = self.cli.wait(self.container["Id"])
if result["StatusCode"] in self.skip_on_exit_code:
raise AirflowSkipException(
f"Docker container returned exit code {self.skip_on_exit_code}. Skipping."
)
elif result["StatusCode"] != 0:
joined_log_lines = "\n".join(log_lines)
raise AirflowException(f"Docker container failed: {repr(result)} lines {joined_log_lines}")
if self.retrieve_output:
return self._attempt_to_retrieve_result()
elif self.do_xcom_push:
if len(log_lines) == 0:
return None
try:
if self.xcom_all:
return log_lines
else:
return log_lines[-1]
except StopIteration:
# handle the case when there is not a single line to iterate on
return None
return None
finally:
if self.auto_remove == "success":
self.cli.remove_container(self.container["Id"])
elif self.auto_remove == "force":
self.cli.remove_container(self.container["Id"], force=True)
def _attempt_to_retrieve_result(self):
"""Attempt to pull the result from the expected file.
This uses Docker's ``get_archive`` function. If the file is not yet
ready, *None* is returned.
"""
def copy_from_docker(container_id, src):
archived_result, stat = self.cli.get_archive(container_id, src)
if stat["size"] == 0:
# 0 byte file, it can't be anything else than None
return None
# no need to port to a file since we intend to deserialize
file_standin = BytesIO(b"".join(archived_result))
tar = tarfile.open(fileobj=file_standin)
file = tar.extractfile(stat["name"])
lib = getattr(self, "pickling_library", pickle)
return lib.loads(file.read())
try:
return copy_from_docker(self.container["Id"], self.retrieve_output_path)
except APIError:
return None
def execute(self, context: Context) -> list[str] | str | None:
# Pull the docker image if `force_pull` is set or image does not exist locally
if self.force_pull or not self.cli.images(name=self.image):
self.log.info("Pulling docker image %s", self.image)
latest_status: dict[str, str] = {}
for output in self.cli.pull(self.image, stream=True, decode=True):
if isinstance(output, str):
self.log.info("%s", output)
continue
if isinstance(output, dict) and "status" in output:
output_status = output["status"]
if "id" not in output:
self.log.info("%s", output_status)
continue
output_id = output["id"]
if latest_status.get(output_id) != output_status:
self.log.info("%s: %s", output_id, output_status)
latest_status[output_id] = output_status
return self._run_image()
@staticmethod
def format_command(command: list[str] | str | None) -> list[str] | str | None:
"""Retrieve command(s).
If command string starts with ``[``, the string is treated as a Python
literal and parsed into a list of commands.
:param command: Docker command or entrypoint
:return: the command (or commands)
"""
if isinstance(command, str) and command.strip().find("[") == 0:
command = ast.literal_eval(command)
return command
def on_kill(self) -> None:
if self.hook.client_created:
self.log.info("Stopping docker container")
if self.container is None:
self.log.info("Not attempting to kill container as it was not created")
return
self.cli.stop(self.container["Id"])
@staticmethod
def unpack_environment_variables(env_str: str) -> dict:
r"""Parse environment variables from the string.
:param env_str: environment variables in the ``{key}={value}`` format,
separated by a ``\n`` (newline)
:return: dictionary containing parsed environment variables
"""
return dotenv_values(stream=StringIO(env_str))
| 23,414 | 44.911765 | 110 | py |
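A hedged sketch of the operator above in a DAG, showing the remote-engine friendly settings discussed in the docstring (mount_tmp_dir disabled, a named volume mounted instead, and a skip exit code); the image, volume name and DAG settings are illustrative assumptions.
# Hypothetical DAG; image, volume name and dag settings are assumptions.
import pendulum
from docker.types import Mount
from airflow import DAG
from airflow.providers.docker.operators.docker import DockerOperator
with DAG(
    dag_id="docker_operator_sketch",
    schedule=None,
    start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
    catchup=False,
):
    DockerOperator(
        task_id="transform",
        image="python:3.9-slim",
        command="python -c \"import os; print(os.environ['STAGE'])\"",
        environment={"STAGE": "dev"},
        mount_tmp_dir=False,                      # recommended with a remote engine or docker-in-docker
        mounts=[Mount(source="shared_data", target="/data", type="volume")],  # assumption: named volume exists
        auto_remove="success",
        skip_on_exit_code=99,                     # a container exiting with 99 marks the task as skipped
    )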
airflow | airflow-main/airflow/providers/docker/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/docker/hooks/docker.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
from functools import cached_property
from typing import TYPE_CHECKING, Any
from docker import APIClient, TLSConfig
from docker.constants import DEFAULT_TIMEOUT_SECONDS
from docker.errors import APIError
from airflow.exceptions import AirflowException, AirflowNotFoundException
from airflow.hooks.base import BaseHook
if TYPE_CHECKING:
from airflow.models import Connection
class DockerHook(BaseHook):
"""
Interact with a Docker Daemon and Container Registry.
    This class provides a thin wrapper around the ``docker.APIClient``.
.. seealso::
- :ref:`Docker Connection <howto/connection:docker>`
- `Docker SDK: Low-level API <https://docker-py.readthedocs.io/en/stable/api.html?low-level-api>`_
    :param docker_conn_id: :ref:`Docker connection id <howto/connection:docker>` where credentials
        for the Docker Registry are stored. If set to ``None`` or empty, the hook does not log in to the
        Container Registry.
:param base_url: URL to the Docker server.
:param version: The version of the API to use. Use ``auto`` or ``None`` for automatically detect
the server's version.
    :param tls: Whether the connection requires TLS. Pass ``True`` to enable it with default options,
        or pass a `docker.tls.TLSConfig` object to use custom configurations.
:param timeout: Default timeout for API calls, in seconds.
"""
conn_name_attr = "docker_conn_id"
default_conn_name = "docker_default"
conn_type = "docker"
hook_name = "Docker"
def __init__(
self,
docker_conn_id: str | None = default_conn_name,
base_url: str | None = None,
version: str | None = None,
tls: TLSConfig | bool | None = None,
timeout: int = DEFAULT_TIMEOUT_SECONDS,
) -> None:
super().__init__()
if not base_url:
raise AirflowException("URL to the Docker server not provided.")
elif tls:
if base_url.startswith("tcp://"):
base_url = base_url.replace("tcp://", "https://")
self.log.debug("Change `base_url` schema from 'tcp://' to 'https://'.")
if not base_url.startswith("https://"):
self.log.warning("When `tls` specified then `base_url` expected 'https://' schema.")
self.docker_conn_id = docker_conn_id
self.__base_url = base_url
self.__version = version
self.__tls = tls or False
self.__timeout = timeout
self._client_created = False
@staticmethod
def construct_tls_config(
ca_cert: str | None = None,
client_cert: str | None = None,
client_key: str | None = None,
verify: bool = True,
assert_hostname: str | bool | None = None,
ssl_version: str | None = None,
) -> TLSConfig | bool:
"""
Construct TLSConfig object from parts.
:param ca_cert: Path to a PEM-encoded CA (Certificate Authority) certificate file.
:param client_cert: Path to PEM-encoded certificate file.
:param client_key: Path to PEM-encoded key file.
:param verify: Set ``True`` to verify the validity of the provided certificate.
:param assert_hostname: Hostname to match against the docker server certificate
or ``False`` to disable the check.
:param ssl_version: Version of SSL to use when communicating with docker daemon.
"""
if ca_cert and client_cert and client_key:
# Ignore type error on SSL version here.
# It is deprecated and type annotation is wrong, and it should be string.
return TLSConfig(
ca_cert=ca_cert,
client_cert=(client_cert, client_key),
verify=verify,
ssl_version=ssl_version,
assert_hostname=assert_hostname,
)
return False
@cached_property
def api_client(self) -> APIClient:
"""Create connection to docker host and return ``docker.APIClient`` (cached)."""
client = APIClient(
base_url=self.__base_url, version=self.__version, tls=self.__tls, timeout=self.__timeout
)
if self.docker_conn_id:
# Obtain connection and try to login to Container Registry only if ``docker_conn_id`` set.
self.__login(client, self.get_connection(self.docker_conn_id))
self._client_created = True
return client
@property
def client_created(self) -> bool:
"""Is api_client created or not."""
return self._client_created
def get_conn(self) -> APIClient:
"""Create connection to docker host and return ``docker.APIClient`` (cached)."""
return self.api_client
def __login(self, client, conn: Connection) -> None:
if not conn.host:
raise AirflowNotFoundException("No Docker Registry URL provided.")
if not conn.login:
raise AirflowNotFoundException("No Docker Registry username provided.")
registry = f"{conn.host}:{conn.port}" if conn.port else conn.host
# Parse additional optional parameters
email = conn.extra_dejson.get("email") or None
reauth = conn.extra_dejson.get("reauth", True)
if isinstance(reauth, str):
reauth = reauth.lower()
if reauth in ("y", "yes", "t", "true", "on", "1"):
reauth = True
elif reauth in ("n", "no", "f", "false", "off", "0"):
reauth = False
else:
raise ValueError(f"Unable parse `reauth` value {reauth!r} to bool.")
try:
self.log.info("Login into Docker Registry: %s", registry)
client.login(
username=conn.login, password=conn.password, registry=registry, email=email, reauth=reauth
)
self.log.debug("Login successful")
except APIError:
self.log.error("Login failed")
raise
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection form widgets."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import BooleanField, StringField
return {
"reauth": BooleanField(
lazy_gettext("Reauthenticate"),
description="Whether or not to refresh existing authentication on the Docker server.",
),
"email": StringField(lazy_gettext("Email"), widget=BS3TextFieldWidget()),
}
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema"],
"relabeling": {
"host": "Registry URL",
"login": "Username",
},
"placeholders": {
"extra": json.dumps(
{
"reauth": False,
"email": "[email protected]",
}
)
},
}
| 7,940 | 38.311881 | 106 | py |
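A hedged sketch of using the hook above directly, outside an operator; the TLS certificate paths and the daemon URL are illustrative assumptions.
# Hypothetical direct use of DockerHook; certificate paths and base_url are assumptions.
from airflow.providers.docker.hooks.docker import DockerHook
tls_config = DockerHook.construct_tls_config(
    ca_cert="/etc/docker/certs/ca.pem",          # assumption: PEM files issued for the daemon
    client_cert="/etc/docker/certs/cert.pem",
    client_key="/etc/docker/certs/key.pem",
    verify=True,
)
hook = DockerHook(
    docker_conn_id=None,                          # skip Container Registry login
    base_url="https://docker-host.example.com:2376",
    version="auto",
    tls=tls_config,
)
client = hook.api_client                          # docker.APIClient, created lazily and cached
print(client.version())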
airflow | airflow-main/airflow/providers/docker/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/tableau/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-tableau:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,532 | 35.5 | 116 | py |
airflow | airflow-main/airflow/providers/tableau/operators/tableau.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.tableau.hooks.tableau import (
TableauHook,
TableauJobFailedException,
TableauJobFinishCode,
)
if TYPE_CHECKING:
from airflow.utils.context import Context
RESOURCES_METHODS = {
"datasources": ["delete", "refresh"],
"groups": ["delete"],
"projects": ["delete"],
"schedule": ["delete"],
"sites": ["delete"],
"subscriptions": ["delete"],
"tasks": ["delete", "run"],
"users": ["remove"],
"workbooks": ["delete", "refresh"],
}
class TableauOperator(BaseOperator):
"""
Execute a Tableau API Resource.
https://tableau.github.io/server-client-python/docs/api-ref
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:TableauOperator`
:param resource: The name of the resource to use.
:param method: The name of the resource's method to execute.
:param find: The reference of resource that will receive the action.
:param match_with: The resource field name to be matched with find parameter.
:param site_id: The id of the site where the workbook belongs to.
    :param blocking_refresh: By default the refresh is blocking, meaning the operator waits
        until the refresh job has finished.
    :param check_interval: time in seconds that the job should wait in
        between each state check until the operation is completed
:param tableau_conn_id: The :ref:`Tableau Connection id <howto/connection:tableau>`
containing the credentials to authenticate to the Tableau Server.
"""
template_fields: Sequence[str] = (
"find",
"match_with",
)
def __init__(
self,
*,
resource: str,
method: str,
find: str,
match_with: str = "id",
site_id: str | None = None,
blocking_refresh: bool = True,
check_interval: float = 20,
tableau_conn_id: str = "tableau_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.resource = resource
self.method = method
self.find = find
self.match_with = match_with
self.check_interval = check_interval
self.site_id = site_id
self.blocking_refresh = blocking_refresh
self.tableau_conn_id = tableau_conn_id
def execute(self, context: Context) -> str:
"""
Executes the Tableau API resource and pushes the job id or downloaded file URI to xcom.
:param context: The task context during execution.
:return: the id of the job that executes the extract refresh or downloaded file URI.
"""
available_resources = RESOURCES_METHODS.keys()
if self.resource not in available_resources:
error_message = f"Resource not found! Available Resources: {available_resources}"
raise AirflowException(error_message)
available_methods = RESOURCES_METHODS[self.resource]
if self.method not in available_methods:
error_message = f"Method not found! Available methods for {self.resource}: {available_methods}"
raise AirflowException(error_message)
with TableauHook(self.site_id, self.tableau_conn_id) as tableau_hook:
resource = getattr(tableau_hook.server, self.resource)
method = getattr(resource, self.method)
resource_id = self._get_resource_id(tableau_hook)
response = method(resource_id)
job_id = response.id
if self.method == "refresh":
if self.blocking_refresh:
if not tableau_hook.wait_for_state(
job_id=job_id,
check_interval=self.check_interval,
target_state=TableauJobFinishCode.SUCCESS,
):
raise TableauJobFailedException(f"The Tableau Refresh {self.resource} Job failed!")
return job_id
def _get_resource_id(self, tableau_hook: TableauHook) -> str:
if self.match_with == "id":
return self.find
for resource in tableau_hook.get_all(resource_name=self.resource):
if getattr(resource, self.match_with) == self.find:
resource_id = resource.id
self.log.info("Found matching with id %s", resource_id)
return resource_id
raise AirflowException(f"{self.resource} with {self.match_with} {self.find} not found!")
| 5,416 | 36.10274 | 107 | py |
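A hedged sketch of a refresh task built with the operator above, matching a workbook by name rather than by id; the workbook name, site id and connection id are illustrative assumptions.
# Hypothetical DAG; workbook name, site id and connection id are assumptions.
import pendulum
from airflow import DAG
from airflow.providers.tableau.operators.tableau import TableauOperator
with DAG(
    dag_id="tableau_refresh_sketch",
    schedule=None,
    start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
    catchup=False,
):
    TableauOperator(
        task_id="refresh_sales_workbook",
        resource="workbooks",
        method="refresh",
        find="Sales Dashboard",          # matched against the workbook name because of match_with
        match_with="name",
        site_id="analytics",
        blocking_refresh=True,           # wait until the refresh job reaches SUCCESS
        check_interval=30,
        tableau_conn_id="tableau_default",
    )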
airflow | airflow-main/airflow/providers/tableau/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/tableau/hooks/tableau.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from __future__ import annotations

import time
import warnings
from enum import Enum
from typing import Any

from tableauserverclient import Pager, PersonalAccessTokenAuth, Server, TableauAuth
from tableauserverclient.server import Auth

from airflow.exceptions import AirflowException, AirflowProviderDeprecationWarning
from airflow.hooks.base import BaseHook


def parse_boolean(val: str) -> str | bool:
    """Try to parse a string into boolean.

    The string is returned as-is if it does not look like a boolean value.
    """
    val = val.lower()
    if val in ("y", "yes", "t", "true", "on", "1"):
        return True
    if val in ("n", "no", "f", "false", "off", "0"):
        return False
    return val
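
# A few illustrative calls for parse_boolean (added for clarity; not part of the upstream file):
#     parse_boolean("True")   -> True
#     parse_boolean("off")    -> False
#     parse_boolean("maybe")  -> "maybe"  (returned unchanged, as the docstring describes)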


class TableauJobFailedException(AirflowException):
    """An exception that indicates that a Job failed to complete."""


class TableauJobFinishCode(Enum):
    """
    The finish code indicates the status of the job.

    .. seealso:: https://help.tableau.com/current/api/rest_api/en-us/REST/rest_api_ref.htm#query_job
    """

    PENDING = -1
    SUCCESS = 0
    ERROR = 1
    CANCELED = 2


class TableauHook(BaseHook):
    """
    Connects to the Tableau Server Instance and allows to communicate with it.

    Can be used as a context manager: automatically authenticates the connection
    when opened and signs out when closed.

    .. seealso:: https://tableau.github.io/server-client-python/docs/

    :param site_id: The id of the site where the workbook belongs to.
        It will connect to the default site if you don't provide an id.
    :param tableau_conn_id: The :ref:`Tableau Connection id <howto/connection:tableau>`
        containing the credentials to authenticate to the Tableau Server.
    """

    conn_name_attr = "tableau_conn_id"
    default_conn_name = "tableau_default"
    conn_type = "tableau"
    hook_name = "Tableau"

    def __init__(self, site_id: str | None = None, tableau_conn_id: str = default_conn_name) -> None:
        super().__init__()
        self.tableau_conn_id = tableau_conn_id
        self.conn = self.get_connection(self.tableau_conn_id)
        self.site_id = site_id or self.conn.extra_dejson.get("site_id", "")
        self.server = Server(self.conn.host)
        verify: Any = self.conn.extra_dejson.get("verify", True)
        if isinstance(verify, str):
            verify = parse_boolean(verify)
        self.server.add_http_options(
            options_dict={"verify": verify, "cert": self.conn.extra_dejson.get("cert", None)}
        )
        self.server.use_server_version()
        self.tableau_conn = None

    def __enter__(self):
        if not self.tableau_conn:
            self.tableau_conn = self.get_conn()
        return self

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        self.server.auth.sign_out()

    def get_conn(self) -> Auth.contextmgr:
        """
        Sign in to the Tableau Server.

        :return: an authorized Tableau Server Context Manager object.
        """
        if self.conn.login and self.conn.password:
            return self._auth_via_password()
        if "token_name" in self.conn.extra_dejson and "personal_access_token" in self.conn.extra_dejson:
            return self._auth_via_token()
        raise NotImplementedError("No Authentication method found for given Credentials!")

    def _auth_via_password(self) -> Auth.contextmgr:
        tableau_auth = TableauAuth(
            username=self.conn.login, password=self.conn.password, site_id=self.site_id
        )
        return self.server.auth.sign_in(tableau_auth)

    def _auth_via_token(self) -> Auth.contextmgr:
        """This method is deprecated. Use password authentication instead."""
        warnings.warn(
            "Authentication via personal access token is deprecated. "
            "Please use password authentication instead to avoid inconsistencies.",
            AirflowProviderDeprecationWarning,
        )
        tableau_auth = PersonalAccessTokenAuth(
            token_name=self.conn.extra_dejson["token_name"],
            personal_access_token=self.conn.extra_dejson["personal_access_token"],
            site_id=self.site_id,
        )
        return self.server.auth.sign_in_with_personal_access_token(tableau_auth)

    def get_all(self, resource_name: str) -> Pager:
        """
        Get all items of the given resource.

        .. seealso:: https://tableau.github.io/server-client-python/docs/page-through-results

        :param resource_name: The name of the resource to paginate.
            For example: jobs or workbooks.
        :return: all items of the given resource, returned as a Pager.
        """
        try:
            resource = getattr(self.server, resource_name)
        except AttributeError:
            raise ValueError(f"Resource name {resource_name} is not found.")
        return Pager(resource.get)

    def get_job_status(self, job_id: str) -> TableauJobFinishCode:
        """
        Get the current state of a defined Tableau Job.

        .. seealso:: https://tableau.github.io/server-client-python/docs/api-ref#jobs

        :param job_id: The id of the job to check.
        :return: An Enum that describes the Tableau job's return code.
        """
        return TableauJobFinishCode(int(self.server.jobs.get_by_id(job_id).finish_code))

    def wait_for_state(self, job_id: str, target_state: TableauJobFinishCode, check_interval: float) -> bool:
        """
        Wait until the current state of a defined Tableau Job is target_state or no longer PENDING.

        :param job_id: The id of the job to check.
        :param target_state: Enum that describes the Tableau job's target state.
        :param check_interval: time in seconds to wait between consecutive job status checks
            until the operation is completed.
        :return: True if the job reached target_state, False otherwise.
        """
        finish_code = self.get_job_status(job_id=job_id)
        while finish_code == TableauJobFinishCode.PENDING and finish_code != target_state:
            self.log.info("job state: %s", finish_code)
            time.sleep(check_interval)
            finish_code = self.get_job_status(job_id=job_id)

        return finish_code == target_state
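

# --- Hedged usage sketch (added for illustration; not part of the upstream file) ---
# The hook is meant to be used as a context manager, as its docstring notes. The connection id
# "tableau_default", the site id "my_site" and the job id "my-job-id" below are assumptions.
#
#     from airflow.providers.tableau.hooks.tableau import TableauHook, TableauJobFinishCode
#
#     with TableauHook(site_id="my_site", tableau_conn_id="tableau_default") as tableau_hook:
#         for workbook in tableau_hook.get_all(resource_name="workbooks"):
#             print(workbook.name)
#         succeeded = tableau_hook.wait_for_state(
#             job_id="my-job-id",
#             target_state=TableauJobFinishCode.SUCCESS,
#             check_interval=30,
#         )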
| 7,062 | 38.238889 | 109 | py |
airflow | airflow-main/airflow/providers/tableau/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/tableau/sensors/tableau.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from __future__ import annotations

from typing import TYPE_CHECKING, Sequence

from airflow.providers.tableau.hooks.tableau import (
    TableauHook,
    TableauJobFailedException,
    TableauJobFinishCode,
)
from airflow.sensors.base import BaseSensorOperator

if TYPE_CHECKING:
    from airflow.utils.context import Context


class TableauJobStatusSensor(BaseSensorOperator):
    """
    Watches the status of a Tableau Server Job.

    .. seealso:: https://tableau.github.io/server-client-python/docs/api-ref#jobs

    :param job_id: Id of the job to watch.
    :param site_id: The id of the site where the workbook belongs to.
    :param tableau_conn_id: The :ref:`Tableau Connection id <howto/connection:tableau>`
        containing the credentials to authenticate to the Tableau Server.
    """

    template_fields: Sequence[str] = ("job_id",)

    def __init__(
        self,
        *,
        job_id: str,
        site_id: str | None = None,
        tableau_conn_id: str = "tableau_default",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.tableau_conn_id = tableau_conn_id
        self.job_id = job_id
        self.site_id = site_id

    def poke(self, context: Context) -> bool:
        """
        Pokes until the job has successfully finished.

        :param context: The task context during execution.
        :return: True if it succeeded and False if not.
        """
        with TableauHook(self.site_id, self.tableau_conn_id) as tableau_hook:
            finish_code = tableau_hook.get_job_status(job_id=self.job_id)
            self.log.info("Current finishCode is %s (%s)", finish_code.name, finish_code.value)

            if finish_code in (TableauJobFinishCode.ERROR, TableauJobFinishCode.CANCELED):
                raise TableauJobFailedException("The Tableau Refresh Workbook Job failed!")

        return finish_code == TableauJobFinishCode.SUCCESS
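

# --- Hedged usage sketch (added for illustration; not part of the upstream file) ---
# A common pattern is to trigger a non-blocking refresh upstream, push the Tableau job id to
# XCom, and let this sensor poll it. The task id "refresh_tableau_workbook", the site id
# "my_site" and the connection id "tableau_default" are assumptions made for the example.
#
#     from airflow.providers.tableau.sensors.tableau import TableauJobStatusSensor
#
#     wait_for_refresh = TableauJobStatusSensor(
#         task_id="wait_for_tableau_refresh",
#         job_id="{{ ti.xcom_pull(task_ids='refresh_tableau_workbook') }}",
#         site_id="my_site",
#         tableau_conn_id="tableau_default",
#         poke_interval=60,
#         timeout=60 * 60,
#     )
#
# job_id is listed in template_fields above, so the Jinja expression is rendered at runtime.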
| 2,701 | 35.513514 | 95 | py |
airflow | airflow-main/airflow/providers/tableau/sensors/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/smtp/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations

import packaging.version

__all__ = ["__version__"]

__version__ = "1.2.0"

try:
    from airflow import __version__ as airflow_version
except ImportError:
    from airflow.version import version as airflow_version

if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
    raise RuntimeError(
        f"The package `apache-airflow-providers-smtp:{__version__}` requires Apache Airflow 2.4.0+"  # NOQA: E501
    )
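
# Illustration of the version gate above (added for clarity; not part of the upstream file):
#     packaging.version.parse("2.3.4") < packaging.version.parse("2.4.0")   # True  -> RuntimeError is raised
#     packaging.version.parse("2.6.3") < packaging.version.parse("2.4.0")   # False -> the import proceeds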
| 1,529 | 35.428571 | 113 | py |
airflow | airflow-main/airflow/providers/smtp/notifications/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |