repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64 0-18.3M) | avg_line_length (float64 0-1.36M) | max_line_length (int64 0-4.26M) | extension_type (stringclasses 1 value) |
---|---|---|---|---|---|---|
airflow | airflow-main/airflow/providers/snowflake/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/snowflake/hooks/snowflake_sql_api.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import uuid
from datetime import timedelta
from pathlib import Path
from typing import Any
import aiohttp
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from airflow import AirflowException
from airflow.providers.snowflake.hooks.snowflake import SnowflakeHook
from airflow.providers.snowflake.utils.sql_api_generate_jwt import JWTGenerator
class SnowflakeSqlApiHook(SnowflakeHook):
"""
    A client to interact with Snowflake using the SQL API, allowing multiple SQL statements to be
    submitted in a single request.
    In combination with aiohttp, it makes POST requests to submit SQL statements for execution,
    polls to check the status of each statement's execution, and fetches query results asynchronously.
    This hook requires the snowflake_conn_id connection. The account, schema, database, warehouse, and
    private_key_file or private_key_content fields must be set up in the connection. Other inputs
    can be defined in the connection or at hook instantiation.
:param snowflake_conn_id: Reference to
:ref:`Snowflake connection id<howto/connection:snowflake>`
:param account: snowflake account name
:param authenticator: authenticator for Snowflake.
'snowflake' (default) to use the internal Snowflake authenticator
'externalbrowser' to authenticate using your web browser and
        Okta, ADFS or any other SAML 2.0-compliant identity provider
(IdP) that has been defined for your account
'https://<your_okta_account_name>.okta.com' to authenticate
through native Okta.
:param warehouse: name of snowflake warehouse
:param database: name of snowflake database
:param region: name of snowflake region
:param role: name of snowflake role
:param schema: name of snowflake schema
:param session_parameters: You can set session-level parameters at
the time you connect to Snowflake
:param token_life_time: lifetime of the JWT Token in timedelta
:param token_renewal_delta: Renewal time of the JWT Token in timedelta
:param deferrable: Run operator in the deferrable mode.
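    Example usage (an illustrative sketch; the connection id and SQL text are placeholders, not
    values required by this hook):
    .. code-block:: python
        hook = SnowflakeSqlApiHook(snowflake_conn_id="snowflake_default")
        query_ids = hook.execute_query(sql="SELECT 1; SELECT 2;", statement_count=2)
        for query_id in query_ids:
            print(hook.get_sql_api_query_status(query_id))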
"""
LIFETIME = timedelta(minutes=59) # The tokens will have a 59 minute lifetime
RENEWAL_DELTA = timedelta(minutes=54) # Tokens will be renewed after 54 minutes
def __init__(
self,
snowflake_conn_id: str,
token_life_time: timedelta = LIFETIME,
token_renewal_delta: timedelta = RENEWAL_DELTA,
*args: Any,
**kwargs: Any,
):
self.snowflake_conn_id = snowflake_conn_id
self.token_life_time = token_life_time
self.token_renewal_delta = token_renewal_delta
super().__init__(snowflake_conn_id, *args, **kwargs)
self.private_key: Any = None
def get_private_key(self) -> None:
"""Gets the private key from snowflake connection."""
conn = self.get_connection(self.snowflake_conn_id)
# If private_key_file is specified in the extra json, load the contents of the file as a private key.
# If private_key_content is specified in the extra json, use it as a private key.
# As a next step, specify this private key in the connection configuration.
# The connection password then becomes the passphrase for the private key.
# If your private key is not encrypted (not recommended), then leave the password empty.
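        # Illustrative examples of the connection "extra" JSON (paths and key material are placeholders):
        #   {"private_key_file": "/path/to/rsa_key.p8"}
        #   {"private_key_content": "-----BEGIN PRIVATE KEY-----\n..."}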
private_key_file = conn.extra_dejson.get(
"extra__snowflake__private_key_file"
) or conn.extra_dejson.get("private_key_file")
private_key_content = conn.extra_dejson.get(
"extra__snowflake__private_key_content"
) or conn.extra_dejson.get("private_key_content")
private_key_pem = None
if private_key_content and private_key_file:
raise AirflowException(
"The private_key_file and private_key_content extra fields are mutually exclusive. "
"Please remove one."
)
elif private_key_file:
private_key_pem = Path(private_key_file).read_bytes()
elif private_key_content:
private_key_pem = private_key_content.encode()
if private_key_pem:
passphrase = None
if conn.password:
passphrase = conn.password.strip().encode()
self.private_key = serialization.load_pem_private_key(
private_key_pem, password=passphrase, backend=default_backend()
)
def execute_query(
self, sql: str, statement_count: int, query_tag: str = "", bindings: dict[str, Any] | None = None
) -> list[str]:
"""
        Run the query in Snowflake by making a request to the Snowflake SQL API.
:param sql: the sql string to be executed with possibly multiple statements
:param statement_count: set the MULTI_STATEMENT_COUNT field to the number of SQL statements
in the request
:param query_tag: (Optional) Query tag that you want to associate with the SQL statement.
For details, see https://docs.snowflake.com/en/sql-reference/parameters.html#label-query-tag
parameter.
:param bindings: (Optional) Values of bind variables in the SQL statement.
When executing the statement, Snowflake replaces placeholders (? and :name) in
the statement with these specified values.
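            Example binding (illustrative; consult the Snowflake SQL API documentation for the exact
            schema): ``{"1": {"type": "TEXT", "value": "Jones"}}`` binds the first ``?`` placeholder.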
"""
conn_config = self._get_conn_params()
req_id = uuid.uuid4()
url = (
f"https://{conn_config['account']}.{conn_config['region']}"
f".snowflakecomputing.com/api/v2/statements"
)
params: dict[str, Any] | None = {"requestId": str(req_id), "async": True, "pageSize": 10}
headers = self.get_headers()
if bindings is None:
bindings = {}
data = {
"statement": sql,
"resultSetMetaData": {"format": "json"},
"database": conn_config["database"],
"schema": conn_config["schema"],
"warehouse": conn_config["warehouse"],
"role": conn_config["role"],
"bindings": bindings,
"parameters": {
"MULTI_STATEMENT_COUNT": statement_count,
"query_tag": query_tag,
},
}
response = requests.post(url, json=data, headers=headers, params=params)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as e: # pragma: no cover
raise AirflowException(f"Response: {e.response.content} Status Code: {e.response.status_code}")
json_response = response.json()
self.log.info("Snowflake SQL POST API response: %s", json_response)
if "statementHandles" in json_response:
self.query_ids = json_response["statementHandles"]
elif "statementHandle" in json_response:
self.query_ids.append(json_response["statementHandle"])
else:
raise AirflowException("No statementHandle/statementHandles present in response")
return self.query_ids
def get_headers(self) -> dict[str, Any]:
"""Form JWT Token and header based on the private key, and connection details."""
if not self.private_key:
self.get_private_key()
conn_config = self._get_conn_params()
# Get the JWT token from the connection details and the private key
token = JWTGenerator(
conn_config["account"], # type: ignore[arg-type]
conn_config["user"], # type: ignore[arg-type]
private_key=self.private_key,
lifetime=self.token_life_time,
renewal_delay=self.token_renewal_delta,
).get_token()
headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {token}",
"Accept": "application/json",
"User-Agent": "snowflakeSQLAPI/1.0",
"X-Snowflake-Authorization-Token-Type": "KEYPAIR_JWT",
}
return headers
def get_request_url_header_params(self, query_id: str) -> tuple[dict[str, Any], dict[str, Any], str]:
"""
        Build the request headers and URL with the account name identifier and query id from the connection params.
        :param query_id: statement handle id for the individual statement.
"""
conn_config = self._get_conn_params()
req_id = uuid.uuid4()
header = self.get_headers()
params = {"requestId": str(req_id)}
url = (
f"https://{conn_config['account']}.{conn_config['region']}"
f".snowflakecomputing.com/api/v2/statements/{query_id}"
)
return header, params, url
def check_query_output(self, query_ids: list[str]) -> None:
"""
Make HTTP request to snowflake SQL API based on the provided query ids and log the response.
        :param query_ids: statement handle ids for the individual statements.
"""
for query_id in query_ids:
header, params, url = self.get_request_url_header_params(query_id)
try:
response = requests.get(url, headers=header, params=params)
response.raise_for_status()
self.log.info(response.json())
except requests.exceptions.HTTPError as e:
raise AirflowException(
f"Response: {e.response.content}, Status Code: {e.response.status_code}"
)
def _process_response(self, status_code, resp):
self.log.info("Snowflake SQL GET statements status API response: %s", resp)
if status_code == 202:
return {"status": "running", "message": "Query statements are still running"}
elif status_code == 422:
return {"status": "error", "message": resp["message"]}
elif status_code == 200:
statement_handles = []
if "statementHandles" in resp and resp["statementHandles"]:
statement_handles = resp["statementHandles"]
elif "statementHandle" in resp and resp["statementHandle"]:
statement_handles.append(resp["statementHandle"])
return {
"status": "success",
"message": resp["message"],
"statement_handles": statement_handles,
}
else:
return {"status": "error", "message": resp["message"]}
def get_sql_api_query_status(self, query_id: str) -> dict[str, str | list[str]]:
"""
        Make an HTTP request to the Snowflake SQL API based on the query id and return the response.
        :param query_id: statement handle id for the individual statement.
"""
self.log.info("Retrieving status for query id %s", query_id)
header, params, url = self.get_request_url_header_params(query_id)
response = requests.get(url, params=params, headers=header)
status_code = response.status_code
resp = response.json()
return self._process_response(status_code, resp)
async def get_sql_api_query_status_async(self, query_id: str) -> dict[str, str | list[str]]:
"""
        Make an async HTTP request to the Snowflake SQL API based on the query id and return the response.
        :param query_id: statement handle id for the individual statement.
"""
self.log.info("Retrieving status for query id %s", query_id)
header, params, url = self.get_request_url_header_params(query_id)
async with aiohttp.ClientSession(headers=header) as session:
async with session.get(url, params=params) as response:
status_code = response.status
resp = await response.json()
return self._process_response(status_code, resp)
| 12,677 | 44.44086 | 109 | py |
airflow | airflow-main/airflow/providers/snowflake/utils/common.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
def enclose_param(param: str) -> str:
"""
    Replace all single quotes in the parameter with two single quotes and enclose the param in single quotes.
.. seealso::
https://docs.snowflake.com/en/sql-reference/data-types-text.html#single-quoted-string-constants
Examples:
.. code-block:: python
enclose_param("without quotes") # Returns: 'without quotes'
enclose_param("'with quotes'") # Returns: '''with quotes'''
enclose_param("Today's sales projections") # Returns: 'Today''s sales projections'
enclose_param("sample/john's.csv") # Returns: 'sample/john''s.csv'
enclose_param(".*'awesome'.*[.]csv") # Returns: '.*''awesome''.*[.]csv'
    :param param: parameter which requires single-quote enclosure.
"""
return f"""'{param.replace("'", "''")}'"""
| 1,644 | 41.179487 | 103 | py |
airflow | airflow-main/airflow/providers/snowflake/utils/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/snowflake/utils/sql_api_generate_jwt.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import base64
import hashlib
import logging
from datetime import datetime, timedelta, timezone
from typing import Any
# This class relies on the PyJWT module (https://pypi.org/project/PyJWT/).
import jwt
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
logger = logging.getLogger(__name__)
ISSUER = "iss"
EXPIRE_TIME = "exp"
ISSUE_TIME = "iat"
SUBJECT = "sub"
# If you generated an encrypted private key, the passphrase used to decrypt it is taken from the
# Snowflake connection password (see SnowflakeSqlApiHook.get_private_key).
class JWTGenerator:
"""
    Creates and signs a JWT with the specified private key, username, and account identifier.
    The JWTGenerator keeps the generated token and only regenerates it if a specified period
    of time has passed.
    The object generates JWTs for the specified user, account identifier, and private key.
:param account: Your Snowflake account identifier.
See https://docs.snowflake.com/en/user-guide/admin-account-identifier.html. Note that if you are using
the account locator, exclude any region information from the account locator.
:param user: The Snowflake username.
:param private_key: Private key from the file path for signing the JWTs.
:param lifetime: The number of minutes (as a timedelta) during which the key will be valid.
:param renewal_delay: The number of minutes (as a timedelta) from now after which the JWT
generator should renew the JWT.
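    Example usage (an illustrative sketch; the account, user, and key object are placeholders):
    .. code-block:: python
        generator = JWTGenerator(
            account="my_account",
            user="my_user",
            private_key=private_key,  # key object loaded with cryptography's load_pem_private_key
        )
        token = generator.get_token()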
"""
LIFETIME = timedelta(minutes=59) # The tokens will have a 59 minute lifetime
RENEWAL_DELTA = timedelta(minutes=54) # Tokens will be renewed after 54 minutes
ALGORITHM = "RS256" # Tokens will be generated using RSA with SHA256
def __init__(
self,
account: str,
user: str,
private_key: Any,
lifetime: timedelta = LIFETIME,
renewal_delay: timedelta = RENEWAL_DELTA,
):
logger.info(
"""Creating JWTGenerator with arguments
account : %s, user : %s, lifetime : %s, renewal_delay : %s""",
account,
user,
lifetime,
renewal_delay,
)
# Construct the fully qualified name of the user in uppercase.
self.account = self.prepare_account_name_for_jwt(account)
self.user = user.upper()
self.qualified_username = self.account + "." + self.user
self.lifetime = lifetime
self.renewal_delay = renewal_delay
self.private_key = private_key
self.renew_time = datetime.now(timezone.utc)
self.token: str | None = None
def prepare_account_name_for_jwt(self, raw_account: str) -> str:
"""
Prepare the account identifier for use in the JWT.
For the JWT, the account identifier must not include the subdomain or any region or cloud provider
information.
:param raw_account: The specified account identifier.
"""
account = raw_account
if ".global" not in account:
# Handle the general case.
idx = account.find(".")
if idx > 0:
account = account[0:idx]
else:
# Handle the replication case.
idx = account.find("-")
if idx > 0:
account = account[0:idx] # pragma: no cover
# Use uppercase for the account identifier.
return account.upper()
def get_token(self) -> str | None:
"""
Generates a new JWT.
        If a JWT has already been generated earlier, return the previously
        generated token unless the specified renewal time has passed.
"""
now = datetime.now(timezone.utc) # Fetch the current time
# If the token has expired or doesn't exist, regenerate the token.
if self.token is None or self.renew_time <= now:
logger.info(
"Generating a new token because the present time (%s) is later than the renewal time (%s)",
now,
self.renew_time,
)
# Calculate the next time we need to renew the token.
self.renew_time = now + self.renewal_delay
# Prepare the fields for the payload.
# Generate the public key fingerprint for the issuer in the payload.
public_key_fp = self.calculate_public_key_fingerprint(self.private_key)
# Create our payload
payload = {
# Set the issuer to the fully qualified username concatenated with the public key fingerprint.
ISSUER: self.qualified_username + "." + public_key_fp,
# Set the subject to the fully qualified username.
SUBJECT: self.qualified_username,
# Set the issue time to now.
ISSUE_TIME: now,
# Set the expiration time, based on the lifetime specified for this object.
EXPIRE_TIME: now + self.lifetime,
}
# Regenerate the actual token
token = jwt.encode(payload, key=self.private_key, algorithm=JWTGenerator.ALGORITHM)
if isinstance(token, bytes):
token = token.decode("utf-8")
self.token = token
return self.token
def calculate_public_key_fingerprint(self, private_key: Any) -> str:
"""
Given a private key in PEM format, return the public key fingerprint.
:param private_key: private key
"""
# Get the raw bytes of public key.
public_key_raw = private_key.public_key().public_bytes(
Encoding.DER, PublicFormat.SubjectPublicKeyInfo
)
# Get the sha256 hash of the raw bytes.
sha256hash = hashlib.sha256()
sha256hash.update(public_key_raw)
# Base64-encode the value and prepend the prefix 'SHA256:'.
public_key_fp = "SHA256:" + base64.b64encode(sha256hash.digest()).decode("utf-8")
return public_key_fp
| 6,889 | 37.49162 | 110 | py |
airflow | airflow-main/airflow/providers/qubole/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.4.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-qubole:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,531 | 35.47619 | 115 | py |
airflow | airflow-main/airflow/providers/qubole/operators/qubole_check.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Callable, Sequence
from airflow.exceptions import AirflowException
from airflow.providers.common.sql.operators.sql import SQLCheckOperator, SQLValueCheckOperator
from airflow.providers.qubole.hooks.qubole_check import QuboleCheckHook
from airflow.providers.qubole.operators.qubole import QuboleOperator
class _QuboleCheckOperatorMixin:
"""This is a Mixin for Qubole related check operators."""
kwargs: dict
results_parser_callable: Callable | None
def execute(self, context=None) -> None:
"""Execute a check operation against Qubole."""
try:
self._hook_context = context
super().execute(context=context) # type: ignore[misc]
except AirflowException as e:
handle_airflow_exception(e, self.get_hook())
def get_db_hook(self) -> QuboleCheckHook:
"""Get QuboleCheckHook."""
return self.get_hook()
def get_hook(self) -> QuboleCheckHook:
"""
        Reinitialise the hook, as some template fields might have changed.
        This method overrides the original QuboleOperator.get_hook(), which returns a QuboleHook.
"""
return QuboleCheckHook(
context=self._hook_context, results_parser_callable=self.results_parser_callable, **self.kwargs
)
class QuboleCheckOperator(_QuboleCheckOperatorMixin, SQLCheckOperator, QuboleOperator):
"""
Performs checks against Qubole Commands.
``QuboleCheckOperator`` expects a command that will be executed on QDS.
By default, each value on first row of the result of this Qubole Command
is evaluated using python ``bool`` casting. If any of the
values return ``False``, the check is failed and errors out.
    Note that Python bool casting evaluates the following as ``False``:
* ``False``
* ``0``
* Empty string (``""``)
* Empty list (``[]``)
* Empty dictionary or set (``{}``)
Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
    the count ``== 0``. You can craft a much more complex query that could,
for instance, check that the table has the same number of rows as
the source table upstream, or that the count of today's partition is
greater than yesterday's partition, or that a set of metrics are less
    than 3 standard deviations from the 7-day average.
This operator can be used as a data quality check in your pipeline, and
    depending on where you put it in your DAG, you have the choice to
    stop the critical path, preventing the
    publication of dubious data, or to run it on the side and receive email alerts
    without stopping the progress of the DAG.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:QuboleCheckOperator`
:param qubole_conn_id: Connection id which consists of qds auth_token
    :param results_parser_callable: This is an optional parameter to extend the flexibility of parsing the
        results of the Qubole command. This is a Python callable which can hold the logic to parse the
        list of rows returned by the Qubole command. By default, only the values on the first row are used
        for performing checks. This callable should return a list of records on which the checks have to be
        performed.
kwargs:
        Arguments specific to the Qubole command can be found in the QuboleOperator docs.
.. note:: All fields in common with template fields of
QuboleOperator and SQLCheckOperator are template-supported.
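    Example usage (an illustrative sketch; connection id, cluster label and query are placeholders):
    .. code-block:: python
        check_count = QuboleCheckOperator(
            task_id="check_row_count",
            command_type="hivecmd",
            query="SELECT COUNT(*) FROM my_table",
            cluster_label="default",
            qubole_conn_id="qubole_default",
        )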
"""
template_fields: Sequence[str] = tuple(
set(QuboleOperator.template_fields) | set(SQLCheckOperator.template_fields)
)
template_ext = QuboleOperator.template_ext
ui_fgcolor = "#000"
def __init__(
self,
*,
qubole_conn_id: str = "qubole_default",
results_parser_callable: Callable | None = None,
**kwargs,
) -> None:
sql = get_sql_from_qbol_cmd(kwargs)
kwargs.pop("sql", None)
super().__init__(qubole_conn_id=qubole_conn_id, sql=sql, **kwargs)
self.results_parser_callable = results_parser_callable
self.on_failure_callback = QuboleCheckHook.handle_failure_retry
self.on_retry_callback = QuboleCheckHook.handle_failure_retry
self._hook_context = None
# TODO(xinbinhuang): refactor to reduce levels of inheritance
class QuboleValueCheckOperator(_QuboleCheckOperatorMixin, SQLValueCheckOperator, QuboleOperator):
"""
    Performs a simple value check using a Qubole command.
    By default, each value on the first row of this
    Qubole command is compared with a pre-defined value.
    The check fails and errors out if the output of the command
    is not within the permissible range of the expected value.
:param qubole_conn_id: Connection id which consists of qds auth_token
:param pass_value: Expected value of the query results.
:param tolerance: Defines the permissible pass_value range, for example if tolerance is 2, the Qubole
command output can be anything between -2*pass_value and 2*pass_value, without the operator erring
out.
    :param results_parser_callable: This is an optional parameter to extend the flexibility of parsing the
        results of the Qubole command. This is a Python callable which can hold the logic to parse the
        list of rows returned by the Qubole command. By default, only the values on the first row are used
        for performing checks. This callable should return a list of records on which the checks have to be
        performed.
kwargs:
        Arguments specific to the Qubole command can be found in the QuboleOperator docs.
.. note:: All fields in common with template fields of
QuboleOperator and SQLValueCheckOperator are template-supported.
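    Example usage (an illustrative sketch; connection id, cluster label, query and pass_value are placeholders):
    .. code-block:: python
        value_check = QuboleValueCheckOperator(
            task_id="value_check",
            command_type="prestocmd",
            query="SELECT COUNT(*) FROM my_table",
            pass_value=100,
            cluster_label="default",
            qubole_conn_id="qubole_default",
        )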
"""
template_fields = tuple(set(QuboleOperator.template_fields) | set(SQLValueCheckOperator.template_fields))
template_ext = QuboleOperator.template_ext
ui_fgcolor = "#000"
def __init__(
self,
*,
pass_value: str | int | float,
tolerance: int | float | None = None,
results_parser_callable: Callable | None = None,
qubole_conn_id: str = "qubole_default",
**kwargs,
) -> None:
sql = get_sql_from_qbol_cmd(kwargs)
kwargs.pop("sql", None)
super().__init__(
qubole_conn_id=qubole_conn_id, sql=sql, pass_value=pass_value, tolerance=tolerance, **kwargs
)
self.results_parser_callable = results_parser_callable
self.on_failure_callback = QuboleCheckHook.handle_failure_retry
self.on_retry_callback = QuboleCheckHook.handle_failure_retry
self._hook_context = None
def get_sql_from_qbol_cmd(params) -> str:
"""Get Qubole sql from Qubole command."""
sql = ""
if "query" in params:
sql = params["query"]
elif "sql" in params:
sql = params["sql"]
return sql
def handle_airflow_exception(airflow_exception, hook: QuboleCheckHook):
"""Qubole check handle Airflow exception."""
cmd = hook.cmd
if cmd is not None:
if cmd.is_success(cmd.status):
qubole_command_results = hook.get_query_results()
qubole_command_id = cmd.id
exception_message = (
f"\nQubole Command Id: {qubole_command_id}\nQubole Command Results:\n{qubole_command_results}"
)
raise AirflowException(str(airflow_exception) + exception_message)
raise AirflowException(str(airflow_exception))
| 8,390 | 40.132353 | 110 | py |
airflow | airflow-main/airflow/providers/qubole/operators/qubole.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Qubole operator."""
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Sequence
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator, BaseOperatorLink, XCom
from airflow.providers.qubole.hooks.qubole import (
COMMAND_ARGS,
HYPHEN_ARGS,
POSITIONAL_ARGS,
QuboleHook,
flatten_list,
)
if TYPE_CHECKING:
from airflow.models.taskinstancekey import TaskInstanceKey
from airflow.utils.context import Context
class QDSLink(BaseOperatorLink):
"""Link to QDS."""
name = "Go to QDS"
def get_link(
self,
operator: BaseOperator,
*,
ti_key: TaskInstanceKey,
) -> str:
"""
Get link to qubole command result page.
:param operator: operator
:return: url link
"""
conn = BaseHook.get_connection(
getattr(operator, "qubole_conn_id", None)
or operator.kwargs["qubole_conn_id"] # type: ignore[attr-defined]
)
if conn and conn.host:
host = re.sub(r"api$", "v2/analyze?command_id=", conn.host)
else:
host = "https://api.qubole.com/v2/analyze?command_id="
qds_command_id = XCom.get_value(key="qbol_cmd_id", ti_key=ti_key)
url = host + str(qds_command_id) if qds_command_id else ""
return url
class QuboleOperator(BaseOperator):
"""
Execute tasks (commands) on QDS (https://qubole.com).
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:QuboleOperator`
:param qubole_conn_id: Connection id which consists of qds auth_token
kwargs:
:command_type: type of command to be executed, e.g. hivecmd, shellcmd, hadoopcmd
:tags: array of tags to be assigned with the command
:cluster_label: cluster label on which the command will be executed
:name: name to be given to command
:notify: whether to send email on command completion or not (default is False)
**Arguments specific to command types**
hivecmd:
:query: inline query statement
:script_location: s3 location containing query statement
:sample_size: size of sample in bytes on which to run query
:macros: macro values which were used in query
:sample_size: size of sample in bytes on which to run query
:hive-version: Specifies the hive version to be used. eg: 0.13,1.2,etc.
prestocmd:
:query: inline query statement
:script_location: s3 location containing query statement
:macros: macro values which were used in query
hadoopcmd:
            :sub_command: must be one of these ["jar", "s3distcp", "streaming"] followed by
1 or more args
shellcmd:
:script: inline command with args
:script_location: s3 location containing query statement
:files: list of files in s3 bucket as file1,file2 format. These files will be
copied into the working directory where the qubole command is being
executed.
:archives: list of archives in s3 bucket as archive1,archive2 format. These
will be unarchived into the working directory where the qubole command is
being executed
:parameters: any extra args which need to be passed to script (only when
script_location is supplied)
pigcmd:
:script: inline query statement (latin_statements)
:script_location: s3 location containing pig query
:parameters: any extra args which need to be passed to script (only when
script_location is supplied
sparkcmd:
:program: the complete Spark Program in Scala, R, or Python
            :cmdline: spark-submit command line, all required arguments must be specified
                in cmdline itself.
:sql: inline sql query
:script_location: s3 location containing query statement
:language: language of the program, Scala, R, or Python
            :app_id: ID of a Spark job server app
:arguments: spark-submit command line arguments.
If `cmdline` is selected, this should not be used because all
required arguments and configurations are to be passed in the `cmdline` itself.
:user_program_arguments: arguments that the user program takes in
:macros: macro values which were used in query
:note_id: Id of the Notebook to run
dbtapquerycmd:
:db_tap_id: data store ID of the target database, in Qubole.
:query: inline query statement
:macros: macro values which were used in query
dbexportcmd:
:mode: Can be 1 for Hive export or 2 for HDFS/S3 export
:schema: Db schema name assumed accordingly by database if not specified
:hive_table: Name of the hive table
:partition_spec: partition specification for Hive table.
:dbtap_id: data store ID of the target database, in Qubole.
:db_table: name of the db table
:db_update_mode: allowinsert or updateonly
:db_update_keys: columns used to determine the uniqueness of rows
:export_dir: HDFS/S3 location from which data will be exported.
:fields_terminated_by: hex of the char used as column separator in the dataset
:use_customer_cluster: To use cluster to run command
:customer_cluster_label: the label of the cluster to run the command on
            :additional_options: Additional Sqoop options which are needed; enclose options in
                double or single quotes, e.g. '--map-column-hive id=int,data=string'
dbimportcmd:
:mode: 1 (simple), 2 (advance)
:hive_table: Name of the hive table
:schema: Db schema name assumed accordingly by database if not specified
:hive_serde: Output format of the Hive Table
:dbtap_id: data store ID of the target database, in Qubole.
:db_table: name of the db table
:where_clause: where clause, if any
:parallelism: number of parallel db connections to use for extracting data
:extract_query: SQL query to extract data from db. $CONDITIONS must be part
of the where clause.
:boundary_query: Query to be used get range of row IDs to be extracted
:split_column: Column used as row ID to split data into ranges (mode 2)
:use_customer_cluster: To use cluster to run command
:customer_cluster_label: the label of the cluster to run the command on
            :additional_options: Additional Sqoop options which are needed; enclose options in
                double or single quotes
jupytercmd:
:path: Path including name of the Jupyter notebook to be run with extension.
:arguments: Valid JSON to be sent to the notebook. Specify the parameters in notebooks and pass
the parameter value using the JSON format. key is the parameter's name and value is
the parameter's value. Supported types in parameters are string, integer, float and boolean.
.. note:
Following fields are template-supported : ``query``, ``script_location``,
``sub_command``, ``script``, ``files``, ``archives``, ``program``, ``cmdline``,
``sql``, ``where_clause``, ``extract_query``, ``boundary_query``, ``macros``,
``tags``, ``name``, ``parameters``, ``dbtap_id``, ``hive_table``, ``db_table``,
``split_column``, ``note_id``, ``db_update_keys``, ``export_dir``,
``partition_spec``, ``qubole_conn_id``, ``arguments``, ``user_program_arguments``.
You can also use ``.txt`` files for template driven use cases.
.. note:
In QuboleOperator there is a default handler for task failures and retries,
which generally kills the command running at QDS for the corresponding task
instance. You can override this behavior by providing your own failure and retry
handler in task definition.
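    Example usage (an illustrative sketch; connection id, cluster label and query are placeholders):
    .. code-block:: python
        run_hive_query = QuboleOperator(
            task_id="hive_show_tables",
            command_type="hivecmd",
            query="SHOW TABLES",
            cluster_label="default",
            fetch_logs=True,
            qubole_conn_id="qubole_default",
        )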
"""
template_fields: Sequence[str] = (
"query",
"script_location",
"sub_command",
"script",
"files",
"archives",
"program",
"cmdline",
"sql",
"where_clause",
"tags",
"extract_query",
"boundary_query",
"macros",
"name",
"parameters",
"dbtap_id",
"hive_table",
"db_table",
"split_column",
"note_id",
"db_update_keys",
"export_dir",
"partition_spec",
"qubole_conn_id",
"arguments",
"user_program_arguments",
"cluster_label",
)
template_ext: Sequence[str] = (".txt",)
ui_color = "#3064A1"
ui_fgcolor = "#fff"
qubole_hook_allowed_args_list = ["command_type", "qubole_conn_id", "fetch_logs"]
operator_extra_links = (QDSLink(),)
def __init__(self, *, qubole_conn_id: str = "qubole_default", **kwargs) -> None:
self.kwargs = kwargs
self.kwargs["qubole_conn_id"] = qubole_conn_id
self.hook: QuboleHook | None = None
filtered_base_kwargs = self._get_filtered_args(kwargs)
super().__init__(**filtered_base_kwargs)
if self.on_failure_callback is None:
self.on_failure_callback = QuboleHook.handle_failure_retry
if self.on_retry_callback is None:
self.on_retry_callback = QuboleHook.handle_failure_retry
def _get_filtered_args(self, all_kwargs) -> dict:
qubole_args = (
flatten_list(COMMAND_ARGS.values())
+ HYPHEN_ARGS
+ flatten_list(POSITIONAL_ARGS.values())
+ self.qubole_hook_allowed_args_list
)
return {key: value for key, value in all_kwargs.items() if key not in qubole_args}
def execute(self, context: Context) -> None:
return self.get_hook().execute(context)
def on_kill(self, ti=None) -> None:
if self.hook:
self.hook.kill(ti)
else:
self.get_hook().kill(ti)
def get_results(
self,
ti=None,
fp=None,
inline: bool = True,
delim=None,
fetch: bool = True,
include_headers: bool = False,
) -> str:
"""get_results from Qubole."""
return self.get_hook().get_results(ti, fp, inline, delim, fetch, include_headers)
def get_log(self, ti) -> None:
"""get_log from Qubole."""
return self.get_hook().get_log(ti)
def get_jobs_id(self, ti) -> None:
"""Get jobs_id from Qubole."""
return self.get_hook().get_jobs_id(ti)
def get_hook(self) -> QuboleHook:
"""Reinitialising the hook, as some template fields might have changed."""
return QuboleHook(**self.kwargs)
def __getattribute__(self, name: str) -> str:
if name in _get_template_fields(self):
if name in self.kwargs:
return self.kwargs[name]
else:
return ""
else:
return object.__getattribute__(self, name)
def __setattr__(self, name: str, value: str) -> None:
if name in _get_template_fields(self):
self.kwargs[name] = value
else:
object.__setattr__(self, name, value)
def _get_template_fields(obj: BaseOperator) -> dict:
class_ = object.__getattribute__(obj, "__class__")
template_fields = object.__getattribute__(class_, "template_fields")
return template_fields
| 12,547 | 40.006536 | 108 | py |
airflow | airflow-main/airflow/providers/qubole/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/qubole/hooks/qubole_check.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from io import StringIO
from qds_sdk.commands import Command
from airflow.exceptions import AirflowException
from airflow.providers.common.sql.hooks.sql import DbApiHook
from airflow.providers.qubole.hooks.qubole import QuboleHook
log = logging.getLogger(__name__)
COL_DELIM = "\t"
ROW_DELIM = "\r\n"
def isint(value) -> bool:
"""Whether Qubole column are integer."""
try:
int(value)
return True
except ValueError:
return False
def isfloat(value) -> bool:
"""Whether Qubole column are float."""
try:
float(value)
return True
except ValueError:
return False
def isbool(value) -> bool:
"""Whether Qubole column are boolean."""
try:
return value.lower() in ["true", "false"]
except ValueError:
return False
def parse_first_row(row_list) -> list[bool | float | int | str]:
"""Parse Qubole first record list."""
record_list = []
first_row = row_list[0] if row_list else ""
for col_value in first_row.split(COL_DELIM):
if isint(col_value):
col_value = int(col_value)
elif isfloat(col_value):
col_value = float(col_value)
elif isbool(col_value):
col_value = col_value.lower() == "true"
record_list.append(col_value)
return record_list
class QuboleCheckHook(QuboleHook, DbApiHook):
"""Qubole check hook."""
def __init__(self, context, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.results_parser_callable = parse_first_row
if "results_parser_callable" in kwargs and kwargs["results_parser_callable"] is not None:
if not callable(kwargs["results_parser_callable"]):
raise AirflowException("`results_parser_callable` param must be callable")
self.results_parser_callable = kwargs["results_parser_callable"]
self.context = context
@staticmethod
def handle_failure_retry(context) -> None:
ti = context["ti"]
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
if cmd_id is not None:
cmd = Command.find(cmd_id)
if cmd is not None:
if cmd.status == "running":
log.info("Cancelling the Qubole Command Id: %s", cmd_id)
cmd.cancel()
def get_first(self, sql):
"""Get Qubole query first record list."""
self.execute(context=self.context)
query_result = self.get_query_results()
row_list = list(filter(None, query_result.split(ROW_DELIM)))
record_list = self.results_parser_callable(row_list)
return record_list
def get_query_results(self) -> str | None:
"""Get Qubole query result."""
if self.cmd is not None:
cmd_id = self.cmd.id
self.log.info("command id: %d", cmd_id)
query_result_buffer = StringIO()
self.cmd.get_results(fp=query_result_buffer, inline=True, delim=COL_DELIM, arguments=["true"])
query_result = query_result_buffer.getvalue()
query_result_buffer.close()
return query_result
else:
self.log.error("Qubole command not found")
return None
| 4,091 | 32.268293 | 106 | py |
airflow | airflow-main/airflow/providers/qubole/hooks/qubole.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Qubole hook."""
from __future__ import annotations
import datetime
import logging
import os
import pathlib
import time
from typing import TYPE_CHECKING, Any
from qds_sdk.commands import (
Command,
DbExportCommand,
DbImportCommand,
DbTapQueryCommand,
HadoopCommand,
HiveCommand,
JupyterNotebookCommand,
PigCommand,
PrestoCommand,
ShellCommand,
SparkCommand,
SqlCommand,
)
from qds_sdk.qubole import Qubole
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.utils.state import State
if TYPE_CHECKING:
from airflow.models.taskinstance import TaskInstance
from airflow.serialization.pydantic.taskinstance import TaskInstancePydantic
from airflow.utils.context import Context
log = logging.getLogger(__name__)
COMMAND_CLASSES = {
"hivecmd": HiveCommand,
"prestocmd": PrestoCommand,
"hadoopcmd": HadoopCommand,
"shellcmd": ShellCommand,
"pigcmd": PigCommand,
"sparkcmd": SparkCommand,
"dbtapquerycmd": DbTapQueryCommand,
"dbexportcmd": DbExportCommand,
"dbimportcmd": DbImportCommand,
"sqlcmd": SqlCommand,
"jupytercmd": JupyterNotebookCommand,
}
POSITIONAL_ARGS = {"hadoopcmd": ["sub_command"], "shellcmd": ["parameters"], "pigcmd": ["parameters"]}
def flatten_list(list_of_lists) -> list:
"""Flatten the list."""
return [element for array in list_of_lists for element in array]
def filter_options(options: list) -> list:
"""Remove options from the list."""
options_to_remove = ["help", "print-logs-live", "print-logs", "pool"]
return [option for option in options if option not in options_to_remove]
def get_options_list(command_class) -> list:
"""Get options list."""
options_list = [option.get_opt_string().strip("--") for option in command_class.optparser.option_list]
return filter_options(options_list)
def build_command_args() -> tuple[dict[str, list], list]:
"""Build Command argument from command and options."""
command_args, hyphen_args = {}, set()
for cmd in COMMAND_CLASSES:
# get all available options from the class
opts_list = get_options_list(COMMAND_CLASSES[cmd])
# append positional args if any for the command
if cmd in POSITIONAL_ARGS:
opts_list += POSITIONAL_ARGS[cmd]
# get args with a hyphen and replace them with underscore
for index, opt in enumerate(opts_list):
if "-" in opt:
opts_list[index] = opt.replace("-", "_")
hyphen_args.add(opts_list[index])
command_args[cmd] = opts_list
return command_args, list(hyphen_args)
COMMAND_ARGS, HYPHEN_ARGS = build_command_args()
class QuboleHook(BaseHook):
"""Hook for Qubole communication."""
conn_name_attr: str = "qubole_conn_id"
default_conn_name = "qubole_default"
conn_type = "qubole"
hook_name = "Qubole"
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["login", "schema", "port", "extra"],
"relabeling": {
"host": "API Endpoint",
"password": "Auth Token",
},
"placeholders": {"host": "https://<env>.qubole.com/api"},
}
def __init__(self, *args, **kwargs) -> None:
super().__init__()
conn = self.get_connection(kwargs.get("qubole_conn_id", self.default_conn_name))
Qubole.configure(api_token=conn.password, api_url=conn.host)
self.task_id = kwargs["task_id"]
self.dag_id = kwargs["dag"].dag_id
self.kwargs = kwargs
self.cls = COMMAND_CLASSES[self.kwargs["command_type"]]
self.cmd: Command | None = None
self.task_instance: TaskInstance | TaskInstancePydantic | None = None
@staticmethod
def handle_failure_retry(context) -> None:
"""Handle retries in case of failures."""
ti = context["ti"]
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
if cmd_id is not None:
cmd = Command.find(cmd_id)
if cmd is not None:
if cmd.status == "done":
log.info("Command ID: %s has been succeeded, hence marking this TI as Success.", cmd_id)
ti.state = State.SUCCESS
elif cmd.status == "running":
log.info("Cancelling the Qubole Command Id: %s", cmd_id)
cmd.cancel()
def execute(self, context: Context) -> None:
"""Execute call."""
args = self.cls.parse(self.create_cmd_args(context))
self.cmd = self.cls.create(**args)
self.task_instance = context["task_instance"]
context["task_instance"].xcom_push(key="qbol_cmd_id", value=self.cmd.id) # type: ignore[attr-defined]
self.log.info(
"Qubole command created with Id: %s and Status: %s",
self.cmd.id, # type: ignore[attr-defined]
self.cmd.status, # type: ignore[attr-defined]
)
while not Command.is_done(self.cmd.status): # type: ignore[attr-defined]
time.sleep(Qubole.poll_interval)
self.cmd = self.cls.find(self.cmd.id) # type: ignore[attr-defined]
self.log.info(
"Command Id: %s and Status: %s", self.cmd.id, self.cmd.status # type: ignore[attr-defined]
)
if "fetch_logs" in self.kwargs and self.kwargs["fetch_logs"] is True:
self.log.info(
"Logs for Command Id: %s \n%s", self.cmd.id, self.cmd.get_log() # type: ignore[attr-defined]
)
if self.cmd.status != "done": # type: ignore[attr-defined]
raise AirflowException(
"Command Id: {} failed with Status: {}".format(
self.cmd.id, self.cmd.status # type: ignore[attr-defined]
)
)
def kill(self, ti):
"""
Kill (cancel) a Qubole command.
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: response from Qubole
"""
if self.cmd is None:
if not ti and not self.task_instance:
raise Exception("Unable to cancel Qubole Command, context is unavailable!")
elif not ti:
ti = self.task_instance
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
self.log.info("Sending KILL signal to Qubole Command Id: %s", self.cmd.id)
self.cmd.cancel()
def get_results(
self,
ti=None,
fp=None,
inline: bool = True,
delim=None,
fetch: bool = True,
include_headers: bool = False,
) -> str:
"""
Get results (or just s3 locations) of a command from Qubole and save into a file.
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
base_log_folder = conf.get_mandatory_value("logging", "BASE_LOG_FOLDER")
logpath = os.path.expanduser(base_log_folder)
resultpath = logpath + "/" + self.dag_id + "/" + self.task_id + "/results"
pathlib.Path(resultpath).mkdir(parents=True, exist_ok=True)
fp = open(resultpath + "/" + iso, "wb")
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
include_headers_str = "true" if include_headers else "false"
self.cmd.get_results(
fp, inline, delim, fetch, arguments=[include_headers_str]
) # type: ignore[attr-defined]
fp.flush()
fp.close()
return fp.name
def get_log(self, ti) -> None:
"""
Get Logs of a command from Qubole.
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: command log as text
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_log_id(cmd_id)
def get_jobs_id(self, ti) -> None:
"""
        Get jobs associated with a Qubole command.
        :param ti: Task Instance of the dag, used to determine the Qubole command id
:return: Job information associated with command
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_jobs_id(cmd_id)
def create_cmd_args(self, context) -> list[str]:
"""Creates command arguments."""
args = []
cmd_type = self.kwargs["command_type"]
inplace_args = None
tags = {self.dag_id, self.task_id, context["run_id"]}
positional_args_list = flatten_list(POSITIONAL_ARGS.values())
for key, value in self.kwargs.items():
if key in COMMAND_ARGS[cmd_type]:
if key in HYPHEN_ARGS:
args.append(f"--{key.replace('_', '-')}={value}")
elif key in positional_args_list:
inplace_args = value
elif key == "tags":
self._add_tags(tags, value)
elif key == "notify":
if value is True:
args.append("--notify")
else:
args.append(f"--{key}={value}")
args.append(f"--tags={','.join(filter(None, tags))}")
if inplace_args is not None:
args += inplace_args.split(" ")
return args
@staticmethod
def _add_tags(tags, value) -> None:
if isinstance(value, str):
tags.add(value)
elif isinstance(value, (list, tuple)):
tags.update(value)
| 11,222 | 35.438312 | 110 | py |
airflow | airflow-main/airflow/providers/qubole/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/qubole/sensors/qubole.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from qds_sdk.qubole import Qubole
from qds_sdk.sensors import FileSensor, PartitionSensor
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class QuboleSensor(BaseSensorOperator):
"""Base class for all Qubole Sensors."""
template_fields: Sequence[str] = ("data", "qubole_conn_id")
template_ext: Sequence[str] = (".txt",)
def __init__(self, *, data, qubole_conn_id: str = "qubole_default", **kwargs) -> None:
self.data = data
self.qubole_conn_id = qubole_conn_id
if "poke_interval" in kwargs and kwargs["poke_interval"] < 5:
raise AirflowException(
f"Sorry, poke_interval can't be less than 5 sec for task '{kwargs['task_id']}' "
f"in dag '{kwargs['dag'].dag_id}'."
)
super().__init__(**kwargs)
def poke(self, context: Context) -> bool:
conn = BaseHook.get_connection(self.qubole_conn_id)
Qubole.configure(api_token=conn.password, api_url=conn.host)
self.log.info("Poking: %s", self.data)
status = False
try:
status = self.sensor_class.check(self.data) # type: ignore[attr-defined]
except Exception as e:
self.log.exception(e)
status = False
self.log.info("Status of this Poke: %s", status)
return status
class QuboleFileSensor(QuboleSensor):
"""
Wait for a file or folder to be present in cloud storage.
Check for file or folder presence via QDS APIs.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/operator:QuboleFileSensor`
:param qubole_conn_id: Connection id which consists of qds auth_token
:param data: a JSON object containing payload, whose presence needs to be checked
Check this `example <https://github.com/apache/airflow/blob/main\
/airflow/providers/qubole/example_dags/example_qubole_sensor.py>`_ for sample payload
structure.
.. note:: Both ``data`` and ``qubole_conn_id`` fields support templating. You can
also use ``.txt`` files for template-driven use cases.
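
    Example (a minimal sketch; the bucket path is illustrative and the payload keys follow the
    linked example DAG, which remains the authoritative reference):

    .. code-block:: python

        check_s3_file = QuboleFileSensor(
            task_id="check_s3_file",
            qubole_conn_id="qubole_default",
            poke_interval=60,
            timeout=600,
            data={"files": ["s3://my-bucket/path/to/file.csv"]},
        )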
"""
def __init__(self, **kwargs) -> None:
self.sensor_class = FileSensor
super().__init__(**kwargs)
class QubolePartitionSensor(QuboleSensor):
"""
Wait for a Hive partition to show up in QHS (Qubole Hive Service).
Check for Hive partition presence via QDS APIs.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/operator:QubolePartitionSensor`
:param qubole_conn_id: Connection id which consists of qds auth_token
:param data: a JSON object containing payload, whose presence needs to be checked.
Check this `example <https://github.com/apache/airflow/blob/main\
/airflow/providers/qubole/example_dags/example_qubole_sensor.py>`_ for sample payload
structure.
.. note:: Both ``data`` and ``qubole_conn_id`` fields support templating. You can
also use ``.txt`` files for template-driven use cases.
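
    Example (a minimal sketch; the schema, table and partition column are illustrative and the
    payload keys follow the linked example DAG, which remains the authoritative reference):

    .. code-block:: python

        check_hive_partition = QubolePartitionSensor(
            task_id="check_hive_partition",
            qubole_conn_id="qubole_default",
            poke_interval=60,
            timeout=600,
            data={
                "schema": "default",
                "table": "my_partitioned_table",
                "columns": [{"column": "month", "values": ["{{ ds }}"]}],
            },
        )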
"""
def __init__(self, **kwargs) -> None:
self.sensor_class = PartitionSensor
super().__init__(**kwargs)
| 4,240 | 34.638655 | 96 | py |
airflow | airflow-main/airflow/providers/qubole/sensors/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/vertica/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.5.0"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-vertica:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,532 | 35.5 | 116 | py |
airflow | airflow-main/airflow/providers/vertica/operators/vertica.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import Any, Sequence
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.common.sql.operators.sql import SQLExecuteQueryOperator
class VerticaOperator(SQLExecuteQueryOperator):
"""
Executes sql code in a specific Vertica database.
This class is deprecated.
Please use :class:`airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator`.
:param vertica_conn_id: reference to a specific Vertica database
:param sql: the SQL code to be executed as a single string, or
a list of str (sql statements), or a reference to a template file.
Template references are recognized by str ending in '.sql'
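
    For new DAGs, use the replacement operator directly (a minimal sketch; the connection id and
    SQL statement are illustrative):

    .. code-block:: python

        from airflow.providers.common.sql.operators.sql import SQLExecuteQueryOperator

        create_table = SQLExecuteQueryOperator(
            task_id="create_table",
            conn_id="vertica_default",
            sql="CREATE TABLE IF NOT EXISTS example (id INT, name VARCHAR(50))",
        )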
"""
template_fields: Sequence[str] = ("sql",)
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "sql"}
ui_color = "#b4e0ff"
def __init__(self, *, vertica_conn_id: str = "vertica_default", **kwargs: Any) -> None:
super().__init__(conn_id=vertica_conn_id, **kwargs)
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator`.""",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
| 2,100 | 37.907407 | 96 | py |
airflow | airflow-main/airflow/providers/vertica/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/vertica/hooks/vertica.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from vertica_python import connect
from airflow.providers.common.sql.hooks.sql import DbApiHook
class VerticaHook(DbApiHook):
"""Interact with Vertica."""
conn_name_attr = "vertica_conn_id"
default_conn_name = "vertica_default"
conn_type = "vertica"
hook_name = "Vertica"
supports_autocommit = True
def get_conn(self) -> connect:
"""Return verticaql connection object."""
conn = self.get_connection(self.vertica_conn_id) # type: ignore
conn_config = {
"user": conn.login,
"password": conn.password or "",
"database": conn.schema,
"host": conn.host or "localhost",
}
if not conn.port:
conn_config["port"] = 5433
else:
conn_config["port"] = int(conn.port)
bool_options = [
"connection_load_balance",
"binary_transfer",
"disable_copy_local",
"request_complex_types",
"use_prepared_statements",
]
std_options = [
"session_label",
"backup_server_node",
"kerberos_host_name",
"kerberos_service_name",
"unicode_error",
"workload",
"ssl",
]
conn_extra = conn.extra_dejson
for bo in bool_options:
if bo in conn_extra:
conn_config[bo] = str(conn_extra[bo]).lower() in ["true", "on"]
for so in std_options:
if so in conn_extra:
conn_config[so] = conn_extra[so]
if "connection_timeout" in conn_extra:
conn_config["connection_timeout"] = float(conn_extra["connection_timeout"])
if "log_level" in conn_extra:
import logging
log_lvl = conn_extra["log_level"]
conn_config["log_path"] = None
if isinstance(log_lvl, str):
log_lvl = log_lvl.lower()
if log_lvl == "critical":
conn_config["log_level"] = logging.CRITICAL
elif log_lvl == "error":
conn_config["log_level"] = logging.ERROR
elif log_lvl == "warning":
conn_config["log_level"] = logging.WARNING
elif log_lvl == "info":
conn_config["log_level"] = logging.INFO
elif log_lvl == "debug":
conn_config["log_level"] = logging.DEBUG
elif log_lvl == "notset":
conn_config["log_level"] = logging.NOTSET
else:
conn_config["log_level"] = int(conn_extra["log_level"])
conn = connect(**conn_config)
return conn
| 3,531 | 33.627451 | 87 | py |
airflow | airflow-main/airflow/providers/vertica/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/segment/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-segment:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,532 | 35.5 | 116 | py |
airflow | airflow-main/airflow/providers/segment/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/segment/operators/segment_track_event.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from airflow.models import BaseOperator
from airflow.providers.segment.hooks.segment import SegmentHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SegmentTrackEventOperator(BaseOperator):
"""
Send Track Event to Segment for a specified user_id and event.
:param user_id: The ID for this user in your database. (templated)
:param event: The name of the event you're tracking. (templated)
:param properties: A dictionary of properties for the event. (templated)
:param segment_conn_id: The connection ID to use when connecting to Segment.
:param segment_debug_mode: Determines whether Segment should run in debug mode.
Defaults to False
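
    Example (a minimal sketch; the user id, event name and properties are illustrative):

    .. code-block:: python

        track_signup = SegmentTrackEventOperator(
            task_id="track_signup",
            user_id="some_user_id",
            event="Signed Up",
            properties={"plan": "premium"},
            segment_conn_id="segment_default",
        )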
"""
template_fields: Sequence[str] = ("user_id", "event", "properties")
ui_color = "#ffd700"
def __init__(
self,
*,
user_id: str,
event: str,
properties: dict | None = None,
segment_conn_id: str = "segment_default",
segment_debug_mode: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.user_id = user_id
self.event = event
properties = properties or {}
self.properties = properties
self.segment_debug_mode = segment_debug_mode
self.segment_conn_id = segment_conn_id
def execute(self, context: Context) -> None:
hook = SegmentHook(segment_conn_id=self.segment_conn_id, segment_debug_mode=self.segment_debug_mode)
self.log.info(
"Sending track event (%s) for user id: %s with properties: %s",
self.event,
self.user_id,
self.properties,
)
hook.track(user_id=self.user_id, event=self.event, properties=self.properties) # type: ignore
| 2,646 | 35.260274 | 108 | py |
airflow | airflow-main/airflow/providers/segment/hooks/segment.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Connect to your Segment account, retrieve data from it, or write data to it.
NOTE: this hook also relies on the Segment analytics package:
https://github.com/segmentio/analytics-python
"""
from __future__ import annotations
import analytics
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
class SegmentHook(BaseHook):
"""
    Creates a new connection to Segment and allows you to pull data out of Segment or write to it.
    You can then use that connection with other
    Airflow operators to move the data around or interact with Segment.
:param segment_conn_id: the name of the connection that has the parameters
we need to connect to Segment. The connection should be type `json` and include a
write_key security token in the `Extras` field.
:param segment_debug_mode: Determines whether Segment should run in debug mode.
Defaults to False
.. note::
You must include a JSON structure in the `Extras` field.
We need a user's security token to connect to Segment.
So we define it in the `Extras` field as:
`{"write_key":"YOUR_SECURITY_TOKEN"}`
"""
conn_name_attr = "segment_conn_id"
default_conn_name = "segment_default"
conn_type = "segment"
hook_name = "Segment"
def __init__(
self, segment_conn_id: str = "segment_default", segment_debug_mode: bool = False, *args, **kwargs
) -> None:
super().__init__()
self.segment_conn_id = segment_conn_id
self.segment_debug_mode = segment_debug_mode
self._args = args
self._kwargs = kwargs
# get the connection parameters
self.connection = self.get_connection(self.segment_conn_id)
self.extras = self.connection.extra_dejson
self.write_key = self.extras.get("write_key")
if self.write_key is None:
raise AirflowException("No Segment write key provided")
def get_conn(self) -> analytics:
self.log.info("Setting write key for Segment analytics connection")
analytics.debug = self.segment_debug_mode
if self.segment_debug_mode:
self.log.info("Setting Segment analytics connection to debug mode")
analytics.on_error = self.on_error
analytics.write_key = self.write_key
return analytics
def on_error(self, error: str, items: str) -> None:
"""Handles error callbacks when using Segment with segment_debug_mode set to True."""
self.log.error("Encountered Segment error: %s with items: %s", error, items)
raise AirflowException(f"Segment error: {error}")
| 3,428 | 38.872093 | 105 | py |
airflow | airflow-main/airflow/providers/segment/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/grpc/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-grpc:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,529 | 35.428571 | 113 | py |
airflow | airflow-main/airflow/providers/grpc/operators/grpc.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Callable, Sequence
from airflow.models import BaseOperator
from airflow.providers.grpc.hooks.grpc import GrpcHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class GrpcOperator(BaseOperator):
"""
Calls a gRPC endpoint to execute an action.
:param stub_class: The stub client to use for this gRPC call
:param call_func: The client function name to call the gRPC endpoint
:param grpc_conn_id: The connection to run the operator against
:param data: The data to pass to the rpc call
:param interceptors: A list of gRPC interceptor objects to be used on the channel
:param custom_connection_func: The customized connection function to return channel object.
A callable that accepts the connection as its only arg.
:param streaming: A flag to indicate if the call is a streaming call
:param response_callback: The callback function to process the response from gRPC call,
takes in response object and context object, context object can be used to perform
push xcom or other after task actions
:param log_response: A flag to indicate if we need to log the response
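
    Example (a minimal sketch; ``GreeterStub``, ``HelloRequest`` and the service name come from
    generated protobuf code and are illustrative):

    .. code-block:: python

        from example_pb2 import HelloRequest
        from example_pb2_grpc import GreeterStub

        say_hello = GrpcOperator(
            task_id="say_hello",
            stub_class=GreeterStub,
            call_func="SayHello",
            grpc_conn_id="grpc_default",
            data={"request": HelloRequest(name="Airflow")},
            log_response=True,
        )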
"""
template_fields: Sequence[str] = ("stub_class", "call_func", "data")
template_fields_renderers = {"data": "py"}
def __init__(
self,
*,
stub_class: Callable,
call_func: str,
grpc_conn_id: str = "grpc_default",
data: dict | None = None,
interceptors: list[Callable] | None = None,
custom_connection_func: Callable | None = None,
streaming: bool = False,
response_callback: Callable | None = None,
log_response: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.stub_class = stub_class
self.call_func = call_func
self.grpc_conn_id = grpc_conn_id
self.data = data or {}
self.interceptors = interceptors
self.custom_connection_func = custom_connection_func
self.streaming = streaming
self.log_response = log_response
self.response_callback = response_callback
def _get_grpc_hook(self) -> GrpcHook:
return GrpcHook(
self.grpc_conn_id,
interceptors=self.interceptors,
custom_connection_func=self.custom_connection_func,
)
def execute(self, context: Context) -> None:
hook = self._get_grpc_hook()
self.log.info("Calling gRPC service")
# grpc hook always yield
responses = hook.run(self.stub_class, self.call_func, streaming=self.streaming, data=self.data)
for response in responses:
self._handle_response(response, context)
def _handle_response(self, response: Any, context: Context) -> None:
if self.log_response:
self.log.info(repr(response))
if self.response_callback:
self.response_callback(response, context)
| 3,800 | 38.185567 | 103 | py |
airflow | airflow-main/airflow/providers/grpc/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/grpc/hooks/grpc.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""GRPC Hook."""
from __future__ import annotations
from typing import Any, Callable, Generator
import grpc
from google import auth as google_auth
from google.auth import jwt as google_auth_jwt
from google.auth.transport import (
grpc as google_auth_transport_grpc,
requests as google_auth_transport_requests,
)
from airflow.exceptions import AirflowConfigException
from airflow.hooks.base import BaseHook
class GrpcHook(BaseHook):
"""
General interaction with gRPC servers.
:param grpc_conn_id: The connection ID to use when fetching connection info.
:param interceptors: a list of gRPC interceptor objects which would be applied
to the connected gRPC channel. None by default.
Each interceptor should based on or extends the four
official gRPC interceptors, eg, UnaryUnaryClientInterceptor,
UnaryStreamClientInterceptor, StreamUnaryClientInterceptor,
StreamStreamClientInterceptor.
:param custom_connection_func: The customized connection function to return gRPC channel.
A callable that accepts the connection as its only arg.
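
    Example (a minimal sketch; the connection is assumed to define ``auth_type`` in its extras,
    e.g. ``{"auth_type": "NO_AUTH"}``):

    .. code-block:: python

        hook = GrpcHook(grpc_conn_id="grpc_default")
        channel = hook.get_conn()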
"""
conn_name_attr = "grpc_conn_id"
default_conn_name = "grpc_default"
conn_type = "grpc"
hook_name = "GRPC Connection"
@staticmethod
def get_connection_form_widgets() -> dict[str, Any]:
"""Returns connection widgets to add to connection form."""
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import StringField
return {
"auth_type": StringField(lazy_gettext("Grpc Auth Type"), widget=BS3TextFieldWidget()),
"credential_pem_file": StringField(
lazy_gettext("Credential Keyfile Path"), widget=BS3TextFieldWidget()
),
"scopes": StringField(lazy_gettext("Scopes (comma separated)"), widget=BS3TextFieldWidget()),
}
def __init__(
self,
grpc_conn_id: str = default_conn_name,
interceptors: list[Callable] | None = None,
custom_connection_func: Callable | None = None,
) -> None:
super().__init__()
self.grpc_conn_id = grpc_conn_id
self.conn = self.get_connection(self.grpc_conn_id)
self.extras = self.conn.extra_dejson
self.interceptors = interceptors if interceptors else []
self.custom_connection_func = custom_connection_func
def get_conn(self) -> grpc.Channel:
base_url = self.conn.host
if self.conn.port:
base_url = base_url + ":" + str(self.conn.port)
auth_type = self._get_field("auth_type")
if auth_type == "NO_AUTH":
channel = grpc.insecure_channel(base_url)
elif auth_type in {"SSL", "TLS"}:
credential_file_name = self._get_field("credential_pem_file")
with open(credential_file_name, "rb") as credential_file:
creds = grpc.ssl_channel_credentials(credential_file.read())
channel = grpc.secure_channel(base_url, creds)
elif auth_type == "JWT_GOOGLE":
credentials, _ = google_auth.default()
jwt_creds = google_auth_jwt.OnDemandCredentials.from_signing_credentials(credentials)
channel = google_auth_transport_grpc.secure_authorized_channel(jwt_creds, None, base_url)
elif auth_type == "OATH_GOOGLE":
scopes = self._get_field("scopes").split(",")
credentials, _ = google_auth.default(scopes=scopes)
request = google_auth_transport_requests.Request()
channel = google_auth_transport_grpc.secure_authorized_channel(credentials, request, base_url)
elif auth_type == "CUSTOM":
if not self.custom_connection_func:
raise AirflowConfigException(
"Customized connection function not set, not able to establish a channel"
)
channel = self.custom_connection_func(self.conn)
else:
raise AirflowConfigException(
"auth_type not supported or not provided, channel cannot be established, "
f"given value: {str(auth_type)}"
)
if self.interceptors:
for interceptor in self.interceptors:
channel = grpc.intercept_channel(channel, interceptor)
return channel
def run(
self, stub_class: Callable, call_func: str, streaming: bool = False, data: dict | None = None
) -> Generator:
"""Call gRPC function and yield response to caller."""
if data is None:
data = {}
with self.get_conn() as channel:
stub = stub_class(channel)
try:
rpc_func = getattr(stub, call_func)
response = rpc_func(**data)
if not streaming:
yield response
else:
yield from response
except grpc.RpcError as ex:
self.log.exception(
"Error occurred when calling the grpc service: %s, method: %s \
status code: %s, error details: %s",
stub.__class__.__name__,
call_func,
ex.code(),
ex.details(),
)
raise ex
def _get_field(self, field_name: str):
"""Get field from extra, first checking short name, then for backcompat we check for prefixed name."""
backcompat_prefix = "extra__grpc__"
if field_name.startswith("extra__"):
raise ValueError(
f"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix "
"when using this method."
)
if field_name in self.extras:
return self.extras[field_name]
prefixed_name = f"{backcompat_prefix}{field_name}"
if prefixed_name in self.extras:
return self.extras[prefixed_name]
raise KeyError(f"Param {field_name} not found in extra dict")
| 6,853 | 40.792683 | 110 | py |
airflow | airflow-main/airflow/providers/grpc/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/kafka/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "1.1.2"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-kafka:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,537 | 35.619048 | 121 | py |
airflow | airflow-main/airflow/providers/apache/kafka/operators/consume.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import partial
from typing import Any, Callable, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.kafka.hooks.consume import KafkaConsumerHook
from airflow.utils.module_loading import import_string
VALID_COMMIT_CADENCE = {"never", "end_of_batch", "end_of_operator"}
class ConsumeFromTopicOperator(BaseOperator):
"""An operator that consumes from Kafka a topic(s) and processing the messages.
The operator creates a Kafka consumer that reads a batch of messages from the cluster and processes them
using the user supplied callable function. The consumer will continue to read in batches until it reaches
the end of the log or reads a maximum number of messages is reached.
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
:param topics: A list of topics or regex patterns the consumer should subscribe to.
    :param apply_function: The function that should be applied to the messages fetched, one at a time.
        Either a callable, or a string with the module (e.g. the DAG file) and the function name
        delimited by a `.` so it can be imported.
    :param apply_function_batch: The function that should be applied to a batch of messages fetched. Cannot
        be used together with `apply_function`. Intended for transactional workloads where an expensive task
        might be called before or after operations on the messages are taken.
    :param apply_function_args: Additional arguments that should be applied to the callable, defaults to None
    :param apply_function_kwargs: Additional keyword arguments that should be applied to the callable,
        defaults to None
    :param commit_cadence: When consumers should commit offsets ("never", "end_of_batch", "end_of_operator"),
        defaults to "end_of_operator";
        if "end_of_operator", commit() is called once the operator has applied the apply_function to the
        maximum number of messages it will read (see the max_messages arg).
        if "end_of_batch", commit() is called after each batch of messages has been processed by the
        apply_function (see the max_batch_size arg).
        if "never", close() is called without calling commit().
:param max_messages: The maximum total number of messages an operator should read from Kafka,
defaults to None implying read to the end of the topic.
:param max_batch_size: The maximum number of messages a consumer should read when polling,
defaults to 1000
:param poll_timeout: How long the Kafka consumer should wait before determining no more messages are
available, defaults to 60
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ConsumeFromTopicOperator`
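
    Example (a minimal sketch; the topic name and callable are illustrative):

    .. code-block:: python

        def print_message(message):
            print(message.value())

        consume = ConsumeFromTopicOperator(
            task_id="consume_from_topic",
            kafka_config_id="kafka_default",
            topics=["example_topic"],
            apply_function=print_message,
            max_messages=10,
            max_batch_size=5,
        )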
"""
BLUE = "#ffefeb"
ui_color = BLUE
template_fields = (
"topics",
"apply_function",
"apply_function_args",
"apply_function_kwargs",
"kafka_config_id",
)
def __init__(
self,
topics: str | Sequence[str],
kafka_config_id: str = "kafka_default",
apply_function: Callable[..., Any] | str | None = None,
apply_function_batch: Callable[..., Any] | str | None = None,
apply_function_args: Sequence[Any] | None = None,
apply_function_kwargs: dict[Any, Any] | None = None,
commit_cadence: str | None = "end_of_operator",
max_messages: int | None = None,
max_batch_size: int = 1000,
poll_timeout: float = 60,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.topics = topics
self.apply_function = apply_function
self.apply_function_batch = apply_function_batch
self.apply_function_args = apply_function_args or ()
self.apply_function_kwargs = apply_function_kwargs or {}
self.kafka_config_id = kafka_config_id
self.commit_cadence = commit_cadence
self.max_messages = max_messages or True
self.max_batch_size = max_batch_size
self.poll_timeout = poll_timeout
if self.max_messages is True:
self.read_to_end = True
else:
self.read_to_end = False
if self.commit_cadence not in VALID_COMMIT_CADENCE:
raise AirflowException(
f"commit_cadence must be one of {VALID_COMMIT_CADENCE}. Got {self.commit_cadence}"
)
if self.max_messages and self.max_batch_size > self.max_messages:
self.log.warning(
"max_batch_size (%s) > max_messages (%s). Setting max_messages to %s ",
self.max_batch_size,
self.max_messages,
self.max_batch_size,
)
if self.commit_cadence == "never":
self.commit_cadence = None
if apply_function and apply_function_batch:
raise AirflowException(
"One of apply_function or apply_function_batch must be supplied, not both."
)
def execute(self, context) -> Any:
consumer = KafkaConsumerHook(topics=self.topics, kafka_config_id=self.kafka_config_id).get_consumer()
if isinstance(self.apply_function, str):
self.apply_function = import_string(self.apply_function)
if isinstance(self.apply_function_batch, str):
self.apply_function_batch = import_string(self.apply_function_batch)
if self.apply_function:
apply_callable = partial(
self.apply_function, *self.apply_function_args, **self.apply_function_kwargs # type: ignore
)
if self.apply_function_batch:
apply_callable = partial(
self.apply_function_batch, # type: ignore
*self.apply_function_args,
**self.apply_function_kwargs,
)
messages_left = self.max_messages
while self.read_to_end or (
messages_left > 0
): # bool(True > 0) == True in the case where self.max_messages isn't set by the user
if not isinstance(messages_left, bool):
batch_size = self.max_batch_size if messages_left > self.max_batch_size else messages_left
else:
batch_size = self.max_batch_size
msgs = consumer.consume(num_messages=batch_size, timeout=self.poll_timeout)
messages_left -= len(msgs)
            if not msgs:  # No messages returned: we've reached the end of the log (or timed out).
self.log.info("Reached end of log. Exiting.")
break
if self.apply_function:
for m in msgs:
apply_callable(m)
if self.apply_function_batch:
apply_callable(msgs)
if self.commit_cadence == "end_of_batch":
self.log.info("committing offset at %s", self.commit_cadence)
consumer.commit()
if self.commit_cadence:
self.log.info("committing offset at %s", self.commit_cadence)
consumer.commit()
consumer.close()
return
| 8,020 | 41.664894 | 109 | py |
airflow | airflow-main/airflow/providers/apache/kafka/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/kafka/operators/produce.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import logging
from functools import partial
from typing import Any, Callable, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.kafka.hooks.produce import KafkaProducerHook
from airflow.utils.module_loading import import_string
local_logger = logging.getLogger("airflow")
def acked(err, msg):
if err is not None:
local_logger.error(f"Failed to deliver message: {err}")
else:
local_logger.info(
f"Produced record to topic {msg.topic()} partition [{msg.partition()}] @ offset {msg.offset()}"
)
class ProduceToTopicOperator(BaseOperator):
"""An operator that produces messages to a Kafka topic.
    Registers a producer to a Kafka topic and publishes the messages supplied by the producer function,
    logging delivery results via the delivery callback.
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
:param topic: The topic the producer should produce to, defaults to None
:param producer_function: The function that generates key/value pairs as messages for production,
defaults to None
:param producer_function_args: Additional arguments to be applied to the producer callable,
defaults to None
:param producer_function_kwargs: Additional keyword arguments to be applied to the producer callable,
defaults to None
:param delivery_callback: The callback to apply after delivery(or failure) of a message, defaults to None
:param synchronous: If writes to kafka should be fully synchronous, defaults to True
:param poll_timeout: How long of a delay should be applied when calling poll after production to kafka,
defaults to 0
    :raises AirflowException: If `topic` or `producer_function` is not provided.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ProduceToTopicOperator`
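
    Example (a minimal sketch; the topic name and generator are illustrative):

    .. code-block:: python

        def producer_function():
            for i in range(5):
                yield (f"key-{i}", f"value-{i}")

        produce = ProduceToTopicOperator(
            task_id="produce_to_topic",
            kafka_config_id="kafka_default",
            topic="example_topic",
            producer_function=producer_function,
        )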
"""
template_fields = (
"topic",
"producer_function",
"producer_function_args",
"producer_function_kwargs",
"kafka_config_id",
)
def __init__(
self,
topic: str,
producer_function: str | Callable[..., Any],
kafka_config_id: str = "kafka_default",
producer_function_args: Sequence[Any] | None = None,
producer_function_kwargs: dict[Any, Any] | None = None,
delivery_callback: str | None = None,
synchronous: bool = True,
poll_timeout: float = 0,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
if delivery_callback:
dc = import_string(delivery_callback)
else:
dc = acked
self.kafka_config_id = kafka_config_id
self.topic = topic
self.producer_function = producer_function
self.producer_function_args = producer_function_args or ()
self.producer_function_kwargs = producer_function_kwargs or {}
self.delivery_callback = dc
self.synchronous = synchronous
self.poll_timeout = poll_timeout
if not (self.topic and self.producer_function):
raise AirflowException(
"topic and producer_function must be provided. Got topic="
f"{self.topic} and producer_function={self.producer_function}"
)
return
def execute(self, context) -> None:
# Get producer and callable
producer = KafkaProducerHook(kafka_config_id=self.kafka_config_id).get_producer()
if isinstance(self.producer_function, str):
self.producer_function = import_string(self.producer_function)
producer_callable = partial(
self.producer_function, # type: ignore
*self.producer_function_args,
**self.producer_function_kwargs,
)
# For each returned k/v in the callable : publish and flush if needed.
for k, v in producer_callable():
producer.produce(self.topic, key=k, value=v, on_delivery=self.delivery_callback)
producer.poll(self.poll_timeout)
if self.synchronous:
producer.flush()
producer.flush()
| 4,951 | 36.801527 | 109 | py |
airflow | airflow-main/airflow/providers/apache/kafka/triggers/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/kafka/triggers/await_message.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import asyncio
from functools import partial
from typing import Any, Sequence
from asgiref.sync import sync_to_async
from airflow import AirflowException
from airflow.providers.apache.kafka.hooks.consume import KafkaConsumerHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
from airflow.utils.module_loading import import_string
class AwaitMessageTrigger(BaseTrigger):
"""A trigger that waits for a message matching specific criteria to arrive in Kafka.
The behavior of the consumer of this trigger is as follows:
- poll the Kafka topics for a message, if no message returned, sleep
    - process the message with the provided callable and commit the message offset:
        - if the callable returns any data, emit a TriggerEvent with the returned data
        - else continue to the next message
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
:param topics: The topic (or topic regex) that should be searched for messages
:param apply_function: the location of the function to apply to messages for determination of matching
criteria. (In python dot notation as a string)
:param apply_function_args: A set of arguments to apply to the callable, defaults to None
    :param apply_function_kwargs: A set of keyword arguments to apply to the callable, defaults to None
:param poll_timeout: How long the Kafka client should wait before returning from a poll request to
Kafka (seconds), defaults to 1
    :param poll_interval: How long the trigger should sleep after reaching the end of the Kafka log
(seconds), defaults to 5
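
    Example (a minimal sketch; ``my_module.match_even_keys`` is an illustrative import path to a
    function that returns the message value when the message key matches):

    .. code-block:: python

        trigger = AwaitMessageTrigger(
            topics=["example_topic"],
            apply_function="my_module.match_even_keys",
            kafka_config_id="kafka_default",
            poll_timeout=1,
            poll_interval=5,
        )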
"""
def __init__(
self,
topics: Sequence[str],
apply_function: str,
kafka_config_id: str = "kafka_default",
apply_function_args: Sequence[Any] | None = None,
apply_function_kwargs: dict[Any, Any] | None = None,
poll_timeout: float = 1,
poll_interval: float = 5,
) -> None:
self.topics = topics
self.apply_function = apply_function
self.apply_function_args = apply_function_args or ()
self.apply_function_kwargs = apply_function_kwargs or {}
self.kafka_config_id = kafka_config_id
self.poll_timeout = poll_timeout
self.poll_interval = poll_interval
def serialize(self) -> tuple[str, dict[str, Any]]:
return (
"airflow.providers.apache.kafka.triggers.await_message.AwaitMessageTrigger",
{
"topics": self.topics,
"apply_function": self.apply_function,
"apply_function_args": self.apply_function_args,
"apply_function_kwargs": self.apply_function_kwargs,
"kafka_config_id": self.kafka_config_id,
"poll_timeout": self.poll_timeout,
"poll_interval": self.poll_interval,
},
)
async def run(self):
consumer_hook = KafkaConsumerHook(topics=self.topics, kafka_config_id=self.kafka_config_id)
async_get_consumer = sync_to_async(consumer_hook.get_consumer)
consumer = await async_get_consumer()
async_poll = sync_to_async(consumer.poll)
async_commit = sync_to_async(consumer.commit)
processing_call = import_string(self.apply_function)
processing_call = partial(processing_call, *self.apply_function_args, **self.apply_function_kwargs)
async_message_process = sync_to_async(processing_call)
while True:
message = await async_poll(self.poll_timeout)
if message is None:
continue
elif message.error():
raise AirflowException(f"Error: {message.error()}")
else:
rv = await async_message_process(message)
if rv:
await async_commit(asynchronous=False)
yield TriggerEvent(rv)
break
else:
await async_commit(asynchronous=False)
await asyncio.sleep(self.poll_interval)
| 4,940 | 40.175 | 107 | py |
airflow | airflow-main/airflow/providers/apache/kafka/hooks/base.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import Any
from confluent_kafka.admin import AdminClient
from airflow.hooks.base import BaseHook
class KafkaBaseHook(BaseHook):
"""
A base hook for interacting with Apache Kafka.
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
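
    Example connection definition (a minimal sketch; the broker address and the JSON-style
    environment variable are illustrative — any connection mechanism works as long as the
    client config dict is stored in the connection's extra field):

    .. code-block:: python

        import os

        os.environ["AIRFLOW_CONN_KAFKA_DEFAULT"] = (
            '{"conn_type": "kafka", "extra": {"bootstrap.servers": "localhost:9092"}}'
        )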
"""
conn_name_attr = "kafka_config_id"
default_conn_name = "kafka_default"
conn_type = "kafka"
hook_name = "Apache Kafka"
def __init__(self, kafka_config_id=default_conn_name, *args, **kwargs):
"""Initialize our Base."""
super().__init__()
self.kafka_config_id = kafka_config_id
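        # Accessing the cached property eagerly builds the client so misconfiguration fails at hook creation.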
self.get_conn
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema", "login", "password", "port", "host"],
"relabeling": {"extra": "Config Dict"},
"placeholders": {
"extra": '{"bootstrap.servers": "localhost:9092"}',
},
}
def _get_client(self, config):
raise NotImplementedError
@cached_property
def get_conn(self) -> Any:
"""Get the configuration object."""
config = self.get_connection(self.kafka_config_id).extra_dejson
if not (config.get("bootstrap.servers", None)):
raise ValueError("config['bootstrap.servers'] must be provided.")
return self._get_client(config)
def test_connection(self) -> tuple[bool, str]:
"""Test Connectivity from the UI."""
try:
config = self.get_connection(self.kafka_config_id).extra_dejson
            t = AdminClient(config).list_topics(timeout=10)
if t:
return True, "Connection successful."
except Exception as e:
            return False, str(e)
return False, "Failed to establish connection."
| 2,736 | 33.2125 | 85 | py |
airflow | airflow-main/airflow/providers/apache/kafka/hooks/consume.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Sequence
from confluent_kafka import Consumer
from airflow.providers.apache.kafka.hooks.base import KafkaBaseHook
class KafkaConsumerHook(KafkaBaseHook):
"""
A hook for creating a Kafka Consumer.
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
:param topics: A list of topics to subscribe to.
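
    Example (a minimal sketch; the topic name is illustrative and the connection's config dict is
    assumed to also define ``group.id``):

    .. code-block:: python

        hook = KafkaConsumerHook(topics=["example_topic"], kafka_config_id="kafka_default")
        consumer = hook.get_consumer()
        messages = consumer.consume(num_messages=10, timeout=5)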
"""
def __init__(self, topics: Sequence[str], kafka_config_id=KafkaBaseHook.default_conn_name) -> None:
super().__init__(kafka_config_id=kafka_config_id)
self.topics = topics
def _get_client(self, config) -> Consumer:
return Consumer(config)
def get_consumer(self) -> Consumer:
"""Returns a Consumer that has been subscribed to topics."""
consumer = self.get_conn
consumer.subscribe(self.topics)
return consumer
| 1,677 | 33.958333 | 103 | py |
airflow | airflow-main/airflow/providers/apache/kafka/hooks/client.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Sequence
from confluent_kafka import KafkaException
from confluent_kafka.admin import AdminClient, NewTopic
from airflow.providers.apache.kafka.hooks.base import KafkaBaseHook
class KafkaAdminClientHook(KafkaBaseHook):
"""
A hook for interacting with the Kafka Cluster.
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
"""
def __init__(self, kafka_config_id=KafkaBaseHook.default_conn_name) -> None:
super().__init__(kafka_config_id=kafka_config_id)
def _get_client(self, config) -> AdminClient:
return AdminClient(config)
def create_topic(
self,
topics: Sequence[Sequence[Any]],
) -> None:
"""Creates a topic.
:param topics: a list of topics to create including the number of partitions for the topic
and the replication factor. Format: [ ("topic_name", number of partitions, replication factor)]
"""
admin_client = self.get_conn
new_topics = [NewTopic(t[0], num_partitions=t[1], replication_factor=t[2]) for t in topics]
futures = admin_client.create_topics(new_topics)
for t, f in futures.items():
try:
f.result()
self.log.info("The topic %s has been created.", t)
except KafkaException as e:
if e.args[0].name == "TOPIC_ALREADY_EXISTS":
self.log.warning("The topic %s already exists.", t)
else:
raise
| 2,353 | 35.78125 | 105 | py |
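# --- Hedged usage sketch (not part of the provider source above) ---
# Creating topics with KafkaAdminClientHook.create_topic. Each tuple follows
# the format documented above: (topic_name, number of partitions, replication
# factor). Topic names and the connection id are illustrative assumptions.
from airflow.providers.apache.kafka.hooks.client import KafkaAdminClientHook

hook = KafkaAdminClientHook(kafka_config_id="kafka_default")
hook.create_topic(topics=[("events", 3, 1), ("events_audit", 1, 1)])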
airflow | airflow-main/airflow/providers/apache/kafka/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/kafka/hooks/produce.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from confluent_kafka import Producer
from airflow.providers.apache.kafka.hooks.base import KafkaBaseHook
class KafkaProducerHook(KafkaBaseHook):
"""
A hook for creating a Kafka Producer.
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
"""
def __init__(self, kafka_config_id=KafkaBaseHook.default_conn_name) -> None:
super().__init__(kafka_config_id=kafka_config_id)
def _get_client(self, config) -> Producer:
return Producer(config)
def get_producer(self) -> Producer:
"""Returns a producer object for sending messages to Kafka."""
producer = self.get_conn
self.log.info("Producer %s", producer)
return producer
| 1,551 | 35.093023 | 85 | py |
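# --- Hedged usage sketch (not part of the provider source above) ---
# Producing a message with KafkaProducerHook. The topic name, payload and
# connection id are assumptions; flush() blocks until outstanding messages
# have been delivered or the timeout elapses.
from airflow.providers.apache.kafka.hooks.produce import KafkaProducerHook


def _delivery_report(err, msg):
    # Invoked once per message by confluent-kafka after delivery (or failure).
    if err is not None:
        print(f"Delivery failed: {err}")
    else:
        print(f"Delivered to {msg.topic()} [partition {msg.partition()}]")


producer = KafkaProducerHook(kafka_config_id="kafka_default").get_producer()
producer.produce("test_topic", key=b"key", value=b"value", on_delivery=_delivery_report)
producer.flush(10)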
airflow | airflow-main/airflow/providers/apache/kafka/sensors/kafka.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Callable, Sequence
from airflow.models import BaseOperator
from airflow.providers.apache.kafka.triggers.await_message import AwaitMessageTrigger
VALID_COMMIT_CADENCE = {"never", "end_of_batch", "end_of_operator"}
class AwaitMessageSensor(BaseOperator):
"""An Airflow sensor that defers until a specific message is published to Kafka.
The sensor creates a consumer that reads the Kafka log until it encounters a positive event.
The behavior of the consumer for this trigger is as follows:
- poll the Kafka topics for a message
- if no message returned, sleep
- process the message with provided callable and commit the message offset
- if callable returns any data, raise a TriggerEvent with the return data
- else continue to next message
- return event (as default xcom or specific xcom key)
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
:param topics: Topics (or topic regex) to use for reading from
:param apply_function: The function to apply to messages to determine if an event occurred. As a dot
notation string.
:param apply_function_args: Arguments to be applied to the processing function,
defaults to None
:param apply_function_kwargs: Key word arguments to be applied to the processing function,
defaults to None
:param poll_timeout: How long the kafka consumer should wait for a message to arrive from the kafka
        cluster, defaults to 1
:param poll_interval: How long the kafka consumer should sleep after reaching the end of the Kafka log,
defaults to 5
:param xcom_push_key: the name of a key to push the returned message to, defaults to None
"""
BLUE = "#ffefeb"
ui_color = BLUE
template_fields = (
"topics",
"apply_function",
"apply_function_args",
"apply_function_kwargs",
"kafka_config_id",
)
def __init__(
self,
topics: Sequence[str],
apply_function: str,
kafka_config_id: str = "kafka_default",
apply_function_args: Sequence[Any] | None = None,
apply_function_kwargs: dict[Any, Any] | None = None,
poll_timeout: float = 1,
poll_interval: float = 5,
xcom_push_key=None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.topics = topics
self.apply_function = apply_function
self.apply_function_args = apply_function_args
self.apply_function_kwargs = apply_function_kwargs
self.kafka_config_id = kafka_config_id
self.poll_timeout = poll_timeout
self.poll_interval = poll_interval
self.xcom_push_key = xcom_push_key
def execute(self, context) -> Any:
self.defer(
trigger=AwaitMessageTrigger(
topics=self.topics,
apply_function=self.apply_function,
apply_function_args=self.apply_function_args,
apply_function_kwargs=self.apply_function_kwargs,
kafka_config_id=self.kafka_config_id,
poll_timeout=self.poll_timeout,
poll_interval=self.poll_interval,
),
method_name="execute_complete",
)
def execute_complete(self, context, event=None):
if self.xcom_push_key:
self.xcom_push(context, key=self.xcom_push_key, value=event)
return event
class AwaitMessageTriggerFunctionSensor(BaseOperator):
"""
Defer until a specific message is published to Kafka, trigger a registered function, then resume waiting.
The behavior of the consumer for this trigger is as follows:
- poll the Kafka topics for a message
- if no message returned, sleep
- process the message with provided callable and commit the message offset
- if callable returns any data, raise a TriggerEvent with the return data
- else continue to next message
- return event (as default xcom or specific xcom key)
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
:param topics: Topics (or topic regex) to use for reading from
:param apply_function: The function to apply to messages to determine if an event occurred. As a dot
notation string.
:param event_triggered_function: The callable to trigger once the apply_function encounters a
positive event.
:param apply_function_args: Arguments to be applied to the processing function, defaults to None
:param apply_function_kwargs: Key word arguments to be applied to the processing function,
defaults to None
:param poll_timeout: How long the kafka consumer should wait for a message to arrive from the kafka
cluster, defaults to 1
:param poll_interval: How long the kafka consumer should sleep after reaching the end of the Kafka log,
defaults to 5
"""
BLUE = "#ffefeb"
ui_color = BLUE
template_fields = (
"topics",
"apply_function",
"apply_function_args",
"apply_function_kwargs",
"kafka_config_id",
)
def __init__(
self,
topics: Sequence[str],
apply_function: str,
event_triggered_function: Callable,
kafka_config_id: str = "kafka_default",
apply_function_args: Sequence[Any] | None = None,
apply_function_kwargs: dict[Any, Any] | None = None,
poll_timeout: float = 1,
poll_interval: float = 5,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.topics = topics
self.apply_function = apply_function
self.apply_function_args = apply_function_args
self.apply_function_kwargs = apply_function_kwargs
self.kafka_config_id = kafka_config_id
self.poll_timeout = poll_timeout
self.poll_interval = poll_interval
self.event_triggered_function = event_triggered_function
if not callable(self.event_triggered_function):
raise TypeError(
"parameter event_triggered_function is expected to be of type callable,"
f"got {type(event_triggered_function)}"
)
def execute(self, context, event=None) -> Any:
self.defer(
trigger=AwaitMessageTrigger(
topics=self.topics,
apply_function=self.apply_function,
apply_function_args=self.apply_function_args,
apply_function_kwargs=self.apply_function_kwargs,
kafka_config_id=self.kafka_config_id,
poll_timeout=self.poll_timeout,
poll_interval=self.poll_interval,
),
method_name="execute_complete",
)
return event
def execute_complete(self, context, event=None):
self.event_triggered_function(event, **context)
self.defer(
trigger=AwaitMessageTrigger(
topics=self.topics,
apply_function=self.apply_function,
apply_function_args=self.apply_function_args,
apply_function_kwargs=self.apply_function_kwargs,
kafka_config_id=self.kafka_config_id,
poll_timeout=self.poll_timeout,
poll_interval=self.poll_interval,
),
method_name="execute_complete",
)
| 8,219 | 36.706422 | 109 | py |
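# --- Hedged usage sketch (not part of the provider source above) ---
# Wiring AwaitMessageSensor into a DAG. `apply_function` is a dot-notation
# string imported by the triggerer; it is assumed here to live in an
# importable module `my_pkg.kafka_filters` and to return a truthy value when
# the awaited message arrives. DAG id, topic and connection id are
# illustrative.
#
#   # my_pkg/kafka_filters.py (assumed helper)
#   def value_is_done(message):
#       if message.value() == b"done":
#           return message.value().decode()
#       return None
from pendulum import datetime

from airflow import DAG
from airflow.providers.apache.kafka.sensors.kafka import AwaitMessageSensor

with DAG(
    dag_id="kafka_await_message_example",
    start_date=datetime(2023, 1, 1),
    schedule=None,
    catchup=False,
) as dag:
    wait_for_done = AwaitMessageSensor(
        task_id="wait_for_done",
        kafka_config_id="kafka_default",
        topics=["test_topic"],
        apply_function="my_pkg.kafka_filters.value_is_done",
        poll_timeout=1,
        poll_interval=5,
        xcom_push_key="retrieved_message",
    )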
airflow | airflow-main/airflow/providers/apache/kafka/sensors/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/livy/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.5.2"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-livy:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,536 | 35.595238 | 120 | py |
airflow | airflow-main/airflow/providers/apache/livy/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/livy/operators/livy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains the Apache Livy operator."""
from __future__ import annotations
from time import sleep
from typing import TYPE_CHECKING, Any, Sequence
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.livy.hooks.livy import BatchState, LivyHook
from airflow.providers.apache.livy.triggers.livy import LivyTrigger
if TYPE_CHECKING:
from airflow.utils.context import Context
class LivyOperator(BaseOperator):
"""
    Wraps the Apache Livy batch REST API, allowing you to submit a Spark application to the underlying cluster.
:param file: path of the file containing the application to execute (required). (templated)
:param class_name: name of the application Java/Spark main class. (templated)
:param args: application command line arguments. (templated)
    :param jars: jars to be used in this session. (templated)
:param py_files: python files to be used in this session. (templated)
:param files: files to be used in this session. (templated)
:param driver_memory: amount of memory to use for the driver process. (templated)
:param driver_cores: number of cores to use for the driver process. (templated)
:param executor_memory: amount of memory to use per executor process. (templated)
:param executor_cores: number of cores to use for each executor. (templated)
:param num_executors: number of executors to launch for this session. (templated)
:param archives: archives to be used in this session. (templated)
:param queue: name of the YARN queue to which the application is submitted. (templated)
:param name: name of this session. (templated)
:param conf: Spark configuration properties. (templated)
:param proxy_user: user to impersonate when running the job. (templated)
:param livy_conn_id: reference to a pre-defined Livy Connection.
:param livy_conn_auth_type: The auth type for the Livy Connection.
    :param polling_interval: time in seconds between polls for job completion. If 0 (the default),
        the operator only submits the batch and does not poll for termination.
:param extra_options: A dictionary of options, where key is string and value
depends on the option that's being modified.
:param extra_headers: A dictionary of headers passed to the HTTP request to livy.
    :param retry_args: Arguments which define the retry behaviour.
        See Tenacity documentation at https://github.com/jd/tenacity
    :param deferrable: Run operator in the deferrable mode
"""
template_fields: Sequence[str] = ("spark_params",)
template_fields_renderers = {"spark_params": "json"}
def __init__(
self,
*,
file: str,
class_name: str | None = None,
args: Sequence[str | int | float] | None = None,
conf: dict[Any, Any] | None = None,
jars: Sequence[str] | None = None,
py_files: Sequence[str] | None = None,
files: Sequence[str] | None = None,
driver_memory: str | None = None,
driver_cores: int | str | None = None,
executor_memory: str | None = None,
executor_cores: int | str | None = None,
num_executors: int | str | None = None,
archives: Sequence[str] | None = None,
queue: str | None = None,
name: str | None = None,
proxy_user: str | None = None,
livy_conn_id: str = "livy_default",
livy_conn_auth_type: Any | None = None,
polling_interval: int = 0,
extra_options: dict[str, Any] | None = None,
extra_headers: dict[str, Any] | None = None,
retry_args: dict[str, Any] | None = None,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.spark_params = {
"file": file,
"class_name": class_name,
"args": args,
"jars": jars,
"py_files": py_files,
"files": files,
"driver_memory": driver_memory,
"driver_cores": driver_cores,
"executor_memory": executor_memory,
"executor_cores": executor_cores,
"num_executors": num_executors,
"archives": archives,
"queue": queue,
"name": name,
"conf": conf,
"proxy_user": proxy_user,
}
self._livy_conn_id = livy_conn_id
self._livy_conn_auth_type = livy_conn_auth_type
self._polling_interval = polling_interval
self._extra_options = extra_options or {}
self._extra_headers = extra_headers or {}
self._livy_hook: LivyHook | None = None
self._batch_id: int | str
self.retry_args = retry_args
self.deferrable = deferrable
def get_hook(self) -> LivyHook:
"""
Get valid hook.
:return: hook
"""
if self._livy_hook is None or not isinstance(self._livy_hook, LivyHook):
self._livy_hook = LivyHook(
livy_conn_id=self._livy_conn_id,
extra_headers=self._extra_headers,
extra_options=self._extra_options,
auth_type=self._livy_conn_auth_type,
)
return self._livy_hook
def execute(self, context: Context) -> Any:
self._batch_id = self.get_hook().post_batch(**self.spark_params)
self.log.info("Generated batch-id is %s", self._batch_id)
# Wait for the job to complete
if not self.deferrable:
if self._polling_interval > 0:
self.poll_for_termination(self._batch_id)
context["ti"].xcom_push(key="app_id", value=self.get_hook().get_batch(self._batch_id)["appId"])
return self._batch_id
hook = self.get_hook()
state = hook.get_batch_state(self._batch_id, retry_args=self.retry_args)
self.log.debug("Batch with id %s is in state: %s", self._batch_id, state.value)
if state not in hook.TERMINAL_STATES:
self.defer(
timeout=self.execution_timeout,
trigger=LivyTrigger(
batch_id=self._batch_id,
spark_params=self.spark_params,
livy_conn_id=self._livy_conn_id,
polling_interval=self._polling_interval,
extra_options=self._extra_options,
extra_headers=self._extra_headers,
),
method_name="execute_complete",
)
else:
self.log.info("Batch with id %s terminated with state: %s", self._batch_id, state.value)
hook.dump_batch_logs(self._batch_id)
if state != BatchState.SUCCESS:
raise AirflowException(f"Batch {self._batch_id} did not succeed")
context["ti"].xcom_push(key="app_id", value=self.get_hook().get_batch(self._batch_id)["appId"])
return self._batch_id
def poll_for_termination(self, batch_id: int | str) -> None:
"""
        Poll Livy for batch termination.
:param batch_id: id of the batch session to monitor.
"""
hook = self.get_hook()
state = hook.get_batch_state(batch_id, retry_args=self.retry_args)
while state not in hook.TERMINAL_STATES:
self.log.debug("Batch with id %s is in state: %s", batch_id, state.value)
sleep(self._polling_interval)
state = hook.get_batch_state(batch_id, retry_args=self.retry_args)
self.log.info("Batch with id %s terminated with state: %s", batch_id, state.value)
hook.dump_batch_logs(batch_id)
if state != BatchState.SUCCESS:
raise AirflowException(f"Batch {batch_id} did not succeed")
def on_kill(self) -> None:
self.kill()
def kill(self) -> None:
"""Delete the current batch session."""
if self._batch_id is not None:
self.get_hook().delete_batch(self._batch_id)
def execute_complete(self, context: Context, event: dict[str, Any]) -> Any:
"""
Callback for when the trigger fires - returns immediately.
Relies on trigger to throw an exception, otherwise it assumes execution was successful.
"""
# dump the logs from livy to worker through triggerer.
if event.get("log_lines", None) is not None:
for log_line in event["log_lines"]:
self.log.info(log_line)
if event["status"] == "error":
raise AirflowException(event["response"])
self.log.info(
"%s completed with response %s",
self.task_id,
event["response"],
)
context["ti"].xcom_push(key="app_id", value=self.get_hook().get_batch(event["batch_id"])["appId"])
return event["batch_id"]
| 9,646 | 42.26009 | 107 | py |
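# --- Hedged usage sketch (not part of the provider source above) ---
# Submitting a Spark application through LivyOperator. The jar path, DAG id
# and connection id are assumptions. With polling_interval > 0 the operator
# blocks until the batch reaches a terminal state; the batch id is returned
# (and therefore pushed to XCom as the task's return value).
from pendulum import datetime

from airflow import DAG
from airflow.providers.apache.livy.operators.livy import LivyOperator

with DAG(
    dag_id="livy_operator_example",
    start_date=datetime(2023, 1, 1),
    schedule=None,
    catchup=False,
) as dag:
    submit_pi = LivyOperator(
        task_id="submit_pi",
        livy_conn_id="livy_default",
        file="hdfs:///apps/spark-examples.jar",
        class_name="org.apache.spark.examples.SparkPi",
        args=[10],
        num_executors=1,
        polling_interval=30,  # poll Livy every 30 seconds until termination
    )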
airflow | airflow-main/airflow/providers/apache/livy/triggers/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/livy/triggers/livy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains the Apache Livy Trigger."""
from __future__ import annotations
import asyncio
from typing import Any, AsyncIterator
from airflow.providers.apache.livy.hooks.livy import BatchState, LivyAsyncHook
from airflow.triggers.base import BaseTrigger, TriggerEvent
class LivyTrigger(BaseTrigger):
"""
Check for the state of a previously submitted job with batch_id.
:param batch_id: Batch job id
:param spark_params: Spark parameters; for example,
spark_params = {"file": "test/pi.py", "class_name": "org.apache.spark.examples.SparkPi",
"args": ["/usr/lib/spark/bin/run-example", "SparkPi", "10"],"jars": "command-runner.jar",
"driver_cores": 1, "executor_cores": 4,"num_executors": 1}
:param livy_conn_id: reference to a pre-defined Livy Connection.
    :param polling_interval: time in seconds between polls for job completion. If 0, the trigger
        returns the batch_id immediately; if greater than 0, the Livy job is polled for termination
        at that interval.
:param extra_options: A dictionary of options, where key is string and value
depends on the option that's being modified.
:param extra_headers: A dictionary of headers passed to the HTTP request to livy.
:param livy_hook_async: LivyAsyncHook object
"""
def __init__(
self,
batch_id: int | str,
spark_params: dict[Any, Any],
livy_conn_id: str = "livy_default",
polling_interval: int = 0,
extra_options: dict[str, Any] | None = None,
extra_headers: dict[str, Any] | None = None,
livy_hook_async: LivyAsyncHook | None = None,
):
super().__init__()
self._batch_id = batch_id
self.spark_params = spark_params
self._livy_conn_id = livy_conn_id
self._polling_interval = polling_interval
self._extra_options = extra_options
self._extra_headers = extra_headers
self._livy_hook_async = livy_hook_async
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serializes LivyTrigger arguments and classpath."""
return (
"airflow.providers.apache.livy.triggers.livy.LivyTrigger",
{
"batch_id": self._batch_id,
"spark_params": self.spark_params,
"livy_conn_id": self._livy_conn_id,
"polling_interval": self._polling_interval,
"extra_options": self._extra_options,
"extra_headers": self._extra_headers,
"livy_hook_async": self._livy_hook_async,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Run the trigger.
        If ``_polling_interval > 0``, this polls Livy for batch termination
asynchronously. Otherwise the success response is created immediately.
"""
try:
if self._polling_interval > 0:
response = await self.poll_for_termination(self._batch_id)
yield TriggerEvent(response)
yield TriggerEvent(
{
"status": "success",
"batch_id": self._batch_id,
"response": f"Batch {self._batch_id} succeeded",
"log_lines": None,
}
)
except Exception as exc:
yield TriggerEvent(
{
"status": "error",
"batch_id": self._batch_id,
"response": f"Batch {self._batch_id} did not succeed with {str(exc)}",
"log_lines": None,
}
)
async def poll_for_termination(self, batch_id: int | str) -> dict[str, Any]:
"""
        Poll Livy for batch termination asynchronously.
:param batch_id: id of the batch session to monitor.
"""
hook = self._get_async_hook()
state = await hook.get_batch_state(batch_id)
self.log.info("Batch with id %s is in state: %s", batch_id, state["batch_state"].value)
while state["batch_state"] not in hook.TERMINAL_STATES:
self.log.info("Batch with id %s is in state: %s", batch_id, state["batch_state"].value)
self.log.info("Sleeping for %s seconds", self._polling_interval)
await asyncio.sleep(self._polling_interval)
state = await hook.get_batch_state(batch_id)
self.log.info("Batch with id %s terminated with state: %s", batch_id, state["batch_state"].value)
log_lines = await hook.dump_batch_logs(batch_id)
if state["batch_state"] != BatchState.SUCCESS:
return {
"status": "error",
"batch_id": batch_id,
"response": f"Batch {batch_id} did not succeed",
"log_lines": log_lines,
}
return {
"status": "success",
"batch_id": batch_id,
"response": f"Batch {batch_id} succeeded",
"log_lines": log_lines,
}
def _get_async_hook(self) -> LivyAsyncHook:
if self._livy_hook_async is None or not isinstance(self._livy_hook_async, LivyAsyncHook):
self._livy_hook_async = LivyAsyncHook(
livy_conn_id=self._livy_conn_id,
extra_headers=self._extra_headers,
extra_options=self._extra_options,
)
return self._livy_hook_async
| 6,251 | 41.530612 | 109 | py |
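# --- Hedged usage sketch (not part of the provider source above) ---
# LivyTrigger is normally instantiated by LivyOperator when it defers, but it
# can also be driven directly (for example in a test) by consuming its async
# generator. The batch id and connection id are illustrative assumptions.
import asyncio

from airflow.providers.apache.livy.triggers.livy import LivyTrigger


async def wait_for_batch() -> None:
    trigger = LivyTrigger(
        batch_id=42,
        spark_params={},
        livy_conn_id="livy_default",
        polling_interval=10,
    )
    async for event in trigger.run():  # yields a TriggerEvent once terminal
        print(event.payload)
        break


# asyncio.run(wait_for_batch())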
airflow | airflow-main/airflow/providers/apache/livy/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/livy/hooks/livy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains the Apache Livy hook."""
from __future__ import annotations
import asyncio
import json
import re
from enum import Enum
from typing import Any, Sequence
import aiohttp
import requests
from aiohttp import ClientResponseError
from asgiref.sync import sync_to_async
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.http.hooks.http import HttpAsyncHook, HttpHook
from airflow.utils.log.logging_mixin import LoggingMixin
class BatchState(Enum):
"""Batch session states."""
NOT_STARTED = "not_started"
STARTING = "starting"
RUNNING = "running"
IDLE = "idle"
BUSY = "busy"
SHUTTING_DOWN = "shutting_down"
ERROR = "error"
DEAD = "dead"
KILLED = "killed"
SUCCESS = "success"
class LivyHook(HttpHook, LoggingMixin):
"""
Hook for Apache Livy through the REST API.
:param livy_conn_id: reference to a pre-defined Livy Connection.
:param extra_options: A dictionary of options passed to Livy.
:param extra_headers: A dictionary of headers passed to the HTTP request to livy.
:param auth_type: The auth type for the service.
.. seealso::
For more details refer to the Apache Livy API reference:
https://livy.apache.org/docs/latest/rest-api.html
"""
TERMINAL_STATES = {
BatchState.SUCCESS,
BatchState.DEAD,
BatchState.KILLED,
BatchState.ERROR,
}
_def_headers = {"Content-Type": "application/json", "Accept": "application/json"}
conn_name_attr = "livy_conn_id"
default_conn_name = "livy_default"
conn_type = "livy"
hook_name = "Apache Livy"
def __init__(
self,
livy_conn_id: str = default_conn_name,
extra_options: dict[str, Any] | None = None,
extra_headers: dict[str, Any] | None = None,
auth_type: Any | None = None,
) -> None:
super().__init__(http_conn_id=livy_conn_id)
self.extra_headers = extra_headers or {}
self.extra_options = extra_options or {}
if auth_type:
self.auth_type = auth_type
def get_conn(self, headers: dict[str, Any] | None = None) -> Any:
"""
Returns http session for use with requests.
:param headers: additional headers to be passed through as a dictionary
:return: requests session
"""
tmp_headers = self._def_headers.copy() # setting default headers
if headers:
tmp_headers.update(headers)
return super().get_conn(tmp_headers)
def run_method(
self,
endpoint: str,
method: str = "GET",
data: Any | None = None,
headers: dict[str, Any] | None = None,
retry_args: dict[str, Any] | None = None,
) -> Any:
"""
        Wrapper for HttpHook, allows changing the HTTP method on the same HttpHook instance.
:param method: http method
:param endpoint: endpoint
:param data: request payload
:param headers: headers
:param retry_args: Arguments which define the retry behaviour.
See Tenacity documentation at https://github.com/jd/tenacity
:return: http response
"""
if method not in ("GET", "POST", "PUT", "DELETE", "HEAD"):
raise ValueError(f"Invalid http method '{method}'")
if not self.extra_options:
self.extra_options = {"check_response": False}
back_method = self.method
self.method = method
try:
if retry_args:
result = self.run_with_advanced_retry(
endpoint=endpoint,
data=data,
headers=headers,
extra_options=self.extra_options,
_retry_args=retry_args,
)
else:
result = self.run(endpoint, data, headers, self.extra_options)
finally:
self.method = back_method
return result
def post_batch(self, *args: Any, **kwargs: Any) -> int:
"""
Perform request to submit batch.
:return: batch session id
"""
batch_submit_body = json.dumps(self.build_post_batch_body(*args, **kwargs))
if self.base_url is None:
# need to init self.base_url
self.get_conn()
self.log.info("Submitting job %s to %s", batch_submit_body, self.base_url)
response = self.run_method(
method="POST", endpoint="/batches", data=batch_submit_body, headers=self.extra_headers
)
self.log.debug("Got response: %s", response.text)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
raise AirflowException(
"Could not submit batch. "
f"Status code: {err.response.status_code}. Message: '{err.response.text}'"
)
batch_id = self._parse_post_response(response.json())
if batch_id is None:
raise AirflowException("Unable to parse the batch session id")
self.log.info("Batch submitted with session id: %s", batch_id)
return batch_id
def get_batch(self, session_id: int | str) -> dict:
"""
Fetch info about the specified batch.
:param session_id: identifier of the batch sessions
:return: response body
"""
self._validate_session_id(session_id)
self.log.debug("Fetching info for batch session %s", session_id)
response = self.run_method(endpoint=f"/batches/{session_id}", headers=self.extra_headers)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.log.warning("Got status code %d for session %s", err.response.status_code, session_id)
raise AirflowException(
f"Unable to fetch batch with id: {session_id}. Message: {err.response.text}"
)
return response.json()
def get_batch_state(self, session_id: int | str, retry_args: dict[str, Any] | None = None) -> BatchState:
"""
Fetch the state of the specified batch.
:param session_id: identifier of the batch sessions
:param retry_args: Arguments which define the retry behaviour.
See Tenacity documentation at https://github.com/jd/tenacity
:return: batch state
"""
self._validate_session_id(session_id)
self.log.debug("Fetching info for batch session %s", session_id)
response = self.run_method(
endpoint=f"/batches/{session_id}/state", retry_args=retry_args, headers=self.extra_headers
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.log.warning("Got status code %d for session %s", err.response.status_code, session_id)
raise AirflowException(
f"Unable to fetch batch with id: {session_id}. Message: {err.response.text}"
)
jresp = response.json()
if "state" not in jresp:
raise AirflowException(f"Unable to get state for batch with id: {session_id}")
return BatchState(jresp["state"])
def delete_batch(self, session_id: int | str) -> dict:
"""
Delete the specified batch.
:param session_id: identifier of the batch sessions
:return: response body
"""
self._validate_session_id(session_id)
self.log.info("Deleting batch session %s", session_id)
response = self.run_method(
method="DELETE", endpoint=f"/batches/{session_id}", headers=self.extra_headers
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.log.warning("Got status code %d for session %s", err.response.status_code, session_id)
raise AirflowException(
f"Could not kill the batch with session id: {session_id}. Message: {err.response.text}"
)
return response.json()
def get_batch_logs(self, session_id: int | str, log_start_position, log_batch_size) -> dict:
"""
Gets the session logs for a specified batch.
:param session_id: identifier of the batch sessions
:param log_start_position: Position from where to pull the logs
:param log_batch_size: Number of lines to pull in one batch
:return: response body
"""
self._validate_session_id(session_id)
log_params = {"from": log_start_position, "size": log_batch_size}
response = self.run_method(
endpoint=f"/batches/{session_id}/log", data=log_params, headers=self.extra_headers
)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.log.warning("Got status code %d for session %s", err.response.status_code, session_id)
raise AirflowException(
f"Could not fetch the logs for batch with session id: {session_id}. "
f"Message: {err.response.text}"
)
return response.json()
def dump_batch_logs(self, session_id: int | str) -> None:
"""
Dumps the session logs for a specified batch.
:param session_id: identifier of the batch sessions
:return: response body
"""
self.log.info("Fetching the logs for batch session with id: %s", session_id)
log_start_line = 0
log_total_lines = 0
log_batch_size = 100
while log_start_line <= log_total_lines:
# Livy log endpoint is paginated.
response = self.get_batch_logs(session_id, log_start_line, log_batch_size)
log_total_lines = self._parse_request_response(response, "total")
log_start_line += log_batch_size
log_lines = self._parse_request_response(response, "log")
for log_line in log_lines:
self.log.info(log_line)
@staticmethod
def _validate_session_id(session_id: int | str) -> None:
"""
        Validate session id is an int.
:param session_id: session id
"""
try:
int(session_id)
except (TypeError, ValueError):
raise TypeError("'session_id' must be an integer")
@staticmethod
def _parse_post_response(response: dict[Any, Any]) -> int | None:
"""
Parse batch response for batch id.
:param response: response body
:return: session id
"""
return response.get("id")
@staticmethod
def _parse_request_response(response: dict[Any, Any], parameter):
"""
        Parse the batch response for the value of the given parameter.
:param response: response body
:return: value of parameter
"""
return response.get(parameter, [])
@staticmethod
def build_post_batch_body(
file: str,
args: Sequence[str | int | float] | None = None,
class_name: str | None = None,
jars: list[str] | None = None,
py_files: list[str] | None = None,
files: list[str] | None = None,
archives: list[str] | None = None,
name: str | None = None,
driver_memory: str | None = None,
driver_cores: int | str | None = None,
executor_memory: str | None = None,
executor_cores: int | None = None,
num_executors: int | str | None = None,
queue: str | None = None,
proxy_user: str | None = None,
conf: dict[Any, Any] | None = None,
) -> dict:
"""
Build the post batch request body.
.. seealso::
For more information about the format refer to
https://livy.apache.org/docs/latest/rest-api.html
:param file: Path of the file containing the application to execute (required).
:param proxy_user: User to impersonate when running the job.
:param class_name: Application Java/Spark main class string.
        :param args: Command line arguments for the application.
        :param jars: jars to be used in this session.
:param py_files: Python files to be used in this session.
:param files: files to be used in this session.
:param driver_memory: Amount of memory to use for the driver process string.
:param driver_cores: Number of cores to use for the driver process int.
:param executor_memory: Amount of memory to use per executor process string.
:param executor_cores: Number of cores to use for each executor int.
:param num_executors: Number of executors to launch for this session int.
:param archives: Archives to be used in this session.
        :param queue: The name of the YARN queue to which the application is submitted.
:param name: The name of this session string.
:param conf: Spark configuration properties.
:return: request body
"""
body: dict[str, Any] = {"file": file}
if proxy_user:
body["proxyUser"] = proxy_user
if class_name:
body["className"] = class_name
if args and LivyHook._validate_list_of_stringables(args):
body["args"] = [str(val) for val in args]
if jars and LivyHook._validate_list_of_stringables(jars):
body["jars"] = jars
if py_files and LivyHook._validate_list_of_stringables(py_files):
body["pyFiles"] = py_files
if files and LivyHook._validate_list_of_stringables(files):
body["files"] = files
if driver_memory and LivyHook._validate_size_format(driver_memory):
body["driverMemory"] = driver_memory
if driver_cores:
body["driverCores"] = driver_cores
if executor_memory and LivyHook._validate_size_format(executor_memory):
body["executorMemory"] = executor_memory
if executor_cores:
body["executorCores"] = executor_cores
if num_executors:
body["numExecutors"] = num_executors
if archives and LivyHook._validate_list_of_stringables(archives):
body["archives"] = archives
if queue:
body["queue"] = queue
if name:
body["name"] = name
if conf and LivyHook._validate_extra_conf(conf):
body["conf"] = conf
return body
@staticmethod
def _validate_size_format(size: str) -> bool:
"""
Validate size format.
:param size: size value
:return: true if valid format
"""
if size and not (isinstance(size, str) and re.match(r"^\d+[kmgt]b?$", size, re.IGNORECASE)):
raise ValueError(f"Invalid java size format for string'{size}'")
return True
@staticmethod
def _validate_list_of_stringables(vals: Sequence[str | int | float]) -> bool:
"""
Check the values in the provided list can be converted to strings.
:param vals: list to validate
:return: true if valid
"""
if (
vals is None
or not isinstance(vals, (tuple, list))
or any(1 for val in vals if not isinstance(val, (str, int, float)))
):
raise ValueError("List of strings expected")
return True
@staticmethod
def _validate_extra_conf(conf: dict[Any, Any]) -> bool:
"""
Check configuration values are either strings or ints.
:param conf: configuration variable
:return: true if valid
"""
if conf:
if not isinstance(conf, dict):
raise ValueError("'conf' argument must be a dict")
if any(True for k, v in conf.items() if not (v and isinstance(v, str) or isinstance(v, int))):
raise ValueError("'conf' values must be either strings or ints")
return True
class LivyAsyncHook(HttpAsyncHook, LoggingMixin):
"""
Hook for Apache Livy through the REST API asynchronously.
:param livy_conn_id: reference to a pre-defined Livy Connection.
:param extra_options: A dictionary of options passed to Livy.
:param extra_headers: A dictionary of headers passed to the HTTP request to livy.
.. seealso::
For more details refer to the Apache Livy API reference:
https://livy.apache.org/docs/latest/rest-api.html
"""
TERMINAL_STATES = {
BatchState.SUCCESS,
BatchState.DEAD,
BatchState.KILLED,
BatchState.ERROR,
}
_def_headers = {"Content-Type": "application/json", "Accept": "application/json"}
conn_name_attr = "livy_conn_id"
default_conn_name = "livy_default"
conn_type = "livy"
hook_name = "Apache Livy"
def __init__(
self,
livy_conn_id: str = default_conn_name,
extra_options: dict[str, Any] | None = None,
extra_headers: dict[str, Any] | None = None,
) -> None:
super().__init__(http_conn_id=livy_conn_id)
self.extra_headers = extra_headers or {}
self.extra_options = extra_options or {}
async def _do_api_call_async(
self,
endpoint: str | None = None,
data: dict[str, Any] | str | None = None,
headers: dict[str, Any] | None = None,
extra_options: dict[str, Any] | None = None,
) -> Any:
"""
Performs an asynchronous HTTP request call.
:param endpoint: the endpoint to be called i.e. resource/v1/query?
:param data: payload to be uploaded or request parameters
:param headers: additional headers to be passed through as a dictionary
:param extra_options: Additional kwargs to pass when creating a request.
For example, ``run(json=obj)`` is passed as ``aiohttp.ClientSession().get(json=obj)``
"""
extra_options = extra_options or {}
# headers may be passed through directly or in the "extra" field in the connection
# definition
_headers = {}
auth = None
if self.http_conn_id:
conn = await sync_to_async(self.get_connection)(self.http_conn_id)
self.base_url = self._generate_base_url(conn)
if conn.login:
auth = self.auth_type(conn.login, conn.password)
if conn.extra:
try:
_headers.update(conn.extra_dejson)
except TypeError:
self.log.warning("Connection to %s has invalid extra field.", conn.host)
if headers:
_headers.update(headers)
if self.base_url and not self.base_url.endswith("/") and endpoint and not endpoint.startswith("/"):
url = self.base_url + "/" + endpoint
else:
url = (self.base_url or "") + (endpoint or "")
async with aiohttp.ClientSession() as session:
if self.method == "GET":
request_func = session.get
elif self.method == "POST":
request_func = session.post
elif self.method == "PATCH":
request_func = session.patch
else:
return {"Response": f"Unexpected HTTP Method: {self.method}", "status": "error"}
attempt_num = 1
while True:
response = await request_func(
url,
json=data if self.method in ("POST", "PATCH") else None,
params=data if self.method == "GET" else None,
                    headers=_headers,
auth=auth,
**extra_options,
)
try:
response.raise_for_status()
return await response.json()
except ClientResponseError as e:
self.log.warning(
"[Try %d of %d] Request to %s failed.",
attempt_num,
self.retry_limit,
url,
)
if not self._retryable_error_async(e) or attempt_num == self.retry_limit:
self.log.exception("HTTP error, status code: %s", e.status)
# In this case, the user probably made a mistake.
# Don't retry.
return {"Response": {e.message}, "Status Code": {e.status}, "status": "error"}
attempt_num += 1
await asyncio.sleep(self.retry_delay)
def _generate_base_url(self, conn: Connection) -> str:
if conn.host and "://" in conn.host:
base_url: str = conn.host
else:
# schema defaults to HTTP
schema = conn.schema if conn.schema else "http"
host = conn.host if conn.host else ""
base_url = f"{schema}://{host}"
if conn.port:
base_url = f"{base_url}:{conn.port}"
return base_url
async def run_method(
self,
endpoint: str,
method: str = "GET",
data: Any | None = None,
headers: dict[str, Any] | None = None,
) -> Any:
"""
        Wrapper for HttpAsyncHook, allows changing the HTTP method on the same HttpAsyncHook instance.
:param method: http method
:param endpoint: endpoint
:param data: request payload
:param headers: headers
:return: http response
"""
if method not in ("GET", "POST", "PUT", "DELETE", "HEAD"):
return {"status": "error", "response": f"Invalid http method {method}"}
back_method = self.method
self.method = method
try:
result = await self._do_api_call_async(endpoint, data, headers, self.extra_options)
finally:
self.method = back_method
return {"status": "success", "response": result}
async def get_batch_state(self, session_id: int | str) -> Any:
"""
Fetch the state of the specified batch asynchronously.
:param session_id: identifier of the batch sessions
:return: batch state
"""
self._validate_session_id(session_id)
self.log.info("Fetching info for batch session %s", session_id)
result = await self.run_method(endpoint=f"/batches/{session_id}/state")
if result["status"] == "error":
self.log.info(result)
return {"batch_state": "error", "response": result, "status": "error"}
if "state" not in result["response"]:
self.log.info(
"batch_state: error with as it is unable to get state for batch with id: %s", session_id
)
return {
"batch_state": "error",
"response": f"Unable to get state for batch with id: {session_id}",
"status": "error",
}
self.log.info("Successfully fetched the batch state.")
return {
"batch_state": BatchState(result["response"]["state"]),
"response": "successfully fetched the batch state.",
"status": "success",
}
async def get_batch_logs(
self, session_id: int | str, log_start_position: int, log_batch_size: int
) -> Any:
"""
Gets the session logs for a specified batch asynchronously.
:param session_id: identifier of the batch sessions
:param log_start_position: Position from where to pull the logs
:param log_batch_size: Number of lines to pull in one batch
:return: response body
"""
self._validate_session_id(session_id)
log_params = {"from": log_start_position, "size": log_batch_size}
result = await self.run_method(endpoint=f"/batches/{session_id}/log", data=log_params)
if result["status"] == "error":
self.log.info(result)
return {"response": result["response"], "status": "error"}
return {"response": result["response"], "status": "success"}
async def dump_batch_logs(self, session_id: int | str) -> Any:
"""
Dumps the session logs for a specified batch asynchronously.
:param session_id: identifier of the batch sessions
:return: response body
"""
self.log.info("Fetching the logs for batch session with id: %s", session_id)
log_start_line = 0
log_total_lines = 0
log_batch_size = 100
while log_start_line <= log_total_lines:
# Livy log endpoint is paginated.
result = await self.get_batch_logs(session_id, log_start_line, log_batch_size)
if result["status"] == "success":
log_start_line += log_batch_size
log_lines = self._parse_request_response(result["response"], "log")
for log_line in log_lines:
self.log.info(log_line)
return log_lines
else:
self.log.info(result["response"])
return result["response"]
@staticmethod
def _validate_session_id(session_id: int | str) -> None:
"""
        Validate session id is an int.
:param session_id: session id
"""
try:
int(session_id)
except (TypeError, ValueError):
raise TypeError("'session_id' must be an integer")
@staticmethod
def _parse_post_response(response: dict[Any, Any]) -> Any:
"""
Parse batch response for batch id.
:param response: response body
:return: session id
"""
return response.get("id")
@staticmethod
def _parse_request_response(response: dict[Any, Any], parameter: Any) -> Any:
"""
        Parse the batch response for the value of the given parameter.
:param response: response body
:return: value of parameter
"""
return response.get(parameter)
@staticmethod
def build_post_batch_body(
file: str,
args: Sequence[str | int | float] | None = None,
class_name: str | None = None,
jars: list[str] | None = None,
py_files: list[str] | None = None,
files: list[str] | None = None,
archives: list[str] | None = None,
name: str | None = None,
driver_memory: str | None = None,
driver_cores: int | str | None = None,
executor_memory: str | None = None,
executor_cores: int | None = None,
num_executors: int | str | None = None,
queue: str | None = None,
proxy_user: str | None = None,
conf: dict[Any, Any] | None = None,
) -> dict[str, Any]:
"""
Build the post batch request body.
:param file: Path of the file containing the application to execute (required).
:param proxy_user: User to impersonate when running the job.
:param class_name: Application Java/Spark main class string.
        :param args: Command line arguments for the application.
        :param jars: jars to be used in this session.
:param py_files: Python files to be used in this session.
:param files: files to be used in this session.
:param driver_memory: Amount of memory to use for the driver process string.
:param driver_cores: Number of cores to use for the driver process int.
:param executor_memory: Amount of memory to use per executor process string.
:param executor_cores: Number of cores to use for each executor int.
:param num_executors: Number of executors to launch for this session int.
:param archives: Archives to be used in this session.
        :param queue: The name of the YARN queue to which the application is submitted.
:param name: The name of this session string.
:param conf: Spark configuration properties.
:return: request body
"""
body: dict[str, Any] = {"file": file}
if proxy_user:
body["proxyUser"] = proxy_user
if class_name:
body["className"] = class_name
if args and LivyAsyncHook._validate_list_of_stringables(args):
body["args"] = [str(val) for val in args]
if jars and LivyAsyncHook._validate_list_of_stringables(jars):
body["jars"] = jars
if py_files and LivyAsyncHook._validate_list_of_stringables(py_files):
body["pyFiles"] = py_files
if files and LivyAsyncHook._validate_list_of_stringables(files):
body["files"] = files
if driver_memory and LivyAsyncHook._validate_size_format(driver_memory):
body["driverMemory"] = driver_memory
if driver_cores:
body["driverCores"] = driver_cores
if executor_memory and LivyAsyncHook._validate_size_format(executor_memory):
body["executorMemory"] = executor_memory
if executor_cores:
body["executorCores"] = executor_cores
if num_executors:
body["numExecutors"] = num_executors
if archives and LivyAsyncHook._validate_list_of_stringables(archives):
body["archives"] = archives
if queue:
body["queue"] = queue
if name:
body["name"] = name
if conf and LivyAsyncHook._validate_extra_conf(conf):
body["conf"] = conf
return body
@staticmethod
def _validate_size_format(size: str) -> bool:
"""
Validate size format.
:param size: size value
:return: true if valid format
"""
if size and not (isinstance(size, str) and re.match(r"^\d+[kmgt]b?$", size, re.IGNORECASE)):
raise ValueError(f"Invalid java size format for string'{size}'")
return True
@staticmethod
def _validate_list_of_stringables(vals: Sequence[str | int | float]) -> bool:
"""
Check the values in the provided list can be converted to strings.
:param vals: list to validate
:return: true if valid
"""
if (
vals is None
or not isinstance(vals, (tuple, list))
or any(1 for val in vals if not isinstance(val, (str, int, float)))
):
raise ValueError("List of strings expected")
return True
@staticmethod
def _validate_extra_conf(conf: dict[Any, Any]) -> bool:
"""
Check configuration values are either strings or ints.
:param conf: configuration variable
:return: true if valid
"""
if conf:
if not isinstance(conf, dict):
raise ValueError("'conf' argument must be a dict")
if any(True for k, v in conf.items() if not (v and isinstance(v, str) or isinstance(v, int))):
raise ValueError("'conf' values must be either strings or ints")
return True
| 31,553 | 36.698925 | 109 | py |
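# --- Hedged usage sketch (not part of the provider source above) ---
# Driving the Livy REST API directly with LivyHook: submit a batch, poll its
# state, then dump its logs. The jar path and connection id are assumptions.
from time import sleep

from airflow.providers.apache.livy.hooks.livy import BatchState, LivyHook

hook = LivyHook(livy_conn_id="livy_default")
batch_id = hook.post_batch(
    file="hdfs:///apps/spark-examples.jar",
    class_name="org.apache.spark.examples.SparkPi",
    args=[10],
)
state = hook.get_batch_state(batch_id)
while state not in hook.TERMINAL_STATES:
    sleep(30)
    state = hook.get_batch_state(batch_id)
hook.dump_batch_logs(batch_id)
if state != BatchState.SUCCESS:
    raise RuntimeError(f"Batch {batch_id} finished in state {state.value}")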
airflow | airflow-main/airflow/providers/apache/livy/sensors/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/livy/sensors/livy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains the Apache Livy sensor."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.providers.apache.livy.hooks.livy import LivyHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class LivySensor(BaseSensorOperator):
"""
Monitor a Livy sessions for termination.
:param livy_conn_id: reference to a pre-defined Livy connection
:param batch_id: identifier of the monitored batch
    :param livy_conn_auth_type: the authentication type for the Livy connection
    :param extra_options: additional options passed to the underlying HTTP requests
    """
template_fields: Sequence[str] = ("batch_id",)
def __init__(
self,
*,
batch_id: int | str,
livy_conn_id: str = "livy_default",
livy_conn_auth_type: Any | None = None,
extra_options: dict[str, Any] | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.batch_id = batch_id
self._livy_conn_id = livy_conn_id
self._livy_conn_auth_type = livy_conn_auth_type
self._livy_hook: LivyHook | None = None
self._extra_options = extra_options or {}
def get_hook(self) -> LivyHook:
"""
Get valid hook.
:return: hook
"""
if self._livy_hook is None or not isinstance(self._livy_hook, LivyHook):
self._livy_hook = LivyHook(
livy_conn_id=self._livy_conn_id,
extra_options=self._extra_options,
auth_type=self._livy_conn_auth_type,
)
return self._livy_hook
def poke(self, context: Context) -> bool:
batch_id = self.batch_id
status = self.get_hook().get_batch_state(batch_id)
return status in self.get_hook().TERMINAL_STATES
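# Editor's illustrative sketch, not part of the provider module: a minimal DAG that
# waits for a Livy batch to reach a terminal state. The DAG id, the upstream
# "submit_batch" task referenced in the template and the "livy_default" connection
# are assumptions made up for the example.
if __name__ == "__main__":
    import datetime
    from airflow import DAG
    with DAG(dag_id="example_livy_sensor", start_date=datetime.datetime(2021, 1, 1), schedule=None):
        # batch_id is a templated field, so it can pull the id pushed to XCom by
        # the (hypothetical) task that submitted the batch.
        LivySensor(
            task_id="wait_for_batch",
            batch_id="{{ ti.xcom_pull(task_ids='submit_batch') }}",
            livy_conn_id="livy_default",
            poke_interval=30,
        )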
| 2,574 | 33.333333 | 80 | py |
airflow | airflow-main/airflow/providers/apache/drill/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "2.4.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-drill:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,537 | 35.619048 | 121 | py |
airflow | airflow-main/airflow/providers/apache/drill/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/drill/operators/drill.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from typing import Sequence
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.common.sql.operators.sql import SQLExecuteQueryOperator
class DrillOperator(SQLExecuteQueryOperator):
"""
Executes the provided SQL in the identified Drill environment.
This class is deprecated.
Please use :class:`airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator`.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DrillOperator`
:param sql: the SQL code to be executed as a single string, or
a list of str (sql statements), or a reference to a template file.
Template references are recognized by str ending in '.sql'
:param drill_conn_id: id of the connection config for the target Drill
environment
:param parameters: (optional) the parameters to render the SQL query with.
"""
template_fields: Sequence[str] = ("sql",)
template_fields_renderers = {"sql": "sql"}
template_ext: Sequence[str] = (".sql",)
ui_color = "#ededed"
def __init__(self, *, drill_conn_id: str = "drill_default", **kwargs) -> None:
super().__init__(conn_id=drill_conn_id, **kwargs)
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator`.""",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
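# Editor's illustrative sketch, not part of the provider module: the migration path
# away from this deprecated class is the generic SQLExecuteQueryOperator (already
# imported above) pointed at a Drill connection id. The connection id and query
# are assumptions made up for the example.
if __name__ == "__main__":
    SQLExecuteQueryOperator(
        task_id="run_drill_query",
        conn_id="drill_default",
        sql="SELECT 1 FROM (VALUES(1))",
    )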
| 2,346 | 38.116667 | 96 | py |
airflow | airflow-main/airflow/providers/apache/drill/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/drill/hooks/drill.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import Any, Iterable
from sqlalchemy import create_engine
from sqlalchemy.engine import Connection
from airflow.providers.common.sql.hooks.sql import DbApiHook
class DrillHook(DbApiHook):
"""
Interact with Apache Drill via sqlalchemy-drill.
You can specify the SQLAlchemy dialect and driver that sqlalchemy-drill
will employ to communicate with Drill in the extras field of your
connection, e.g. ``{"dialect_driver": "drill+sadrill"}`` for communication
over Drill's REST API. See the sqlalchemy-drill documentation for
descriptions of the supported dialects and drivers.
You can specify the default storage_plugin for the sqlalchemy-drill
connection using the extras field e.g. ``{"storage_plugin": "dfs"}``.
"""
conn_name_attr = "drill_conn_id"
default_conn_name = "drill_default"
conn_type = "drill"
hook_name = "Drill"
supports_autocommit = False
def get_conn(self) -> Connection:
"""Establish a connection to Drillbit."""
conn_md = self.get_connection(getattr(self, self.conn_name_attr))
creds = f"{conn_md.login}:{conn_md.password}@" if conn_md.login else ""
if "/" in conn_md.host or "&" in conn_md.host:
raise ValueError("Drill host should not contain '/&' characters")
engine = create_engine(
f'{conn_md.extra_dejson.get("dialect_driver", "drill+sadrill")}://{creds}'
f"{conn_md.host}:{conn_md.port}/"
f'{conn_md.extra_dejson.get("storage_plugin", "dfs")}'
)
self.log.info(
"Connected to the Drillbit at %s:%s as user %s", conn_md.host, conn_md.port, conn_md.login
)
return engine.raw_connection()
def get_uri(self) -> str:
"""
Returns the connection URI.
e.g: ``drill://localhost:8047/dfs``
"""
conn_md = self.get_connection(getattr(self, self.conn_name_attr))
host = conn_md.host
if conn_md.port is not None:
host += f":{conn_md.port}"
conn_type = conn_md.conn_type or "drill"
dialect_driver = conn_md.extra_dejson.get("dialect_driver", "drill+sadrill")
storage_plugin = conn_md.extra_dejson.get("storage_plugin", "dfs")
return f"{conn_type}://{host}/{storage_plugin}?dialect_driver={dialect_driver}"
def set_autocommit(self, conn: Connection, autocommit: bool) -> NotImplementedError:
raise NotImplementedError("There are no transactions in Drill.")
def insert_rows(
self,
table: str,
rows: Iterable[tuple[str]],
target_fields: Iterable[str] | None = None,
commit_every: int = 1000,
replace: bool = False,
**kwargs: Any,
) -> NotImplementedError:
raise NotImplementedError("There is no INSERT statement in Drill.")
| 3,660 | 38.365591 | 102 | py |
airflow | airflow-main/airflow/providers/apache/sqoop/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-sqoop:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,537 | 35.619048 | 121 | py |
airflow | airflow-main/airflow/providers/apache/sqoop/operators/sqoop.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a sqoop 1 operator."""
from __future__ import annotations
import os
import signal
from typing import TYPE_CHECKING, Any, Sequence
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.sqoop.hooks.sqoop import SqoopHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SqoopOperator(BaseOperator):
"""
Execute a Sqoop job.
Documentation for Apache Sqoop can be found here: https://sqoop.apache.org/docs/1.4.2/SqoopUserGuide.html
:param conn_id: str
:param cmd_type: str specify command to execute "export" or "import"
:param schema: Schema name
:param table: Table to read
:param query: Import result of arbitrary SQL query. Instead of using the table,
columns and where arguments, you can specify a SQL statement with the query
argument. Must also specify a destination directory with target_dir.
:param target_dir: HDFS destination directory where the data
from the rdbms will be written
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" Imports data to
into the specified format. Defaults to text.
:param columns: <col,col,col> Columns to import from table
:param num_mappers: Use n mapper tasks to import/export in parallel
:param split_by: Column of the table used to split work units
:param where: WHERE clause to use during import
:param export_dir: HDFS Hive database directory to export to the rdbms
:param input_null_string: The string to be interpreted as null
for string columns
:param input_null_non_string: The string to be interpreted as null
for non-string columns
:param staging_table: The table in which data will be staged before
being inserted into the destination table
:param clear_staging_table: Indicate that any data present in the
staging table can be deleted
:param enclosed_by: Sets a required field enclosing character
:param escaped_by: Sets the escape character
:param input_fields_terminated_by: Sets the input field separator
:param input_lines_terminated_by: Sets the input end-of-line character
:param input_optionally_enclosed_by: Sets a field enclosing character
:param batch: Use batch mode for underlying statement execution
:param direct: Use direct export fast path
:param driver: Manually specify JDBC driver class to use
:param verbose: Switch to more verbose logging for debug purposes
:param relaxed_isolation: use read uncommitted isolation level
:param hcatalog_database: Specifies the database name for the HCatalog table
:param hcatalog_table: The argument value for this option is the HCatalog table
:param create_hcatalog_table: Have sqoop create the hcatalog table passed
in or not
:param properties: additional JVM properties passed to sqoop
:param extra_import_options: Extra import options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
:param extra_export_options: Extra export options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
:param libjars: Optional Comma separated jar files to include in the classpath.
"""
template_fields: Sequence[str] = (
"conn_id",
"cmd_type",
"table",
"query",
"target_dir",
"file_type",
"columns",
"split_by",
"where",
"export_dir",
"input_null_string",
"input_null_non_string",
"staging_table",
"enclosed_by",
"escaped_by",
"input_fields_terminated_by",
"input_lines_terminated_by",
"input_optionally_enclosed_by",
"properties",
"extra_import_options",
"driver",
"extra_export_options",
"hcatalog_database",
"hcatalog_table",
"schema",
)
template_fields_renderers = {"query": "sql"}
ui_color = "#7D8CA4"
def __init__(
self,
*,
conn_id: str = "sqoop_default",
cmd_type: str = "import",
table: str | None = None,
query: str | None = None,
target_dir: str | None = None,
append: bool = False,
file_type: str = "text",
columns: str | None = None,
num_mappers: int | None = None,
split_by: str | None = None,
where: str | None = None,
export_dir: str | None = None,
input_null_string: str | None = None,
input_null_non_string: str | None = None,
staging_table: str | None = None,
clear_staging_table: bool = False,
enclosed_by: str | None = None,
escaped_by: str | None = None,
input_fields_terminated_by: str | None = None,
input_lines_terminated_by: str | None = None,
input_optionally_enclosed_by: str | None = None,
batch: bool = False,
direct: bool = False,
driver: Any | None = None,
verbose: bool = False,
relaxed_isolation: bool = False,
properties: dict[str, Any] | None = None,
hcatalog_database: str | None = None,
hcatalog_table: str | None = None,
create_hcatalog_table: bool = False,
extra_import_options: dict[str, Any] | None = None,
extra_export_options: dict[str, Any] | None = None,
schema: str | None = None,
libjars: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.conn_id = conn_id
self.cmd_type = cmd_type
self.table = table
self.query = query
self.target_dir = target_dir
self.append = append
self.file_type = file_type
self.columns = columns
self.num_mappers = num_mappers
self.split_by = split_by
self.where = where
self.export_dir = export_dir
self.input_null_string = input_null_string
self.input_null_non_string = input_null_non_string
self.staging_table = staging_table
self.clear_staging_table = clear_staging_table
self.enclosed_by = enclosed_by
self.escaped_by = escaped_by
self.input_fields_terminated_by = input_fields_terminated_by
self.input_lines_terminated_by = input_lines_terminated_by
self.input_optionally_enclosed_by = input_optionally_enclosed_by
self.batch = batch
self.direct = direct
self.driver = driver
self.verbose = verbose
self.relaxed_isolation = relaxed_isolation
self.hcatalog_database = hcatalog_database
self.hcatalog_table = hcatalog_table
self.create_hcatalog_table = create_hcatalog_table
self.properties = properties
self.extra_import_options = extra_import_options or {}
self.extra_export_options = extra_export_options or {}
self.hook: SqoopHook | None = None
self.schema = schema
self.libjars = libjars
def execute(self, context: Context) -> None:
"""Execute sqoop job."""
if self.hook is None:
self.hook = self._get_hook()
if self.cmd_type == "export":
self.hook.export_table(
table=self.table, # type: ignore
export_dir=self.export_dir,
input_null_string=self.input_null_string,
input_null_non_string=self.input_null_non_string,
staging_table=self.staging_table,
clear_staging_table=self.clear_staging_table,
enclosed_by=self.enclosed_by,
escaped_by=self.escaped_by,
input_fields_terminated_by=self.input_fields_terminated_by,
input_lines_terminated_by=self.input_lines_terminated_by,
input_optionally_enclosed_by=self.input_optionally_enclosed_by,
batch=self.batch,
relaxed_isolation=self.relaxed_isolation,
extra_export_options=self.extra_export_options,
schema=self.schema,
)
elif self.cmd_type == "import":
# add create hcatalog table to extra import options if option passed
# if new params are added to constructor can pass them in here
# so don't modify sqoop_hook for each param
if self.create_hcatalog_table:
self.extra_import_options["create-hcatalog-table"] = ""
if self.table and self.query:
raise AirflowException("Cannot specify query and table together. Need to specify either or.")
if self.table:
self.hook.import_table(
table=self.table,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
columns=self.columns,
split_by=self.split_by,
where=self.where,
direct=self.direct,
driver=self.driver,
extra_import_options=self.extra_import_options,
schema=self.schema,
)
elif self.query:
self.hook.import_query(
query=self.query,
target_dir=self.target_dir,
append=self.append,
file_type=self.file_type,
split_by=self.split_by,
direct=self.direct,
driver=self.driver,
extra_import_options=self.extra_import_options,
)
else:
raise AirflowException("Provide query or table parameter to import using Sqoop")
else:
raise AirflowException("cmd_type should be 'import' or 'export'")
def on_kill(self) -> None:
if self.hook is None:
self.hook = self._get_hook()
self.log.info("Sending SIGTERM signal to bash process group")
os.killpg(os.getpgid(self.hook.sub_process_pid), signal.SIGTERM)
def _get_hook(self) -> SqoopHook:
return SqoopHook(
conn_id=self.conn_id,
verbose=self.verbose,
num_mappers=self.num_mappers,
hcatalog_database=self.hcatalog_database,
hcatalog_table=self.hcatalog_table,
properties=self.properties,
libjars=self.libjars,
)
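# Editor's illustrative sketch, not part of the provider module: a minimal import
# task assembled from the arguments documented above. The connection id, table,
# HDFS target directory and split column are assumptions made up for the example.
if __name__ == "__main__":
    SqoopOperator(
        task_id="sqoop_import_orders",
        conn_id="sqoop_default",
        cmd_type="import",
        table="orders",
        target_dir="/user/airflow/orders",
        file_type="parquet",
        num_mappers=4,
        split_by="order_id",
        # keys without a value take an empty string, per the docstring above
        extra_import_options={"compress": "", "fetch-size": 1000},
    )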
| 11,403 | 40.772894 | 109 | py |
airflow | airflow-main/airflow/providers/apache/sqoop/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/sqoop/hooks/sqoop.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains a sqoop 1.x hook."""
from __future__ import annotations
import subprocess
from copy import deepcopy
from typing import Any
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
class SqoopHook(BaseHook):
"""Wrapper around the sqoop 1 binary.
    To use the hook, the "sqoop" binary must be on the PATH.
Additional arguments that can be passed via the 'extra' JSON field of the
sqoop connection:
* ``job_tracker``: Job tracker local|jobtracker:port.
* ``namenode``: Namenode.
* ``files``: Comma separated files to be copied to the map reduce cluster.
* ``archives``: Comma separated archives to be unarchived on the compute
machines.
* ``password_file``: Path to file containing the password.
:param conn_id: Reference to the sqoop connection.
:param verbose: Set sqoop to verbose.
:param num_mappers: Number of map tasks to import in parallel.
:param properties: Properties to set via the -D argument
:param libjars: Optional Comma separated jar files to include in the classpath.
"""
conn_name_attr = "conn_id"
default_conn_name = "sqoop_default"
conn_type = "sqoop"
hook_name = "Sqoop"
def __init__(
self,
conn_id: str = default_conn_name,
verbose: bool = False,
num_mappers: int | None = None,
hcatalog_database: str | None = None,
hcatalog_table: str | None = None,
properties: dict[str, Any] | None = None,
libjars: str | None = None,
) -> None:
# No mutable types in the default parameters
super().__init__()
self.conn = self.get_connection(conn_id)
connection_parameters = self.conn.extra_dejson
self.job_tracker = connection_parameters.get("job_tracker", None)
self.namenode = connection_parameters.get("namenode", None)
self.libjars = libjars
self.files = connection_parameters.get("files", None)
self.archives = connection_parameters.get("archives", None)
self.password_file = connection_parameters.get("password_file", None)
self.hcatalog_database = hcatalog_database
self.hcatalog_table = hcatalog_table
self.verbose = verbose
self.num_mappers = num_mappers
self.properties = properties or {}
self.sub_process_pid: int
self.log.info("Using connection to: %s:%s/%s", self.conn.host, self.conn.port, self.conn.schema)
def get_conn(self) -> Any:
return self.conn
def cmd_mask_password(self, cmd_orig: list[str]) -> list[str]:
"""Mask command password for safety."""
cmd = deepcopy(cmd_orig)
try:
password_index = cmd.index("--password")
cmd[password_index + 1] = "MASKED"
except ValueError:
self.log.debug("No password in sqoop cmd")
return cmd
def popen(self, cmd: list[str], **kwargs: Any) -> None:
"""Remote Popen.
:param cmd: command to remotely execute
:param kwargs: extra arguments to Popen (see subprocess.Popen)
:return: handle to subprocess
"""
masked_cmd = " ".join(self.cmd_mask_password(cmd))
self.log.info("Executing command: %s", masked_cmd)
with subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, **kwargs) as sub_process:
self.sub_process_pid = sub_process.pid
for line in iter(sub_process.stdout): # type: ignore
self.log.info(line.strip())
sub_process.wait()
self.log.info("Command exited with return code %s", sub_process.returncode)
if sub_process.returncode:
raise AirflowException(f"Sqoop command failed: {masked_cmd}")
def _prepare_command(self, export: bool = False) -> list[str]:
sqoop_cmd_type = "export" if export else "import"
connection_cmd = ["sqoop", sqoop_cmd_type]
for key, value in self.properties.items():
connection_cmd += ["-D", f"{key}={value}"]
if self.namenode:
connection_cmd += ["-fs", self.namenode]
if self.job_tracker:
connection_cmd += ["-jt", self.job_tracker]
if self.libjars:
connection_cmd += ["-libjars", self.libjars]
if self.files:
connection_cmd += ["-files", self.files]
if self.archives:
connection_cmd += ["-archives", self.archives]
if self.conn.login:
connection_cmd += ["--username", self.conn.login]
if self.conn.password:
connection_cmd += ["--password", self.conn.password]
if self.password_file:
connection_cmd += ["--password-file", self.password_file]
if self.verbose:
connection_cmd += ["--verbose"]
if self.num_mappers:
connection_cmd += ["--num-mappers", str(self.num_mappers)]
if self.hcatalog_database:
connection_cmd += ["--hcatalog-database", self.hcatalog_database]
if self.hcatalog_table:
connection_cmd += ["--hcatalog-table", self.hcatalog_table]
connect_str = self.conn.host
if self.conn.port:
connect_str += f":{self.conn.port}"
if self.conn.schema:
self.log.info("CONNECTION TYPE %s", self.conn.conn_type)
if self.conn.conn_type != "mssql":
connect_str += f"/{self.conn.schema}"
else:
connect_str += f";databaseName={self.conn.schema}"
connection_cmd += ["--connect", connect_str]
return connection_cmd
@staticmethod
def _get_export_format_argument(file_type: str = "text") -> list[str]:
if file_type == "avro":
return ["--as-avrodatafile"]
elif file_type == "sequence":
return ["--as-sequencefile"]
elif file_type == "parquet":
return ["--as-parquetfile"]
elif file_type == "text":
return ["--as-textfile"]
else:
raise AirflowException("Argument file_type should be 'avro', 'sequence', 'parquet' or 'text'.")
def _import_cmd(
self,
target_dir: str | None,
append: bool,
file_type: str,
split_by: str | None,
direct: bool | None,
driver: Any,
extra_import_options: Any,
) -> list[str]:
cmd = self._prepare_command(export=False)
if target_dir:
cmd += ["--target-dir", target_dir]
if append:
cmd += ["--append"]
cmd += self._get_export_format_argument(file_type)
if split_by:
cmd += ["--split-by", split_by]
if direct:
cmd += ["--direct"]
if driver:
cmd += ["--driver", driver]
if extra_import_options:
for key, value in extra_import_options.items():
cmd += [f"--{key}"]
if value:
cmd += [str(value)]
return cmd
def import_table(
self,
table: str,
target_dir: str | None = None,
append: bool = False,
file_type: str = "text",
columns: str | None = None,
split_by: str | None = None,
where: str | None = None,
direct: bool = False,
driver: Any = None,
extra_import_options: dict[str, Any] | None = None,
schema: str | None = None,
) -> Any:
"""Import table from remote location to target dir.
Arguments are copies of direct sqoop command line arguments.
:param table: Table to read
:param schema: Schema name
:param target_dir: HDFS destination dir
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" or "parquet".
            Imports data to HDFS in the specified format. Defaults to text.
:param columns: <col,col,col…> Columns to import from table
:param split_by: Column of the table used to split work units
:param where: WHERE clause to use during import
:param direct: Use direct connector if exists for the database
:param driver: Manually specify JDBC driver class to use
:param extra_import_options: Extra import options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options)
cmd += ["--table", table]
if columns:
cmd += ["--columns", columns]
if where:
cmd += ["--where", where]
if schema:
cmd += ["--", "--schema", schema]
self.popen(cmd)
def import_query(
self,
query: str,
target_dir: str | None = None,
append: bool = False,
file_type: str = "text",
split_by: str | None = None,
direct: bool | None = None,
driver: Any | None = None,
extra_import_options: dict[str, Any] | None = None,
) -> Any:
"""Import a specific query from the rdbms to hdfs.
:param query: Free format query to run
:param target_dir: HDFS destination dir
:param append: Append data to an existing dataset in HDFS
:param file_type: "avro", "sequence", "text" or "parquet"
            Imports data to HDFS in the specified format. Defaults to text.
:param split_by: Column of the table used to split work units
:param direct: Use direct import fast path
:param driver: Manually specify JDBC driver class to use
:param extra_import_options: Extra import options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._import_cmd(target_dir, append, file_type, split_by, direct, driver, extra_import_options)
cmd += ["--query", query]
self.popen(cmd)
def _export_cmd(
self,
table: str,
export_dir: str | None = None,
input_null_string: str | None = None,
input_null_non_string: str | None = None,
staging_table: str | None = None,
clear_staging_table: bool = False,
enclosed_by: str | None = None,
escaped_by: str | None = None,
input_fields_terminated_by: str | None = None,
input_lines_terminated_by: str | None = None,
input_optionally_enclosed_by: str | None = None,
batch: bool = False,
relaxed_isolation: bool = False,
extra_export_options: dict[str, Any] | None = None,
schema: str | None = None,
) -> list[str]:
cmd = self._prepare_command(export=True)
if input_null_string:
cmd += ["--input-null-string", input_null_string]
if input_null_non_string:
cmd += ["--input-null-non-string", input_null_non_string]
if staging_table:
cmd += ["--staging-table", staging_table]
if clear_staging_table:
cmd += ["--clear-staging-table"]
if enclosed_by:
cmd += ["--enclosed-by", enclosed_by]
if escaped_by:
cmd += ["--escaped-by", escaped_by]
if input_fields_terminated_by:
cmd += ["--input-fields-terminated-by", input_fields_terminated_by]
if input_lines_terminated_by:
cmd += ["--input-lines-terminated-by", input_lines_terminated_by]
if input_optionally_enclosed_by:
cmd += ["--input-optionally-enclosed-by", input_optionally_enclosed_by]
if batch:
cmd += ["--batch"]
if relaxed_isolation:
cmd += ["--relaxed-isolation"]
if export_dir:
cmd += ["--export-dir", export_dir]
if extra_export_options:
for key, value in extra_export_options.items():
cmd += [f"--{key}"]
if value:
cmd += [str(value)]
# The required option
cmd += ["--table", table]
if schema:
cmd += ["--", "--schema", schema]
return cmd
def export_table(
self,
table: str,
export_dir: str | None = None,
input_null_string: str | None = None,
input_null_non_string: str | None = None,
staging_table: str | None = None,
clear_staging_table: bool = False,
enclosed_by: str | None = None,
escaped_by: str | None = None,
input_fields_terminated_by: str | None = None,
input_lines_terminated_by: str | None = None,
input_optionally_enclosed_by: str | None = None,
batch: bool = False,
relaxed_isolation: bool = False,
extra_export_options: dict[str, Any] | None = None,
schema: str | None = None,
) -> None:
"""Export Hive table to remote location.
Arguments are copies of direct Sqoop command line Arguments
:param table: Table remote destination
:param schema: Schema name
:param export_dir: Hive table to export
:param input_null_string: The string to be interpreted as null for
string columns
:param input_null_non_string: The string to be interpreted as null
for non-string columns
:param staging_table: The table in which data will be staged before
being inserted into the destination table
:param clear_staging_table: Indicate that any data present in the
staging table can be deleted
:param enclosed_by: Sets a required field enclosing character
:param escaped_by: Sets the escape character
:param input_fields_terminated_by: Sets the field separator character
:param input_lines_terminated_by: Sets the end-of-line character
:param input_optionally_enclosed_by: Sets a field enclosing character
:param batch: Use batch mode for underlying statement execution
:param relaxed_isolation: Transaction isolation to read uncommitted
for the mappers
:param extra_export_options: Extra export options to pass as dict.
If a key doesn't have a value, just pass an empty string to it.
Don't include prefix of -- for sqoop options.
"""
cmd = self._export_cmd(
table,
export_dir,
input_null_string,
input_null_non_string,
staging_table,
clear_staging_table,
enclosed_by,
escaped_by,
input_fields_terminated_by,
input_lines_terminated_by,
input_optionally_enclosed_by,
batch,
relaxed_isolation,
extra_export_options,
schema,
)
self.popen(cmd)
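# Editor's illustrative sketch, not part of the provider module: driving the hook
# directly instead of through SqoopOperator. It assumes a "sqoop_default"
# connection (optionally carrying extras such as {"namenode": "hdfs://nn:8020",
# "password_file": "/etc/sqoop/pwd"}), that the "sqoop" binary is on PATH, and a
# made-up table and target directory.
if __name__ == "__main__":
    hook = SqoopHook(conn_id="sqoop_default", verbose=True, num_mappers=4)
    hook.import_table(
        table="orders",
        target_dir="/user/airflow/orders",
        file_type="avro",
        split_by="order_id",
        extra_import_options={"compress": ""},
    )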
| 15,761 | 36.350711 | 110 | py |
airflow | airflow-main/airflow/providers/apache/sqoop/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/druid/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.4.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-druid:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,537 | 35.619048 | 121 | py |
airflow | airflow-main/airflow/providers/apache/druid/transfers/hive_to_druid.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains operator to move data from Hive to Druid."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.apache.druid.hooks.druid import DruidHook
from airflow.providers.apache.hive.hooks.hive import HiveCliHook, HiveMetastoreHook
if TYPE_CHECKING:
from airflow.utils.context import Context
LOAD_CHECK_INTERVAL = 5
DEFAULT_TARGET_PARTITION_SIZE = 5000000
class HiveToDruidOperator(BaseOperator):
"""Moves data from Hive to Druid.
    Note that for now the data is loaded into memory before being pushed to
    Druid, so this operator should be used for small amounts of data.
:param sql: SQL query to execute against the Druid database. (templated)
:param druid_datasource: the datasource you want to ingest into in druid
:param ts_dim: the timestamp dimension
:param metric_spec: the metrics you want to define for your data
:param hive_cli_conn_id: the hive connection id
:param druid_ingest_conn_id: the druid ingest connection id
:param metastore_conn_id: the metastore connection id
:param hadoop_dependency_coordinates: list of coordinates to squeeze
        into the ingest json
:param intervals: list of time intervals that defines segments,
this is passed as is to the json object. (templated)
:param num_shards: Directly specify the number of shards to create.
    :param target_partition_size: Target number of rows to include in a partition.
:param query_granularity: The minimum granularity to be able to query results at and the granularity of
the data inside the segment. E.g. a value of "minute" will mean that data is aggregated at minutely
granularity. That is, if there are collisions in the tuple (minute(timestamp), dimensions), then it
will aggregate values together using the aggregators instead of storing individual rows.
A granularity of 'NONE' means millisecond granularity.
:param segment_granularity: The granularity to create time chunks at. Multiple segments can be created per
time chunk. For example, with 'DAY' segmentGranularity, the events of the same day fall into the
same time chunk which can be optionally further partitioned into multiple segments based on other
configurations and input size.
:param hive_tblproperties: additional properties for tblproperties in
hive for the staging table
:param job_properties: additional properties for job
"""
template_fields: Sequence[str] = ("sql", "intervals")
template_ext: Sequence[str] = (".sql",)
template_fields_renderers = {"sql": "hql"}
def __init__(
self,
*,
sql: str,
druid_datasource: str,
ts_dim: str,
metric_spec: list[Any] | None = None,
hive_cli_conn_id: str = "hive_cli_default",
druid_ingest_conn_id: str = "druid_ingest_default",
metastore_conn_id: str = "metastore_default",
hadoop_dependency_coordinates: list[str] | None = None,
intervals: list[Any] | None = None,
num_shards: float = -1,
target_partition_size: int = -1,
query_granularity: str = "NONE",
segment_granularity: str = "DAY",
hive_tblproperties: dict[Any, Any] | None = None,
job_properties: dict[Any, Any] | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.sql = sql
self.druid_datasource = druid_datasource
self.ts_dim = ts_dim
self.intervals = intervals or ["{{ ds }}/{{ tomorrow_ds }}"]
self.num_shards = num_shards
self.target_partition_size = target_partition_size
self.query_granularity = query_granularity
self.segment_granularity = segment_granularity
self.metric_spec = metric_spec or [{"name": "count", "type": "count"}]
self.hive_cli_conn_id = hive_cli_conn_id
self.hadoop_dependency_coordinates = hadoop_dependency_coordinates
self.druid_ingest_conn_id = druid_ingest_conn_id
self.metastore_conn_id = metastore_conn_id
self.hive_tblproperties = hive_tblproperties or {}
self.job_properties = job_properties
def execute(self, context: Context) -> None:
hive = HiveCliHook(hive_cli_conn_id=self.hive_cli_conn_id)
self.log.info("Extracting data from Hive")
hive_table = "druid." + context["task_instance_key_str"].replace(".", "_")
sql = self.sql.strip().strip(";")
tblproperties = "".join(f", '{k}' = '{v}'" for k, v in self.hive_tblproperties.items())
hql = f"""\
SET mapred.output.compress=false;
SET hive.exec.compress.output=false;
DROP TABLE IF EXISTS {hive_table};
CREATE TABLE {hive_table}
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\t'
STORED AS TEXTFILE
TBLPROPERTIES ('serialization.null.format' = ''{tblproperties})
AS
{sql}
"""
self.log.info("Running command:\n %s", hql)
hive.run_cli(hql)
meta_hook = HiveMetastoreHook(self.metastore_conn_id)
# Get the Hive table and extract the columns
table = meta_hook.get_table(hive_table)
columns = [col.name for col in table.sd.cols]
# Get the path on hdfs
static_path = meta_hook.get_table(hive_table).sd.location
druid = DruidHook(druid_ingest_conn_id=self.druid_ingest_conn_id)
try:
index_spec = self.construct_ingest_query(
static_path=static_path,
columns=columns,
)
self.log.info("Inserting rows into Druid, hdfs path: %s", static_path)
druid.submit_indexing_job(index_spec)
self.log.info("Load seems to have succeeded!")
finally:
self.log.info("Cleaning up by dropping the temp Hive table %s", hive_table)
hql = f"DROP TABLE IF EXISTS {hive_table}"
hive.run_cli(hql)
def construct_ingest_query(self, static_path: str, columns: list[str]) -> dict[str, Any]:
"""
Builds an ingest query for an HDFS TSV load.
:param static_path: The path on hdfs where the data is
:param columns: List of all the columns that are available
"""
# backward compatibility for num_shards,
# but target_partition_size is the default setting
# and overwrites the num_shards
num_shards = self.num_shards
target_partition_size = self.target_partition_size
if self.target_partition_size == -1:
if self.num_shards == -1:
target_partition_size = DEFAULT_TARGET_PARTITION_SIZE
else:
num_shards = -1
metric_names = [m["fieldName"] for m in self.metric_spec if m["type"] != "count"]
# Take all the columns, which are not the time dimension
# or a metric, as the dimension columns
dimensions = [c for c in columns if c not in metric_names and c != self.ts_dim]
ingest_query_dict: dict[str, Any] = {
"type": "index_hadoop",
"spec": {
"dataSchema": {
"metricsSpec": self.metric_spec,
"granularitySpec": {
"queryGranularity": self.query_granularity,
"intervals": self.intervals,
"type": "uniform",
"segmentGranularity": self.segment_granularity,
},
"parser": {
"type": "string",
"parseSpec": {
"columns": columns,
"dimensionsSpec": {
"dimensionExclusions": [],
"dimensions": dimensions, # list of names
"spatialDimensions": [],
},
"timestampSpec": {"column": self.ts_dim, "format": "auto"},
"format": "tsv",
},
},
"dataSource": self.druid_datasource,
},
"tuningConfig": {
"type": "hadoop",
"jobProperties": {
"mapreduce.job.user.classpath.first": "false",
"mapreduce.map.output.compress": "false",
"mapreduce.output.fileoutputformat.compress": "false",
},
"partitionsSpec": {
"type": "hashed",
"targetPartitionSize": target_partition_size,
"numShards": num_shards,
},
},
"ioConfig": {"inputSpec": {"paths": static_path, "type": "static"}, "type": "hadoop"},
},
}
if self.job_properties:
ingest_query_dict["spec"]["tuningConfig"]["jobProperties"].update(self.job_properties)
if self.hadoop_dependency_coordinates:
ingest_query_dict["hadoopDependencyCoordinates"] = self.hadoop_dependency_coordinates
return ingest_query_dict
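# Editor's illustrative sketch, not part of the provider module: a typical
# instantiation of the operator above. The Hive query, datasource name, timestamp
# column and metric spec are assumptions made up for the example; the connection
# ids fall back to the documented defaults.
if __name__ == "__main__":
    HiveToDruidOperator(
        task_id="hive_to_druid_pageviews",
        sql="SELECT view_time, url, country, views FROM web.pageviews WHERE ds = '{{ ds }}'",
        druid_datasource="pageviews",
        ts_dim="view_time",
        metric_spec=[
            {"name": "count", "type": "count"},
            {"name": "views", "type": "longSum", "fieldName": "views"},
        ],
        intervals=["{{ ds }}/{{ tomorrow_ds }}"],
        query_granularity="HOUR",
        segment_granularity="DAY",
    )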
| 10,081 | 43.026201 | 110 | py |
airflow | airflow-main/airflow/providers/apache/druid/transfers/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/druid/operators/druid_check.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import warnings
from airflow.exceptions import AirflowProviderDeprecationWarning
from airflow.providers.common.sql.operators.sql import SQLCheckOperator
class DruidCheckOperator(SQLCheckOperator):
"""
This class is deprecated.
Please use :class:`airflow.providers.common.sql.operators.sql.SQLCheckOperator`.
"""
def __init__(self, druid_broker_conn_id: str = "druid_broker_default", **kwargs):
warnings.warn(
"""This class is deprecated.
Please use `airflow.providers.common.sql.operators.sql.SQLCheckOperator`.""",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(conn_id=druid_broker_conn_id, **kwargs)
| 1,544 | 36.682927 | 89 | py |
airflow | airflow-main/airflow/providers/apache/druid/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/druid/operators/druid.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.apache.druid.hooks.druid import DruidHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class DruidOperator(BaseOperator):
"""
    Allows submitting a task directly to Druid.
:param json_index_file: The filepath to the druid index specification
:param druid_ingest_conn_id: The connection id of the Druid overlord which
accepts index jobs
:param timeout: The interval (in seconds) between polling the Druid job for the status
of the ingestion job. Must be greater than or equal to 1
:param max_ingestion_time: The maximum ingestion time before assuming the job failed
"""
template_fields: Sequence[str] = ("json_index_file",)
template_ext: Sequence[str] = (".json",)
template_fields_renderers = {"json_index_file": "json"}
def __init__(
self,
*,
json_index_file: str,
druid_ingest_conn_id: str = "druid_ingest_default",
timeout: int = 1,
max_ingestion_time: int | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.json_index_file = json_index_file
self.conn_id = druid_ingest_conn_id
self.timeout = timeout
self.max_ingestion_time = max_ingestion_time
def execute(self, context: Context) -> None:
hook = DruidHook(
druid_ingest_conn_id=self.conn_id,
timeout=self.timeout,
max_ingestion_time=self.max_ingestion_time,
)
self.log.info("Submitting %s", self.json_index_file)
hook.submit_indexing_job(self.json_index_file)
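# Editor's illustrative sketch, not part of the provider module: submitting a
# templated ingestion spec file to the overlord. The spec filename is an
# assumption made up for the example; it is resolved like any other ".json"
# template relative to the DAG's template search path.
if __name__ == "__main__":
    DruidOperator(
        task_id="submit_druid_index_job",
        json_index_file="druid_index_spec.json",
        druid_ingest_conn_id="druid_ingest_default",
        timeout=5,
        max_ingestion_time=3600,
    )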
| 2,526 | 36.161765 | 90 | py |
airflow | airflow-main/airflow/providers/apache/druid/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/druid/hooks/druid.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
from typing import Any, Iterable
import requests
from pydruid.db import connect
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.providers.common.sql.hooks.sql import DbApiHook
class DruidHook(BaseHook):
"""
Connection to Druid overlord for ingestion.
To connect to a Druid cluster that is secured with the druid-basic-security
extension, add the username and password to the druid ingestion connection.
:param druid_ingest_conn_id: The connection id to the Druid overlord machine
which accepts index jobs
:param timeout: The interval between polling
the Druid job for the status of the ingestion job.
Must be greater than or equal to 1
:param max_ingestion_time: The maximum ingestion time before assuming the job failed
"""
def __init__(
self,
druid_ingest_conn_id: str = "druid_ingest_default",
timeout: int = 1,
max_ingestion_time: int | None = None,
) -> None:
super().__init__()
self.druid_ingest_conn_id = druid_ingest_conn_id
self.timeout = timeout
self.max_ingestion_time = max_ingestion_time
self.header = {"content-type": "application/json"}
if self.timeout < 1:
raise ValueError("Druid timeout should be equal or greater than 1")
def get_conn_url(self) -> str:
"""Get Druid connection url."""
conn = self.get_connection(self.druid_ingest_conn_id)
host = conn.host
port = conn.port
conn_type = conn.conn_type or "http"
endpoint = conn.extra_dejson.get("endpoint", "")
return f"{conn_type}://{host}:{port}/{endpoint}"
def get_auth(self) -> requests.auth.HTTPBasicAuth | None:
"""
Return username and password from connections tab as requests.auth.HTTPBasicAuth object.
If these details have not been set then returns None.
"""
conn = self.get_connection(self.druid_ingest_conn_id)
user = conn.login
password = conn.password
if user is not None and password is not None:
return requests.auth.HTTPBasicAuth(user, password)
else:
return None
def submit_indexing_job(self, json_index_spec: dict[str, Any] | str) -> None:
"""Submit Druid ingestion job."""
url = self.get_conn_url()
self.log.info("Druid ingestion spec: %s", json_index_spec)
req_index = requests.post(url, data=json_index_spec, headers=self.header, auth=self.get_auth())
code = req_index.status_code
if code != 200:
self.log.error("Error submitting the Druid job to %s (%s) %s", url, code, req_index.content)
raise AirflowException(f"Did not get 200 when submitting the Druid job to {url}")
req_json = req_index.json()
# Wait until the job is completed
druid_task_id = req_json["task"]
self.log.info("Druid indexing task-id: %s", druid_task_id)
running = True
sec = 0
while running:
req_status = requests.get(f"{url}/{druid_task_id}/status", auth=self.get_auth())
self.log.info("Job still running for %s seconds...", sec)
if self.max_ingestion_time and sec > self.max_ingestion_time:
# ensure that the job gets killed if the max ingestion time is exceeded
requests.post(f"{url}/{druid_task_id}/shutdown", auth=self.get_auth())
raise AirflowException(f"Druid ingestion took more than {self.max_ingestion_time} seconds")
time.sleep(self.timeout)
sec += self.timeout
status = req_status.json()["status"]["status"]
if status == "RUNNING":
running = True
elif status == "SUCCESS":
running = False # Great success!
elif status == "FAILED":
raise AirflowException("Druid indexing job failed, check console for more info")
else:
raise AirflowException(f"Could not get status of the job, got {status}")
self.log.info("Successful index")
class DruidDbApiHook(DbApiHook):
"""
Interact with Druid broker.
This hook is purely for users to query druid broker.
For ingestion, please use druidHook.
"""
conn_name_attr = "druid_broker_conn_id"
default_conn_name = "druid_broker_default"
conn_type = "druid"
hook_name = "Druid"
supports_autocommit = False
def get_conn(self) -> connect:
"""Establish a connection to druid broker."""
conn = self.get_connection(getattr(self, self.conn_name_attr))
druid_broker_conn = connect(
host=conn.host,
port=conn.port,
path=conn.extra_dejson.get("endpoint", "/druid/v2/sql"),
scheme=conn.extra_dejson.get("schema", "http"),
user=conn.login,
password=conn.password,
)
self.log.info("Get the connection to druid broker on %s using user %s", conn.host, conn.login)
return druid_broker_conn
def get_uri(self) -> str:
"""
        Get the connection URI for the Druid broker.
        e.g.: druid://localhost:8082/druid/v2/sql/
"""
conn = self.get_connection(getattr(self, self.conn_name_attr))
host = conn.host
if conn.port is not None:
host += f":{conn.port}"
conn_type = conn.conn_type or "druid"
endpoint = conn.extra_dejson.get("endpoint", "druid/v2/sql")
return f"{conn_type}://{host}/{endpoint}"
def set_autocommit(self, conn: connect, autocommit: bool) -> NotImplementedError:
raise NotImplementedError()
def insert_rows(
self,
table: str,
rows: Iterable[tuple[str]],
target_fields: Iterable[str] | None = None,
commit_every: int = 1000,
replace: bool = False,
**kwargs: Any,
) -> NotImplementedError:
raise NotImplementedError()
| 6,903 | 35.919786 | 107 | py |
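A minimal, hedged sketch of how the two Druid hooks above might be used directly from Python: submitting an ingestion spec through DruidHook and then querying the broker with DruidDbApiHook. The connection ids, datasource name, index spec and the `get_records` call inherited from DbApiHook are illustrative assumptions, not values taken from this file.

import json

from airflow.providers.apache.druid.hooks.druid import DruidDbApiHook, DruidHook

# Hypothetical ingestion spec; a real spec depends on your datasource and inputs.
index_spec = {
    "type": "index_parallel",
    "spec": {"dataSchema": {"dataSource": "example_datasource"}},
}

# Submit the spec to the overlord and poll every 5 seconds until it finishes,
# shutting the ingestion task down if it runs for more than one hour.
ingest_hook = DruidHook(
    druid_ingest_conn_id="druid_ingest_default",
    timeout=5,
    max_ingestion_time=3600,
)
ingest_hook.submit_indexing_job(json.dumps(index_spec))

# Query the broker once the data has been ingested.
broker_hook = DruidDbApiHook(druid_broker_conn_id="druid_broker_default")
rows = broker_hook.get_records("SELECT COUNT(*) AS c FROM example_datasource")
print(rows)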
airflow | airflow-main/airflow/providers/apache/pig/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.1.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-pig:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,535 | 35.571429 | 119 | py |
airflow | airflow-main/airflow/providers/apache/pig/operators/pig.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.apache.pig.hooks.pig import PigCliHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class PigOperator(BaseOperator):
"""
Executes pig script.
:param pig: the pig latin script to be executed. (templated)
    :param pig_cli_conn_id: reference to the Pig CLI connection id
:param pigparams_jinja_translate: when True, pig params-type templating
${var} gets translated into jinja-type templating {{ var }}. Note that
you may want to use this along with the
``DAG(user_defined_macros=myargs)`` parameter. View the DAG
object documentation for more details.
:param pig_opts: pig options, such as: -x tez, -useHCatalog, ... - space separated list
:param pig_properties: pig properties, additional pig properties passed as list
"""
template_fields: Sequence[str] = ("pig", "pig_opts", "pig_properties")
template_ext: Sequence[str] = (
".pig",
".piglatin",
)
ui_color = "#f0e4ec"
def __init__(
self,
*,
pig: str,
pig_cli_conn_id: str = "pig_cli_default",
pigparams_jinja_translate: bool = False,
pig_opts: str | None = None,
pig_properties: list[str] | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.pigparams_jinja_translate = pigparams_jinja_translate
self.pig = pig
self.pig_cli_conn_id = pig_cli_conn_id
self.pig_opts = pig_opts
self.pig_properties = pig_properties
self.hook: PigCliHook | None = None
def prepare_template(self):
if self.pigparams_jinja_translate:
self.pig = re.sub(r"(\$([a-zA-Z_][a-zA-Z0-9_]*))", r"{{ \g<2> }}", self.pig)
def execute(self, context: Context):
self.log.info("Executing: %s", self.pig)
self.hook = PigCliHook(pig_cli_conn_id=self.pig_cli_conn_id, pig_properties=self.pig_properties)
self.hook.run_cli(pig=self.pig, pig_opts=self.pig_opts)
def on_kill(self):
self.hook.kill()
| 2,978 | 35.329268 | 104 | py |
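A short, hedged sketch of wiring the PigOperator above into a DAG. The DAG id, schedule, script body and the extra CLI flags are placeholder assumptions for illustration only.

import datetime

from airflow import DAG
from airflow.providers.apache.pig.operators.pig import PigOperator

with DAG(
    dag_id="example_pig",  # hypothetical DAG id
    start_date=datetime.datetime(2023, 1, 1),
    schedule=None,
    catchup=False,
):
    # Runs a tiny Pig Latin script on Tez; pig_opts is a space-separated string and
    # pig_properties is a list of extra tokens, mirroring the docstring above.
    run_pig = PigOperator(
        task_id="run_pig_script",
        pig="ls /;",
        pig_opts="-x tez",
        pig_properties=["-Dpig.tmpfilecompression=true"],
    )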
airflow | airflow-main/airflow/providers/apache/pig/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/pig/hooks/pig.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import subprocess
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Any
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
class PigCliHook(BaseHook):
"""Simple wrapper around the pig CLI.
:param pig_cli_conn_id: Connection id used by the hook
:param pig_properties: additional properties added after pig cli command as list of strings.
"""
conn_name_attr = "pig_cli_conn_id"
default_conn_name = "pig_cli_default"
conn_type = "pig_cli"
hook_name = "Pig Client Wrapper"
def __init__(
self, pig_cli_conn_id: str = default_conn_name, pig_properties: list[str] | None = None
) -> None:
super().__init__()
conn = self.get_connection(pig_cli_conn_id)
conn_pig_properties = conn.extra_dejson.get("pig_properties")
if conn_pig_properties:
raise RuntimeError(
"The PigCliHook used to have possibility of passing `pig_properties` to the Hook,"
" however with the 4.0.0 version of `apache-pig` provider it has been removed. You should"
" use ``pig_opts`` (space separated string) or ``pig_properties`` (string list) in the"
" PigOperator. You can also pass ``pig-properties`` in the PigCliHook `init`. Currently,"
f" the {pig_cli_conn_id} connection has those extras: `{conn_pig_properties}`."
)
self.pig_properties = pig_properties if pig_properties else []
self.conn = conn
self.sub_process = None
def run_cli(self, pig: str, pig_opts: str | None = None, verbose: bool = True) -> Any:
"""
Run a pig script using the pig cli.
>>> ph = PigCliHook()
>>> result = ph.run_cli("ls /;", pig_opts="-x mapreduce")
>>> ("hdfs://" in result)
True
"""
with TemporaryDirectory(prefix="airflow_pigop_") as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(pig.encode("utf-8"))
f.flush()
fname = f.name
pig_bin = "pig"
cmd_extra: list[str] = []
pig_cmd = [pig_bin]
if self.pig_properties:
pig_cmd.extend(self.pig_properties)
if pig_opts:
pig_opts_list = pig_opts.split()
pig_cmd.extend(pig_opts_list)
pig_cmd.extend(["-f", fname] + cmd_extra)
if verbose:
self.log.info("%s", " ".join(pig_cmd))
sub_process: Any = subprocess.Popen(
pig_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=tmp_dir, close_fds=True
)
self.sub_process = sub_process
stdout = ""
for line in iter(sub_process.stdout.readline, b""):
stdout += line.decode("utf-8")
if verbose:
self.log.info(line.strip())
sub_process.wait()
if sub_process.returncode:
raise AirflowException(stdout)
return stdout
def kill(self) -> None:
"""Kill Pig job."""
if self.sub_process:
if self.sub_process.poll() is None:
self.log.info("Killing the Pig job")
self.sub_process.kill()
| 4,253 | 38.027523 | 106 | py |
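A hedged sketch of calling the hook above directly, outside of the PigOperator; the connection id and property values are assumptions.

from airflow.providers.apache.pig.hooks.pig import PigCliHook

# `pig_properties` tokens are appended right after the `pig` binary, while
# `pig_opts` is split on whitespace and inserted before `-f <script>`.
hook = PigCliHook(
    pig_cli_conn_id="pig_cli_default",
    pig_properties=["-Dpig.tmpfilecompression=true"],
)
stdout = hook.run_cli(pig="ls /;", pig_opts="-x mapreduce", verbose=True)
print(stdout)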
airflow | airflow-main/airflow/providers/apache/pig/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/flink/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "1.1.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-flink:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,537 | 35.619048 | 121 | py |
airflow | airflow-main/airflow/providers/apache/flink/operators/flink_kubernetes.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, Sequence
from kubernetes.client import CoreV1Api
from airflow.models import BaseOperator
from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class FlinkKubernetesOperator(BaseOperator):
"""
Creates flinkDeployment object in kubernetes cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:FlinkKubernetesOperator`
.. seealso::
For more detail about Flink Deployment Object have a look at the reference:
https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main/docs/custom-resource/reference/#flinkdeployment
:param application_file: Defines Kubernetes 'custom_resource_definition' of 'flinkDeployment' as either a
path to a '.yaml' file, '.json' file, YAML string or JSON string.
:param namespace: kubernetes namespace to put flinkDeployment
:param kubernetes_conn_id: The :ref:`kubernetes connection id <howto/connection:kubernetes>`
for the to Kubernetes cluster.
:param api_group: kubernetes api group of flinkDeployment
:param api_version: kubernetes api version of flinkDeployment
:param in_cluster: run kubernetes client with in_cluster configuration.
:param cluster_context: context that points to kubernetes cluster.
Ignored when in_cluster is True. If None, current-context is used.
:param config_file: The path to the Kubernetes config file. (templated)
If not specified, default value is ``~/.kube/config``
"""
template_fields: Sequence[str] = ("application_file", "namespace")
template_ext: Sequence[str] = (".yaml", ".yml", ".json")
ui_color = "#f4a460"
def __init__(
self,
*,
application_file: str,
namespace: str | None = None,
kubernetes_conn_id: str = "kubernetes_default",
api_group: str = "flink.apache.org",
api_version: str = "v1beta1",
in_cluster: bool | None = None,
cluster_context: str | None = None,
config_file: str | None = None,
plural: str = "flinkdeployments",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.application_file = application_file
self.namespace = namespace
self.kubernetes_conn_id = kubernetes_conn_id
self.api_group = api_group
self.api_version = api_version
self.plural = plural
self.in_cluster = in_cluster
self.cluster_context = cluster_context
self.config_file = config_file
@cached_property
def hook(self) -> KubernetesHook:
hook = KubernetesHook(
conn_id=self.kubernetes_conn_id,
in_cluster=self.in_cluster,
config_file=self.config_file,
cluster_context=self.cluster_context,
)
return hook
@cached_property
def client(self) -> CoreV1Api:
return self.hook.core_v1_client
def execute(self, context: Context):
self.log.info(
"Creating flinkApplication with Context: %s and op_context: %s", self.cluster_context, context
)
self.hook.custom_object_client.list_cluster_custom_object(
group=self.api_group, version=self.api_version, plural=self.plural
)
self.log.info("body=self.application_file: %s", self.application_file)
response = self.hook.create_custom_object(
group=self.api_group,
version=self.api_version,
plural=self.plural,
body=self.application_file,
namespace=self.namespace,
)
return response
| 4,598 | 37.647059 | 126 | py |
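A hedged sketch of submitting a FlinkDeployment with the operator above, passing the manifest as a YAML string. The image, jar URI, resource sizes and DAG wiring are placeholder assumptions, not values from the provider.

import datetime

from airflow import DAG
from airflow.providers.apache.flink.operators.flink_kubernetes import FlinkKubernetesOperator

# Minimal FlinkDeployment manifest; adjust image, jarURI and resources for your cluster.
FLINK_DEPLOYMENT = """
apiVersion: flink.apache.org/v1beta1
kind: FlinkDeployment
metadata:
  name: basic-example
spec:
  image: flink:1.17
  flinkVersion: v1_17
  serviceAccount: flink
  jobManager:
    resource:
      memory: "2048m"
      cpu: 1
  taskManager:
    resource:
      memory: "2048m"
      cpu: 1
  job:
    jarURI: local:///opt/flink/examples/streaming/StateMachineExample.jar
    parallelism: 2
    upgradeMode: stateless
"""

with DAG(
    dag_id="example_flink_kubernetes",  # hypothetical DAG id
    start_date=datetime.datetime(2023, 1, 1),
    schedule=None,
    catchup=False,
):
    submit_flink_app = FlinkKubernetesOperator(
        task_id="submit_flink_app",
        application_file=FLINK_DEPLOYMENT,
        namespace="default",
        kubernetes_conn_id="kubernetes_default",
    )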
airflow | airflow-main/airflow/providers/apache/flink/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/flink/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/flink/sensors/flink_kubernetes.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Sequence
from kubernetes import client
from airflow.exceptions import AirflowException
from airflow.providers.cncf.kubernetes.hooks.kubernetes import KubernetesHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class FlinkKubernetesSensor(BaseSensorOperator):
"""
Checks flinkDeployment object in kubernetes cluster.
.. seealso::
For more detail about Flink Deployment Object have a look at the reference:
https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main/docs/custom-resource/reference/#flinkdeployment
:param application_name: flink Application resource name
:param namespace: the kubernetes namespace where the flinkDeployment reside in
:param kubernetes_conn_id: The :ref:`kubernetes connection<howto/connection:kubernetes>`
to Kubernetes cluster
:param attach_log: determines whether logs for driver pod should be appended to the sensor log
:param api_group: kubernetes api group of flinkDeployment
:param api_version: kubernetes api version of flinkDeployment
:param plural: kubernetes api custom object plural
"""
template_fields: Sequence[str] = ("application_name", "namespace")
FAILURE_STATES = ("MISSING", "ERROR")
SUCCESS_STATES = ("READY",)
def __init__(
self,
*,
application_name: str,
attach_log: bool = False,
namespace: str | None = None,
kubernetes_conn_id: str = "kubernetes_default",
api_group: str = "flink.apache.org",
api_version: str = "v1beta1",
plural: str = "flinkdeployments",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.application_name = application_name
self.attach_log = attach_log
self.namespace = namespace
self.kubernetes_conn_id = kubernetes_conn_id
self.hook = KubernetesHook(conn_id=self.kubernetes_conn_id)
self.api_group = api_group
self.api_version = api_version
self.plural = plural
def _log_driver(self, application_state: str, response: dict) -> None:
log_method = self.log.error if application_state in self.FAILURE_STATES else self.log.info
if not self.attach_log:
return
status_info = response["status"]
if "jobStatus" in status_info:
job_status = status_info["jobStatus"]
job_state = job_status["state"] if "state" in job_status else "StateFetchError"
self.log.info("Flink Job status is %s", job_state)
else:
return
task_manager_labels = status_info["taskManager"]["labelSelector"]
all_pods = self.hook.get_namespaced_pod_list(
namespace="default", watch=False, label_selector=task_manager_labels
)
namespace = response["metadata"]["namespace"]
if len(all_pods.items) > 0:
for task_manager in all_pods.items:
task_manager_pod_name = task_manager.metadata.name
self.log.info("Starting logging of task manager pod %s ", task_manager_pod_name)
try:
log = ""
for line in self.hook.get_pod_logs(task_manager_pod_name, namespace=namespace):
log += line.decode()
log_method(log)
except client.rest.ApiException as e:
self.log.warning(
"Could not read logs for pod %s. It may have been disposed.\n"
"Make sure timeToLiveSeconds is set on your flinkDeployment spec.\n"
"underlying exception: %s",
task_manager_pod_name,
e,
)
def poke(self, context: Context) -> bool:
self.log.info("Poking: %s", self.application_name)
response = self.hook.get_custom_object(
group=self.api_group,
version=self.api_version,
plural=self.plural,
name=self.application_name,
namespace=self.namespace,
)
try:
application_state = response["status"]["jobManagerDeploymentStatus"]
except KeyError:
return False
if self.attach_log and application_state in self.FAILURE_STATES + self.SUCCESS_STATES:
self._log_driver(application_state, response)
if application_state in self.FAILURE_STATES:
raise AirflowException(f"Flink application failed with state: {application_state}")
elif application_state in self.SUCCESS_STATES:
self.log.info("Flink application ended successfully")
return True
else:
self.log.info("Flink application is still in state: %s", application_state)
return False
| 5,714 | 41.022059 | 126 | py |
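A sketch of pairing the sensor above with the FlinkKubernetesOperator sketch shown earlier: the sensor polls the flinkDeployment until its jobManagerDeploymentStatus reaches READY and attaches task-manager logs on terminal states. The application name and namespace are placeholder assumptions and must match the submitted manifest.

from airflow.providers.apache.flink.sensors.flink_kubernetes import FlinkKubernetesSensor

# Belongs in the same DAG as the submit task sketched after the operator file above.
wait_for_flink_app = FlinkKubernetesSensor(
    task_id="wait_for_flink_app",
    application_name="basic-example",  # must equal metadata.name of the manifest
    namespace="default",
    kubernetes_conn_id="kubernetes_default",
    attach_log=True,
    poke_interval=60,  # BaseSensorOperator argument: seconds between pokes
)

# Typical wiring: submit first, then wait.
# submit_flink_app >> wait_for_flink_app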
airflow | airflow-main/airflow/providers/apache/flink/sensors/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/cassandra/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-cassandra:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,541 | 35.714286 | 125 | py |
airflow | airflow-main/airflow/providers/apache/cassandra/hooks/cassandra.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains hook to integrate with Apache Cassandra."""
from __future__ import annotations
from typing import Any, Union
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster, Session
from cassandra.policies import (
DCAwareRoundRobinPolicy,
RoundRobinPolicy,
TokenAwarePolicy,
WhiteListRoundRobinPolicy,
)
from airflow.hooks.base import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
Policy = Union[DCAwareRoundRobinPolicy, RoundRobinPolicy, TokenAwarePolicy, WhiteListRoundRobinPolicy]
class CassandraHook(BaseHook, LoggingMixin):
"""
Hook used to interact with Cassandra.
Contact points can be specified as a comma-separated string in the 'hosts'
field of the connection.
Port can be specified in the port field of the connection.
If SSL is enabled in Cassandra, pass in a dict in the extra field as kwargs for
``ssl.wrap_socket()``. For example::
{
'ssl_options' : {
'ca_certs' : PATH_TO_CA_CERTS
}
}
Default load balancing policy is RoundRobinPolicy. To specify a different
LB policy::
- DCAwareRoundRobinPolicy
{
'load_balancing_policy': 'DCAwareRoundRobinPolicy',
'load_balancing_policy_args': {
'local_dc': LOCAL_DC_NAME, // optional
'used_hosts_per_remote_dc': SOME_INT_VALUE, // optional
}
}
- WhiteListRoundRobinPolicy
{
'load_balancing_policy': 'WhiteListRoundRobinPolicy',
'load_balancing_policy_args': {
'hosts': ['HOST1', 'HOST2', 'HOST3']
}
}
- TokenAwarePolicy
{
'load_balancing_policy': 'TokenAwarePolicy',
'load_balancing_policy_args': {
'child_load_balancing_policy': CHILD_POLICY_NAME, // optional
'child_load_balancing_policy_args': { ... } // optional
}
}
For details of the Cluster config, see cassandra.cluster.
"""
conn_name_attr = "cassandra_conn_id"
default_conn_name = "cassandra_default"
conn_type = "cassandra"
hook_name = "Cassandra"
def __init__(self, cassandra_conn_id: str = default_conn_name):
super().__init__()
conn = self.get_connection(cassandra_conn_id)
conn_config = {}
if conn.host:
conn_config["contact_points"] = conn.host.split(",")
if conn.port:
conn_config["port"] = int(conn.port)
if conn.login:
conn_config["auth_provider"] = PlainTextAuthProvider(username=conn.login, password=conn.password)
policy_name = conn.extra_dejson.get("load_balancing_policy", None)
policy_args = conn.extra_dejson.get("load_balancing_policy_args", {})
lb_policy = self.get_lb_policy(policy_name, policy_args)
if lb_policy:
conn_config["load_balancing_policy"] = lb_policy
cql_version = conn.extra_dejson.get("cql_version", None)
if cql_version:
conn_config["cql_version"] = cql_version
ssl_options = conn.extra_dejson.get("ssl_options", None)
if ssl_options:
conn_config["ssl_options"] = ssl_options
protocol_version = conn.extra_dejson.get("protocol_version", None)
if protocol_version:
conn_config["protocol_version"] = protocol_version
self.cluster = Cluster(**conn_config)
self.keyspace = conn.schema
self.session = None
def get_conn(self) -> Session:
"""Returns a cassandra Session object."""
if self.session and not self.session.is_shutdown:
return self.session
self.session = self.cluster.connect(self.keyspace)
return self.session
def get_cluster(self) -> Cluster:
"""Returns Cassandra cluster."""
return self.cluster
def shutdown_cluster(self) -> None:
"""Closes all sessions and connections associated with this Cluster."""
if not self.cluster.is_shutdown:
self.cluster.shutdown()
@staticmethod
def get_lb_policy(policy_name: str, policy_args: dict[str, Any]) -> Policy:
"""
Creates load balancing policy.
:param policy_name: Name of the policy to use.
:param policy_args: Parameters for the policy.
"""
if policy_name == "DCAwareRoundRobinPolicy":
local_dc = policy_args.get("local_dc", "")
used_hosts_per_remote_dc = int(policy_args.get("used_hosts_per_remote_dc", 0))
return DCAwareRoundRobinPolicy(local_dc, used_hosts_per_remote_dc)
if policy_name == "WhiteListRoundRobinPolicy":
hosts = policy_args.get("hosts")
if not hosts:
raise Exception("Hosts must be specified for WhiteListRoundRobinPolicy")
return WhiteListRoundRobinPolicy(hosts)
if policy_name == "TokenAwarePolicy":
allowed_child_policies = (
"RoundRobinPolicy",
"DCAwareRoundRobinPolicy",
"WhiteListRoundRobinPolicy",
)
child_policy_name = policy_args.get("child_load_balancing_policy", "RoundRobinPolicy")
child_policy_args = policy_args.get("child_load_balancing_policy_args", {})
if child_policy_name not in allowed_child_policies:
return TokenAwarePolicy(RoundRobinPolicy())
child_policy = CassandraHook.get_lb_policy(child_policy_name, child_policy_args)
return TokenAwarePolicy(child_policy)
# Fallback to default RoundRobinPolicy
return RoundRobinPolicy()
def table_exists(self, table: str) -> bool:
"""
Checks if a table exists in Cassandra.
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
"""
keyspace = self.keyspace
if "." in table:
keyspace, table = table.split(".", 1)
cluster_metadata = self.get_conn().cluster.metadata
return keyspace in cluster_metadata.keyspaces and table in cluster_metadata.keyspaces[keyspace].tables
def record_exists(self, table: str, keys: dict[str, str]) -> bool:
"""
Checks if a record exists in Cassandra.
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:param keys: The keys and their values to check the existence.
"""
keyspace = self.keyspace
if "." in table:
keyspace, table = table.split(".", 1)
ks_str = " AND ".join(f"{key}=%({key})s" for key in keys)
query = f"SELECT * FROM {keyspace}.{table} WHERE {ks_str}"
try:
result = self.get_conn().execute(query, keys)
return result.one() is not None
except Exception:
return False
| 7,874 | 36.679426 | 110 | py |
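A hedged sketch of using the hook above directly; the connection id, keyspace, table and key values are assumptions. Extras such as ssl_options or load_balancing_policy would be configured on the cassandra_default connection as described in the docstring.

from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook

hook = CassandraHook(cassandra_conn_id="cassandra_default")

# Existence checks -- the same calls the sensors in this provider rely on.
if hook.table_exists("my_keyspace.my_table"):
    present = hook.record_exists("my_keyspace.my_table", {"id": "42"})
    print("record present:", present)

# Arbitrary CQL through the underlying driver session.
session = hook.get_conn()
row = session.execute("SELECT release_version FROM system.local").one()
print(row)

# Release driver resources when done.
hook.shutdown_cluster()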
airflow | airflow-main/airflow/providers/apache/cassandra/hooks/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/cassandra/sensors/table.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CassandraTableSensor(BaseSensorOperator):
"""
Checks for the existence of a table in a Cassandra cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CassandraTableSensor`
For example, if you want to wait for a table called 't' to be created
in a keyspace 'k', instantiate it as follows:
>>> cassandra_sensor = CassandraTableSensor(table="k.t",
... cassandra_conn_id="cassandra_default",
... task_id="cassandra_sensor")
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:param cassandra_conn_id: The connection ID to use
when connecting to Cassandra cluster
"""
template_fields: Sequence[str] = ("table",)
def __init__(
self,
*,
table: str,
cassandra_conn_id: str = CassandraHook.default_conn_name,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.cassandra_conn_id = cassandra_conn_id
self.table = table
def poke(self, context: Context) -> bool:
self.log.info("Sensor check existence of table: %s", self.table)
hook = CassandraHook(self.cassandra_conn_id)
return hook.table_exists(self.table)
| 2,447 | 35 | 86 | py |
airflow | airflow-main/airflow/providers/apache/cassandra/sensors/record.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.providers.apache.cassandra.hooks.cassandra import CassandraHook
from airflow.sensors.base import BaseSensorOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class CassandraRecordSensor(BaseSensorOperator):
"""
Checks for the existence of a record in a Cassandra cluster.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CassandraRecordSensor`
For example, if you want to wait for a record that has values 'v1' and 'v2' for each
primary keys 'p1' and 'p2' to be populated in keyspace 'k' and table 't',
instantiate it as follows:
>>> cassandra_sensor = CassandraRecordSensor(table="k.t",
... keys={"p1": "v1", "p2": "v2"},
... cassandra_conn_id="cassandra_default",
... task_id="cassandra_sensor")
:param table: Target Cassandra table.
Use dot notation to target a specific keyspace.
:param keys: The keys and their values to be monitored
:param cassandra_conn_id: The connection ID to use
when connecting to Cassandra cluster
"""
template_fields: Sequence[str] = ("table", "keys")
def __init__(
self,
*,
keys: dict[str, str],
table: str,
cassandra_conn_id: str = CassandraHook.default_conn_name,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.cassandra_conn_id = cassandra_conn_id
self.table = table
self.keys = keys
def poke(self, context: Context) -> bool:
self.log.info("Sensor check existence of record: %s", self.keys)
hook = CassandraHook(self.cassandra_conn_id)
return hook.record_exists(self.table, self.keys)
| 2,739 | 37.055556 | 88 | py |
airflow | airflow-main/airflow/providers/apache/cassandra/sensors/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/spark/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "4.1.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-spark:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,537 | 35.619048 | 121 | py |
airflow | airflow-main/airflow/providers/apache/spark/operators/spark_submit.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.apache.spark.hooks.spark_submit import SparkSubmitHook
from airflow.settings import WEB_COLORS
if TYPE_CHECKING:
from airflow.utils.context import Context
class SparkSubmitOperator(BaseOperator):
"""
Wrap the spark-submit binary to kick off a spark-submit job; requires "spark-submit" binary in the PATH.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SparkSubmitOperator`
:param application: The application that submitted as a job, either jar or py file. (templated)
:param conf: Arbitrary Spark configuration properties (templated)
:param conn_id: The :ref:`spark connection id <howto/connection:spark>` as configured
in Airflow administration. When an invalid connection_id is supplied, it will default to yarn.
:param files: Upload additional files to the executor running the job, separated by a
comma. Files will be placed in the working directory of each executor.
For example, serialized objects. (templated)
:param py_files: Additional python files used by the job, can be .zip, .egg or .py. (templated)
:param jars: Submit additional jars to upload and place them in executor classpath. (templated)
:param driver_class_path: Additional, driver-specific, classpath settings. (templated)
:param java_class: the main class of the Java application
:param packages: Comma-separated list of maven coordinates of jars to include on the
driver and executor classpaths. (templated)
:param exclude_packages: Comma-separated list of maven coordinates of jars to exclude
while resolving the dependencies provided in 'packages' (templated)
:param repositories: Comma-separated list of additional remote repositories to search
for the maven coordinates given with 'packages'
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
(Default: all the available cores on the worker)
:param executor_cores: (Standalone & YARN only) Number of cores per executor (Default: 2)
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
:param keytab: Full path to the file that contains the keytab (templated)
:param principal: The name of the kerberos principal used for keytab (templated)
:param proxy_user: User to impersonate when submitting the application (templated)
    :param name: Name of the job (default arrow-spark). (templated)
:param num_executors: Number of executors to launch
:param status_poll_interval: Seconds to wait between polls of driver status in cluster
mode (Default: 1)
:param application_args: Arguments for the application being submitted (templated)
:param env_vars: Environment variables for spark-submit. It supports yarn and k8s mode too. (templated)
:param verbose: Whether to pass the verbose flag to spark-submit process for debugging
:param spark_binary: The command to use for spark submit.
Some distros may use spark2-submit or spark3-submit.
"""
template_fields: Sequence[str] = (
"_application",
"_conf",
"_files",
"_py_files",
"_jars",
"_driver_class_path",
"_packages",
"_exclude_packages",
"_keytab",
"_principal",
"_proxy_user",
"_name",
"_application_args",
"_env_vars",
)
ui_color = WEB_COLORS["LIGHTORANGE"]
def __init__(
self,
*,
application: str = "",
conf: dict[str, Any] | None = None,
conn_id: str = "spark_default",
files: str | None = None,
py_files: str | None = None,
archives: str | None = None,
driver_class_path: str | None = None,
jars: str | None = None,
java_class: str | None = None,
packages: str | None = None,
exclude_packages: str | None = None,
repositories: str | None = None,
total_executor_cores: int | None = None,
executor_cores: int | None = None,
executor_memory: str | None = None,
driver_memory: str | None = None,
keytab: str | None = None,
principal: str | None = None,
proxy_user: str | None = None,
name: str = "arrow-spark",
num_executors: int | None = None,
status_poll_interval: int = 1,
application_args: list[Any] | None = None,
env_vars: dict[str, Any] | None = None,
verbose: bool = False,
spark_binary: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self._application = application
self._conf = conf
self._files = files
self._py_files = py_files
self._archives = archives
self._driver_class_path = driver_class_path
self._jars = jars
self._java_class = java_class
self._packages = packages
self._exclude_packages = exclude_packages
self._repositories = repositories
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._keytab = keytab
self._principal = principal
self._proxy_user = proxy_user
self._name = name
self._num_executors = num_executors
self._status_poll_interval = status_poll_interval
self._application_args = application_args
self._env_vars = env_vars
self._verbose = verbose
self._spark_binary = spark_binary
self._hook: SparkSubmitHook | None = None
self._conn_id = conn_id
def execute(self, context: Context) -> None:
"""Call the SparkSubmitHook to run the provided spark job."""
if self._hook is None:
self._hook = self._get_hook()
self._hook.submit(self._application)
def on_kill(self) -> None:
if self._hook is None:
self._hook = self._get_hook()
self._hook.on_kill()
def _get_hook(self) -> SparkSubmitHook:
return SparkSubmitHook(
conf=self._conf,
conn_id=self._conn_id,
files=self._files,
py_files=self._py_files,
archives=self._archives,
driver_class_path=self._driver_class_path,
jars=self._jars,
java_class=self._java_class,
packages=self._packages,
exclude_packages=self._exclude_packages,
repositories=self._repositories,
total_executor_cores=self._total_executor_cores,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
driver_memory=self._driver_memory,
keytab=self._keytab,
principal=self._principal,
proxy_user=self._proxy_user,
name=self._name,
num_executors=self._num_executors,
status_poll_interval=self._status_poll_interval,
application_args=self._application_args,
env_vars=self._env_vars,
verbose=self._verbose,
spark_binary=self._spark_binary,
)
| 8,370 | 42.827225 | 108 | py |
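A short sketch of a DAG task built on the operator above; the application path, resource sizes and arguments are placeholder assumptions.

import datetime

from airflow import DAG
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator

with DAG(
    dag_id="example_spark_submit",  # hypothetical DAG id
    start_date=datetime.datetime(2023, 1, 1),
    schedule=None,
    catchup=False,
):
    submit_job = SparkSubmitOperator(
        task_id="submit_job",
        application="/opt/spark/jobs/etl_job.py",  # hypothetical PySpark application
        conn_id="spark_default",
        name="example-etl",
        conf={"spark.sql.shuffle.partitions": "200"},
        num_executors=4,
        executor_cores=2,
        executor_memory="2G",
        driver_memory="1G",
        application_args=["--date", "{{ ds }}"],  # application_args is templated
    )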
airflow | airflow-main/airflow/providers/apache/spark/operators/spark_sql.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Sequence
from airflow.models import BaseOperator
from airflow.providers.apache.spark.hooks.spark_sql import SparkSqlHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class SparkSqlOperator(BaseOperator):
"""
Execute Spark SQL query.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SparkSqlOperator`
:param sql: The SQL query to execute. (templated)
:param conf: arbitrary Spark configuration property
:param conn_id: connection_id string
:param total_executor_cores: (Standalone & Mesos only) Total cores for all
executors (Default: all the available cores on the worker)
:param executor_cores: (Standalone & YARN only) Number of cores per
executor (Default: 2)
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:param keytab: Full path to the file that contains the keytab
:param master: spark://host:port, mesos://host:port, yarn, or local
(Default: The ``host`` and ``port`` set in the Connection, or ``"yarn"``)
:param name: Name of the job
:param num_executors: Number of executors to launch
:param verbose: Whether to pass the verbose flag to spark-sql
:param yarn_queue: The YARN queue to submit to
(Default: The ``queue`` value set in the Connection, or ``"default"``)
"""
template_fields: Sequence[str] = ("_sql",)
template_ext: Sequence[str] = (".sql", ".hql")
template_fields_renderers = {"_sql": "sql"}
def __init__(
self,
*,
sql: str,
conf: str | None = None,
conn_id: str = "spark_sql_default",
total_executor_cores: int | None = None,
executor_cores: int | None = None,
executor_memory: str | None = None,
keytab: str | None = None,
principal: str | None = None,
master: str | None = None,
name: str = "default-name",
num_executors: int | None = None,
verbose: bool = True,
yarn_queue: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self._sql = sql
self._conf = conf
self._conn_id = conn_id
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._keytab = keytab
self._principal = principal
self._master = master
self._name = name
self._num_executors = num_executors
self._verbose = verbose
self._yarn_queue = yarn_queue
self._hook: SparkSqlHook | None = None
def execute(self, context: Context) -> None:
"""Call the SparkSqlHook to run the provided sql query."""
if self._hook is None:
self._hook = self._get_hook()
self._hook.run_query()
def on_kill(self) -> None:
if self._hook is None:
self._hook = self._get_hook()
self._hook.kill()
def _get_hook(self) -> SparkSqlHook:
"""Get SparkSqlHook."""
return SparkSqlHook(
sql=self._sql,
conf=self._conf,
conn_id=self._conn_id,
total_executor_cores=self._total_executor_cores,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
keytab=self._keytab,
principal=self._principal,
name=self._name,
num_executors=self._num_executors,
master=self._master,
verbose=self._verbose,
yarn_queue=self._yarn_queue,
)
| 4,508 | 36.264463 | 83 | py |
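A hedged sketch of the operator above; the query, connection id and queue are placeholders. Because `sql` is templated with .sql/.hql extensions, it could equally point at a script file shipped alongside the DAG.

from airflow.providers.apache.spark.operators.spark_sql import SparkSqlOperator

refresh_table = SparkSqlOperator(
    task_id="refresh_table",
    sql="SELECT COUNT(*) FROM example_db.example_table WHERE ds = '{{ ds }}'",
    conn_id="spark_sql_default",
    master="yarn",
    yarn_queue="default",
    name="refresh-example-table",
)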
airflow | airflow-main/airflow/providers/apache/spark/operators/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/spark/operators/spark_jdbc.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from airflow.providers.apache.spark.hooks.spark_jdbc import SparkJDBCHook
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator
if TYPE_CHECKING:
from airflow.utils.context import Context
class SparkJDBCOperator(SparkSubmitOperator):
"""
Extend the SparkSubmitOperator to perform data transfers to/from JDBC-based databases with Apache Spark.
As with the SparkSubmitOperator, it assumes that the "spark-submit" binary is available on the PATH.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:SparkJDBCOperator`
:param spark_app_name: Name of the job (default airflow-spark-jdbc)
:param spark_conn_id: The :ref:`spark connection id <howto/connection:spark>`
as configured in Airflow administration
:param spark_conf: Any additional Spark configuration properties
:param spark_py_files: Additional python files used (.zip, .egg, or .py)
:param spark_files: Additional files to upload to the container running the job
:param spark_jars: Additional jars to upload and add to the driver and
executor classpath
:param num_executors: number of executor to run. This should be set so as to manage
the number of connections made with the JDBC database
:param executor_cores: Number of cores per executor
:param executor_memory: Memory per executor (e.g. 1000M, 2G)
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G)
:param verbose: Whether to pass the verbose flag to spark-submit for debugging
:param keytab: Full path to the file that contains the keytab
:param principal: The name of the kerberos principal used for keytab
:param cmd_type: Which way the data should flow. 2 possible values:
spark_to_jdbc: data written by spark from metastore to jdbc
jdbc_to_spark: data written by spark from jdbc to metastore
:param jdbc_table: The name of the JDBC table
:param jdbc_conn_id: Connection id used for connection to JDBC database
:param jdbc_driver: Name of the JDBC driver to use for the JDBC connection. This
driver (usually a jar) should be passed in the 'jars' parameter
:param metastore_table: The name of the metastore table,
:param jdbc_truncate: (spark_to_jdbc only) Whether or not Spark should truncate or
drop and recreate the JDBC table. This only takes effect if
'save_mode' is set to Overwrite. Also, if the schema is
different, Spark cannot truncate, and will drop and recreate
:param save_mode: The Spark save-mode to use (e.g. overwrite, append, etc.)
:param save_format: (jdbc_to_spark-only) The Spark save-format to use (e.g. parquet)
:param batch_size: (spark_to_jdbc only) The size of the batch to insert per round
trip to the JDBC database. Defaults to 1000
:param fetch_size: (jdbc_to_spark only) The size of the batch to fetch per round trip
from the JDBC database. Default depends on the JDBC driver
:param num_partitions: The maximum number of partitions that can be used by Spark
simultaneously, both for spark_to_jdbc and jdbc_to_spark
operations. This will also cap the number of JDBC connections
that can be opened
:param partition_column: (jdbc_to_spark-only) A numeric column to be used to
partition the metastore table by. If specified, you must
also specify:
num_partitions, lower_bound, upper_bound
:param lower_bound: (jdbc_to_spark-only) Lower bound of the range of the numeric
partition column to fetch. If specified, you must also specify:
num_partitions, partition_column, upper_bound
:param upper_bound: (jdbc_to_spark-only) Upper bound of the range of the numeric
partition column to fetch. If specified, you must also specify:
num_partitions, partition_column, lower_bound
:param create_table_column_types: (spark_to_jdbc-only) The database column data types
to use instead of the defaults, when creating the
table. Data type information should be specified in
the same format as CREATE TABLE columns syntax
(e.g: "name CHAR(64), comments VARCHAR(1024)").
The specified types should be valid spark sql data
types.
"""
def __init__(
self,
*,
spark_app_name: str = "airflow-spark-jdbc",
spark_conn_id: str = "spark-default",
spark_conf: dict[str, Any] | None = None,
spark_py_files: str | None = None,
spark_files: str | None = None,
spark_jars: str | None = None,
num_executors: int | None = None,
executor_cores: int | None = None,
executor_memory: str | None = None,
driver_memory: str | None = None,
verbose: bool = False,
principal: str | None = None,
keytab: str | None = None,
cmd_type: str = "spark_to_jdbc",
jdbc_table: str | None = None,
jdbc_conn_id: str = "jdbc-default",
jdbc_driver: str | None = None,
metastore_table: str | None = None,
jdbc_truncate: bool = False,
save_mode: str | None = None,
save_format: str | None = None,
batch_size: int | None = None,
fetch_size: int | None = None,
num_partitions: int | None = None,
partition_column: str | None = None,
lower_bound: str | None = None,
upper_bound: str | None = None,
create_table_column_types: str | None = None,
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self._spark_app_name = spark_app_name
self._spark_conn_id = spark_conn_id
self._spark_conf = spark_conf
self._spark_py_files = spark_py_files
self._spark_files = spark_files
self._spark_jars = spark_jars
self._num_executors = num_executors
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._verbose = verbose
self._keytab = keytab
self._principal = principal
self._cmd_type = cmd_type
self._jdbc_table = jdbc_table
self._jdbc_conn_id = jdbc_conn_id
self._jdbc_driver = jdbc_driver
self._metastore_table = metastore_table
self._jdbc_truncate = jdbc_truncate
self._save_mode = save_mode
self._save_format = save_format
self._batch_size = batch_size
self._fetch_size = fetch_size
self._num_partitions = num_partitions
self._partition_column = partition_column
self._lower_bound = lower_bound
self._upper_bound = upper_bound
self._create_table_column_types = create_table_column_types
self._hook: SparkJDBCHook | None = None
def execute(self, context: Context) -> None:
"""Call the SparkSubmitHook to run the provided spark job."""
if self._hook is None:
self._hook = self._get_hook()
self._hook.submit_jdbc_job()
def on_kill(self) -> None:
if self._hook is None:
self._hook = self._get_hook()
self._hook.on_kill()
def _get_hook(self) -> SparkJDBCHook:
return SparkJDBCHook(
spark_app_name=self._spark_app_name,
spark_conn_id=self._spark_conn_id,
spark_conf=self._spark_conf,
spark_py_files=self._spark_py_files,
spark_files=self._spark_files,
spark_jars=self._spark_jars,
num_executors=self._num_executors,
executor_cores=self._executor_cores,
executor_memory=self._executor_memory,
driver_memory=self._driver_memory,
verbose=self._verbose,
keytab=self._keytab,
principal=self._principal,
cmd_type=self._cmd_type,
jdbc_table=self._jdbc_table,
jdbc_conn_id=self._jdbc_conn_id,
jdbc_driver=self._jdbc_driver,
metastore_table=self._metastore_table,
jdbc_truncate=self._jdbc_truncate,
save_mode=self._save_mode,
save_format=self._save_format,
batch_size=self._batch_size,
fetch_size=self._fetch_size,
num_partitions=self._num_partitions,
partition_column=self._partition_column,
lower_bound=self._lower_bound,
upper_bound=self._upper_bound,
create_table_column_types=self._create_table_column_types,
)
| 9,935 | 48.188119 | 108 | py |
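A minimal, hypothetical usage sketch for the operator above. The DAG id, schedule, connection ids, jar path and table names below are illustrative assumptions, not values taken from the Airflow source:

import pendulum

from airflow import DAG
from airflow.providers.apache.spark.operators.spark_jdbc import SparkJDBCOperator

with DAG(
    dag_id="example_spark_jdbc_transfer",
    start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
    schedule=None,
    catchup=False,
):
    # Write a metastore table out to a JDBC database (cmd_type="spark_to_jdbc").
    metastore_to_jdbc = SparkJDBCOperator(
        task_id="metastore_to_jdbc",
        cmd_type="spark_to_jdbc",
        spark_conn_id="spark-default",          # operator default, shown for clarity
        jdbc_conn_id="postgres_jdbc",           # assumed JDBC connection id
        jdbc_driver="org.postgresql.Driver",
        spark_jars="/opt/jars/postgresql.jar",  # the JDBC driver jar is shipped via 'spark_jars'
        metastore_table="analytics.orders",     # assumed source table
        jdbc_table="public.orders",             # assumed target table
        save_mode="overwrite",
        jdbc_truncate=True,
        num_executors=4,
        executor_memory="2G",
    )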
airflow | airflow-main/airflow/providers/apache/spark/hooks/spark_jdbc_script.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import argparse
from typing import Any
from pyspark.sql import SparkSession
SPARK_WRITE_TO_JDBC: str = "spark_to_jdbc"
SPARK_READ_FROM_JDBC: str = "jdbc_to_spark"
def set_common_options(
spark_source: Any,
url: str = "localhost:5432",
jdbc_table: str = "default.default",
user: str = "root",
password: str = "root",
driver: str = "driver",
) -> Any:
"""
    Set common JDBC options on a Spark reader or writer and return it.
:param spark_source: Spark source, here is Spark reader or writer
:param url: JDBC resource url
:param jdbc_table: JDBC resource table name
:param user: JDBC resource user name
:param password: JDBC resource password
:param driver: JDBC resource driver
"""
spark_source = (
spark_source.format("jdbc")
.option("url", url)
.option("dbtable", jdbc_table)
.option("user", user)
.option("password", password)
.option("driver", driver)
)
return spark_source
def spark_write_to_jdbc(
spark_session: SparkSession,
url: str,
user: str,
password: str,
metastore_table: str,
jdbc_table: str,
driver: Any,
truncate: bool,
save_mode: str,
batch_size: int,
num_partitions: int,
create_table_column_types: str,
) -> None:
"""Transfer data from Spark to JDBC source."""
writer = spark_session.table(metastore_table).write
# first set common options
writer = set_common_options(writer, url, jdbc_table, user, password, driver)
# now set write-specific options
if truncate:
writer = writer.option("truncate", truncate)
if batch_size:
writer = writer.option("batchsize", batch_size)
if num_partitions:
writer = writer.option("numPartitions", num_partitions)
if create_table_column_types:
writer = writer.option("createTableColumnTypes", create_table_column_types)
writer.save(mode=save_mode)
def spark_read_from_jdbc(
spark_session: SparkSession,
url: str,
user: str,
password: str,
metastore_table: str,
jdbc_table: str,
driver: Any,
save_mode: str,
save_format: str,
fetch_size: int,
num_partitions: int,
partition_column: str,
lower_bound: str,
upper_bound: str,
) -> None:
"""Transfer data from JDBC source to Spark."""
# first set common options
reader = set_common_options(spark_session.read, url, jdbc_table, user, password, driver)
# now set specific read options
if fetch_size:
reader = reader.option("fetchsize", fetch_size)
if num_partitions:
reader = reader.option("numPartitions", num_partitions)
if partition_column and lower_bound and upper_bound:
reader = (
reader.option("partitionColumn", partition_column)
.option("lowerBound", lower_bound)
.option("upperBound", upper_bound)
)
reader.load().write.saveAsTable(metastore_table, format=save_format, mode=save_mode)
def _parse_arguments(args: list[str] | None = None) -> Any:
parser = argparse.ArgumentParser(description="Spark-JDBC")
parser.add_argument("-cmdType", dest="cmd_type", action="store")
parser.add_argument("-url", dest="url", action="store")
parser.add_argument("-user", dest="user", action="store")
parser.add_argument("-password", dest="password", action="store")
parser.add_argument("-metastoreTable", dest="metastore_table", action="store")
parser.add_argument("-jdbcTable", dest="jdbc_table", action="store")
parser.add_argument("-jdbcDriver", dest="jdbc_driver", action="store")
parser.add_argument("-jdbcTruncate", dest="truncate", action="store")
parser.add_argument("-saveMode", dest="save_mode", action="store")
parser.add_argument("-saveFormat", dest="save_format", action="store")
parser.add_argument("-batchsize", dest="batch_size", action="store")
parser.add_argument("-fetchsize", dest="fetch_size", action="store")
parser.add_argument("-name", dest="name", action="store")
parser.add_argument("-numPartitions", dest="num_partitions", action="store")
parser.add_argument("-partitionColumn", dest="partition_column", action="store")
parser.add_argument("-lowerBound", dest="lower_bound", action="store")
parser.add_argument("-upperBound", dest="upper_bound", action="store")
parser.add_argument("-createTableColumnTypes", dest="create_table_column_types", action="store")
return parser.parse_args(args=args)
def _create_spark_session(arguments: Any) -> SparkSession:
return SparkSession.builder.appName(arguments.name).enableHiveSupport().getOrCreate()
def _run_spark(arguments: Any) -> None:
# Disable dynamic allocation by default to allow num_executors to take effect.
spark = _create_spark_session(arguments)
if arguments.cmd_type == SPARK_WRITE_TO_JDBC:
spark_write_to_jdbc(
spark,
arguments.url,
arguments.user,
arguments.password,
arguments.metastore_table,
arguments.jdbc_table,
arguments.jdbc_driver,
arguments.truncate,
arguments.save_mode,
arguments.batch_size,
arguments.num_partitions,
arguments.create_table_column_types,
)
elif arguments.cmd_type == SPARK_READ_FROM_JDBC:
spark_read_from_jdbc(
spark,
arguments.url,
arguments.user,
arguments.password,
arguments.metastore_table,
arguments.jdbc_table,
arguments.jdbc_driver,
arguments.save_mode,
arguments.save_format,
arguments.fetch_size,
arguments.num_partitions,
arguments.partition_column,
arguments.lower_bound,
arguments.upper_bound,
)
if __name__ == "__main__": # pragma: no cover
_run_spark(arguments=_parse_arguments())
| 6,753 | 34.177083 | 100 | py |
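As a rough, standalone illustration of the argument contract accepted by the script above, the sketch below only exercises _parse_arguments(); pyspark must be importable for the module import to succeed, and the URL, credentials and table names are made up:

from airflow.providers.apache.spark.hooks.spark_jdbc_script import (
    SPARK_WRITE_TO_JDBC,
    _parse_arguments,
)

args = _parse_arguments(
    [
        "-cmdType", SPARK_WRITE_TO_JDBC,
        "-url", "jdbc:postgresql://db-host:5432/analytics",  # assumed JDBC URL
        "-user", "spark",
        "-password", "secret",
        "-metastoreTable", "analytics.orders",
        "-jdbcTable", "public.orders",
        "-jdbcDriver", "org.postgresql.Driver",
        "-saveMode", "overwrite",
        "-batchsize", "1000",
        "-name", "airflow-spark-jdbc",
    ]
)
assert args.cmd_type == SPARK_WRITE_TO_JDBC
assert args.batch_size == "1000"  # argparse stores every value as a string here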
airflow | airflow-main/airflow/providers/apache/spark/hooks/spark_submit.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import contextlib
import os
import re
import subprocess
import time
from typing import Any, Iterator
from airflow.configuration import conf as airflow_conf
from airflow.exceptions import AirflowException
from airflow.hooks.base import BaseHook
from airflow.security.kerberos import renew_from_kt
from airflow.utils.log.logging_mixin import LoggingMixin
with contextlib.suppress(ImportError, NameError):
from airflow.kubernetes import kube_client
ALLOWED_SPARK_BINARIES = ["spark-submit", "spark2-submit", "spark3-submit"]
class SparkSubmitHook(BaseHook, LoggingMixin):
"""
Wrap the spark-submit binary to kick off a spark-submit job; requires "spark-submit" binary in the PATH.
:param conf: Arbitrary Spark configuration properties
:param spark_conn_id: The :ref:`spark connection id <howto/connection:spark>` as configured
in Airflow administration. When an invalid connection_id is supplied, it will default
to yarn.
:param files: Upload additional files to the executor running the job, separated by a
comma. Files will be placed in the working directory of each executor.
For example, serialized objects.
:param py_files: Additional python files used by the job, can be .zip, .egg or .py.
:param archives: Archives that spark should unzip (and possibly tag with #ALIAS) into
the application working directory.
:param driver_class_path: Additional, driver-specific, classpath settings.
:param jars: Submit additional jars to upload and place them in executor classpath.
:param java_class: the main class of the Java application
:param packages: Comma-separated list of maven coordinates of jars to include on the
driver and executor classpaths
:param exclude_packages: Comma-separated list of maven coordinates of jars to exclude
while resolving the dependencies provided in 'packages'
:param repositories: Comma-separated list of additional remote repositories to search
for the maven coordinates given with 'packages'
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
(Default: all the available cores on the worker)
:param executor_cores: (Standalone, YARN and Kubernetes only) Number of cores per
executor (Default: 2)
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G) (Default: 1G)
:param keytab: Full path to the file that contains the keytab
:param principal: The name of the kerberos principal used for keytab
:param proxy_user: User to impersonate when submitting the application
:param name: Name of the job (default airflow-spark)
:param num_executors: Number of executors to launch
:param status_poll_interval: Seconds to wait between polls of driver status in cluster
mode (Default: 1)
:param application_args: Arguments for the application being submitted
:param env_vars: Environment variables for spark-submit. It
supports yarn and k8s mode too.
:param verbose: Whether to pass the verbose flag to spark-submit process for debugging
:param spark_binary: The command to use for spark submit.
Some distros may use spark2-submit or spark3-submit.
"""
conn_name_attr = "conn_id"
default_conn_name = "spark_default"
conn_type = "spark"
hook_name = "Spark"
@staticmethod
def get_ui_field_behaviour() -> dict[str, Any]:
"""Returns custom field behaviour."""
return {
"hidden_fields": ["schema", "login", "password"],
"relabeling": {},
}
def __init__(
self,
conf: dict[str, Any] | None = None,
conn_id: str = "spark_default",
files: str | None = None,
py_files: str | None = None,
archives: str | None = None,
driver_class_path: str | None = None,
jars: str | None = None,
java_class: str | None = None,
packages: str | None = None,
exclude_packages: str | None = None,
repositories: str | None = None,
total_executor_cores: int | None = None,
executor_cores: int | None = None,
executor_memory: str | None = None,
driver_memory: str | None = None,
keytab: str | None = None,
principal: str | None = None,
proxy_user: str | None = None,
name: str = "default-name",
num_executors: int | None = None,
status_poll_interval: int = 1,
application_args: list[Any] | None = None,
env_vars: dict[str, Any] | None = None,
verbose: bool = False,
spark_binary: str | None = None,
) -> None:
super().__init__()
self._conf = conf or {}
self._conn_id = conn_id
self._files = files
self._py_files = py_files
self._archives = archives
self._driver_class_path = driver_class_path
self._jars = jars
self._java_class = java_class
self._packages = packages
self._exclude_packages = exclude_packages
self._repositories = repositories
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._keytab = keytab
self._principal = principal
self._proxy_user = proxy_user
self._name = name
self._num_executors = num_executors
self._status_poll_interval = status_poll_interval
self._application_args = application_args
self._env_vars = env_vars
self._verbose = verbose
self._submit_sp: Any | None = None
self._yarn_application_id: str | None = None
self._kubernetes_driver_pod: str | None = None
self.spark_binary = spark_binary
self._connection = self._resolve_connection()
self._is_yarn = "yarn" in self._connection["master"]
self._is_kubernetes = "k8s" in self._connection["master"]
if self._is_kubernetes and kube_client is None:
raise RuntimeError(
f"{self._connection['master']} specified by kubernetes dependencies are not installed!"
)
self._should_track_driver_status = self._resolve_should_track_driver_status()
self._driver_id: str | None = None
self._driver_status: str | None = None
self._spark_exit_code: int | None = None
self._env: dict[str, Any] | None = None
def _resolve_should_track_driver_status(self) -> bool:
"""Check if we should track the driver status.
If so, we should send subsequent spark-submit status requests after the
initial spark-submit request.
:return: if the driver status should be tracked
"""
return "spark://" in self._connection["master"] and self._connection["deploy_mode"] == "cluster"
def _resolve_connection(self) -> dict[str, Any]:
# Build from connection master or default to yarn if not available
conn_data = {
"master": "yarn",
"queue": None,
"deploy_mode": None,
"spark_binary": self.spark_binary or "spark-submit",
"namespace": None,
}
try:
# Master can be local, yarn, spark://HOST:PORT, mesos://HOST:PORT and
# k8s://https://<HOST>:<PORT>
conn = self.get_connection(self._conn_id)
if conn.port:
conn_data["master"] = f"{conn.host}:{conn.port}"
else:
conn_data["master"] = conn.host
# Determine optional yarn queue from the extra field
extra = conn.extra_dejson
conn_data["queue"] = extra.get("queue")
conn_data["deploy_mode"] = extra.get("deploy-mode")
if not self.spark_binary:
self.spark_binary = extra.get("spark-binary", "spark-submit")
if self.spark_binary is not None and self.spark_binary not in ALLOWED_SPARK_BINARIES:
raise RuntimeError(
f"The spark-binary extra can be on of {ALLOWED_SPARK_BINARIES} and it"
f" was `{self.spark_binary}`. Please make sure your spark binary is one of the"
f" allowed ones and that it is available on the PATH"
)
conn_spark_home = extra.get("spark-home")
if conn_spark_home:
raise RuntimeError(
"The `spark-home` extra is not allowed any more. Please make sure one of"
f" {ALLOWED_SPARK_BINARIES} is available on the PATH, and set `spark-binary`"
" if needed."
)
conn_data["spark_binary"] = self.spark_binary
conn_data["namespace"] = extra.get("namespace")
except AirflowException:
self.log.info(
"Could not load connection string %s, defaulting to %s", self._conn_id, conn_data["master"]
)
if "spark.kubernetes.namespace" in self._conf:
conn_data["namespace"] = self._conf["spark.kubernetes.namespace"]
return conn_data
def get_conn(self) -> Any:
pass
def _get_spark_binary_path(self) -> list[str]:
# Assume that spark-submit is present in the path to the executing user
return [self._connection["spark_binary"]]
def _mask_cmd(self, connection_cmd: str | list[str]) -> str:
# Mask any password related fields in application args with key value pair
# where key contains password (case insensitive), e.g. HivePassword='abc'
connection_cmd_masked = re.sub(
r"("
r"\S*?" # Match all non-whitespace characters before...
r"(?:secret|password)" # ...literally a "secret" or "password"
# word (not capturing them).
r"\S*?" # All non-whitespace characters before either...
r"(?:=|\s+)" # ...an equal sign or whitespace characters
# (not capturing them).
r"(['\"]?)" # An optional single or double quote.
r")" # This is the end of the first capturing group.
r"(?:(?!\2\s).)*" # All characters between optional quotes
# (matched above); if the value is quoted,
# it may contain whitespace.
r"(\2)", # Optional matching quote.
r"\1******\3",
" ".join(connection_cmd),
flags=re.I,
)
return connection_cmd_masked
def _build_spark_submit_command(self, application: str) -> list[str]:
"""
Construct the spark-submit command to execute.
:param application: command to append to the spark-submit command
:return: full command to be executed
"""
connection_cmd = self._get_spark_binary_path()
# The url of the spark master
connection_cmd += ["--master", self._connection["master"]]
for key in self._conf:
connection_cmd += ["--conf", f"{key}={str(self._conf[key])}"]
if self._env_vars and (self._is_kubernetes or self._is_yarn):
if self._is_yarn:
tmpl = "spark.yarn.appMasterEnv.{}={}"
# Allow dynamic setting of hadoop/yarn configuration environments
self._env = self._env_vars
else:
tmpl = "spark.kubernetes.driverEnv.{}={}"
for key in self._env_vars:
connection_cmd += ["--conf", tmpl.format(key, str(self._env_vars[key]))]
elif self._env_vars and self._connection["deploy_mode"] != "cluster":
self._env = self._env_vars # Do it on Popen of the process
elif self._env_vars and self._connection["deploy_mode"] == "cluster":
raise AirflowException("SparkSubmitHook env_vars is not supported in standalone-cluster mode.")
if self._is_kubernetes and self._connection["namespace"]:
connection_cmd += [
"--conf",
f"spark.kubernetes.namespace={self._connection['namespace']}",
]
if self._files:
connection_cmd += ["--files", self._files]
if self._py_files:
connection_cmd += ["--py-files", self._py_files]
if self._archives:
connection_cmd += ["--archives", self._archives]
if self._driver_class_path:
connection_cmd += ["--driver-class-path", self._driver_class_path]
if self._jars:
connection_cmd += ["--jars", self._jars]
if self._packages:
connection_cmd += ["--packages", self._packages]
if self._exclude_packages:
connection_cmd += ["--exclude-packages", self._exclude_packages]
if self._repositories:
connection_cmd += ["--repositories", self._repositories]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._driver_memory:
connection_cmd += ["--driver-memory", self._driver_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._proxy_user:
connection_cmd += ["--proxy-user", self._proxy_user]
if self._name:
connection_cmd += ["--name", self._name]
if self._java_class:
connection_cmd += ["--class", self._java_class]
if self._verbose:
connection_cmd += ["--verbose"]
if self._connection["queue"]:
connection_cmd += ["--queue", self._connection["queue"]]
if self._connection["deploy_mode"]:
connection_cmd += ["--deploy-mode", self._connection["deploy_mode"]]
# The actual script to execute
connection_cmd += [application]
# Append any application arguments
if self._application_args:
connection_cmd += self._application_args
self.log.info("Spark-Submit cmd: %s", self._mask_cmd(connection_cmd))
return connection_cmd
def _build_track_driver_status_command(self) -> list[str]:
"""
Construct the command to poll the driver status.
:return: full command to be executed
"""
curl_max_wait_time = 30
spark_host = self._connection["master"]
if spark_host.endswith(":6066"):
spark_host = spark_host.replace("spark://", "http://")
connection_cmd = [
"/usr/bin/curl",
"--max-time",
str(curl_max_wait_time),
f"{spark_host}/v1/submissions/status/{self._driver_id}",
]
self.log.info(connection_cmd)
# The driver id so we can poll for its status
if not self._driver_id:
raise AirflowException(
"Invalid status: attempted to poll driver status but no driver id is known. Giving up."
)
else:
connection_cmd = self._get_spark_binary_path()
# The url to the spark master
connection_cmd += ["--master", self._connection["master"]]
# The driver id so we can poll for its status
if self._driver_id:
connection_cmd += ["--status", self._driver_id]
else:
raise AirflowException(
"Invalid status: attempted to poll driver status but no driver id is known. Giving up."
)
self.log.debug("Poll driver status cmd: %s", connection_cmd)
return connection_cmd
def submit(self, application: str = "", **kwargs: Any) -> None:
"""
Remote Popen to execute the spark-submit job.
:param application: Submitted application, jar or py file
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_submit_cmd = self._build_spark_submit_command(application)
if self._env:
env = os.environ.copy()
env.update(self._env)
kwargs["env"] = env
self._submit_sp = subprocess.Popen(
spark_submit_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True,
**kwargs,
)
self._process_spark_submit_log(iter(self._submit_sp.stdout)) # type: ignore
returncode = self._submit_sp.wait()
# Check spark-submit return code. In Kubernetes mode, also check the value
# of exit code in the log, as it may differ.
if returncode or (self._is_kubernetes and self._spark_exit_code != 0):
if self._is_kubernetes:
raise AirflowException(
f"Cannot execute: {self._mask_cmd(spark_submit_cmd)}. Error code is: {returncode}. "
f"Kubernetes spark exit code is: {self._spark_exit_code}"
)
else:
raise AirflowException(
f"Cannot execute: {self._mask_cmd(spark_submit_cmd)}. Error code is: {returncode}."
)
self.log.debug("Should track driver: %s", self._should_track_driver_status)
# We want the Airflow job to wait until the Spark driver is finished
if self._should_track_driver_status:
if self._driver_id is None:
raise AirflowException(
"No driver id is known: something went wrong when executing the spark submit command"
)
# We start with the SUBMITTED status as initial status
self._driver_status = "SUBMITTED"
# Start tracking the driver status (blocking function)
self._start_driver_status_tracking()
if self._driver_status != "FINISHED":
raise AirflowException(
f"ERROR : Driver {self._driver_id} badly exited with status {self._driver_status}"
)
def _process_spark_submit_log(self, itr: Iterator[Any]) -> None:
"""
        Process the spark-submit log output and extract useful information from it.
If the deploy-mode is 'client', log the output of the submit command as those
are the output logs of the Spark worker directly.
Remark: If the driver needs to be tracked for its status, the log-level of the
spark deploy needs to be at least INFO (log4j.logger.org.apache.spark.deploy=INFO)
:param itr: An iterator which iterates over the input of the subprocess
"""
# Consume the iterator
for line in itr:
line = line.strip()
# If we run yarn cluster mode, we want to extract the application id from
# the logs so we can kill the application when we stop it unexpectedly
if self._is_yarn and self._connection["deploy_mode"] == "cluster":
match = re.search("(application[0-9_]+)", line)
if match:
self._yarn_application_id = match.groups()[0]
self.log.info("Identified spark driver id: %s", self._yarn_application_id)
# If we run Kubernetes cluster mode, we want to extract the driver pod id
# from the logs so we can kill the application when we stop it unexpectedly
elif self._is_kubernetes:
match = re.search(r"\s*pod name: ((.+?)-([a-z0-9]+)-driver)", line)
if match:
self._kubernetes_driver_pod = match.groups()[0]
self.log.info("Identified spark driver pod: %s", self._kubernetes_driver_pod)
# Store the Spark Exit code
match_exit_code = re.search(r"\s*[eE]xit code: (\d+)", line)
if match_exit_code:
self._spark_exit_code = int(match_exit_code.groups()[0])
# if we run in standalone cluster mode and we want to track the driver status
# we need to extract the driver id from the logs. This allows us to poll for
# the status using the driver id. Also, we can kill the driver when needed.
elif self._should_track_driver_status and not self._driver_id:
match_driver_id = re.search(r"(driver-[0-9\-]+)", line)
if match_driver_id:
self._driver_id = match_driver_id.groups()[0]
self.log.info("identified spark driver id: %s", self._driver_id)
self.log.info(line)
def _process_spark_status_log(self, itr: Iterator[Any]) -> None:
"""
Parses the logs of the spark driver status query process.
:param itr: An iterator which iterates over the input of the subprocess
"""
driver_found = False
valid_response = False
# Consume the iterator
for line in itr:
line = line.strip()
# A valid Spark status response should contain a submissionId
if "submissionId" in line:
valid_response = True
# Check if the log line is about the driver status and extract the status.
if "driverState" in line:
self._driver_status = line.split(" : ")[1].replace(",", "").replace('"', "").strip()
driver_found = True
self.log.debug("spark driver status log: %s", line)
if valid_response and not driver_found:
self._driver_status = "UNKNOWN"
def _start_driver_status_tracking(self) -> None:
"""
Polls the driver based on self._driver_id to get the status.
Finish successfully when the status is FINISHED.
Finish failed when the status is ERROR/UNKNOWN/KILLED/FAILED.
Possible status:
SUBMITTED
Submitted but not yet scheduled on a worker
RUNNING
Has been allocated to a worker to run
FINISHED
Previously ran and exited cleanly
RELAUNCHING
Exited non-zero or due to worker failure, but has not yet
started running again
UNKNOWN
The status of the driver is temporarily not known due to
master failure recovery
KILLED
A user manually killed this driver
FAILED
The driver exited non-zero and was not supervised
ERROR
Unable to run or restart due to an unrecoverable error
(e.g. missing jar file)
"""
        # When a Spark Standalone cluster is not performing well
        # (e.g. due to misconfiguration or heavy load),
        # it is possible that the polling request will time out.
        # Therefore we use a simple retry mechanism.
missed_job_status_reports = 0
max_missed_job_status_reports = 10
# Keep polling as long as the driver is processing
while self._driver_status not in ["FINISHED", "UNKNOWN", "KILLED", "FAILED", "ERROR"]:
# Sleep for n seconds as we do not want to spam the cluster
time.sleep(self._status_poll_interval)
self.log.debug("polling status of spark driver with id %s", self._driver_id)
            poll_driver_status_cmd = self._build_track_driver_status_command()
            status_process: Any = subprocess.Popen(
                poll_driver_status_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
bufsize=-1,
universal_newlines=True,
)
self._process_spark_status_log(iter(status_process.stdout))
returncode = status_process.wait()
if returncode:
if missed_job_status_reports < max_missed_job_status_reports:
missed_job_status_reports += 1
else:
raise AirflowException(
f"Failed to poll for the driver status {max_missed_job_status_reports} times: "
f"returncode = {returncode}"
)
def _build_spark_driver_kill_command(self) -> list[str]:
"""
Construct the spark-submit command to kill a driver.
:return: full command to kill a driver
"""
# Assume that spark-submit is present in the path to the executing user
connection_cmd = [self._connection["spark_binary"]]
# The url to the spark master
connection_cmd += ["--master", self._connection["master"]]
# The actual kill command
if self._driver_id:
connection_cmd += ["--kill", self._driver_id]
self.log.debug("Spark-Kill cmd: %s", connection_cmd)
return connection_cmd
def on_kill(self) -> None:
"""Kill Spark submit command."""
self.log.debug("Kill Command is being called")
if self._should_track_driver_status and self._driver_id:
self.log.info("Killing driver %s on cluster", self._driver_id)
kill_cmd = self._build_spark_driver_kill_command()
with subprocess.Popen(kill_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as driver_kill:
self.log.info(
"Spark driver %s killed with return code: %s", self._driver_id, driver_kill.wait()
)
if self._submit_sp and self._submit_sp.poll() is None:
self.log.info("Sending kill signal to %s", self._connection["spark_binary"])
self._submit_sp.kill()
if self._yarn_application_id:
kill_cmd = f"yarn application -kill {self._yarn_application_id}".split()
env = {**os.environ, **(self._env or {})}
if self._keytab is not None and self._principal is not None:
# we are ignoring renewal failures from renew_from_kt
# here as the failure could just be due to a non-renewable ticket,
# we still attempt to kill the yarn application
renew_from_kt(self._principal, self._keytab, exit_on_fail=False)
env = os.environ.copy()
                ccache = airflow_conf.get_mandatory_value("kerberos", "ccache")
                env["KRB5CCNAME"] = ccache
with subprocess.Popen(
kill_cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE
) as yarn_kill:
self.log.info("YARN app killed with return code: %s", yarn_kill.wait())
if self._kubernetes_driver_pod:
self.log.info("Killing pod %s on Kubernetes", self._kubernetes_driver_pod)
# Currently only instantiate Kubernetes client for killing a spark pod.
try:
import kubernetes
client = kube_client.get_kube_client()
api_response = client.delete_namespaced_pod(
self._kubernetes_driver_pod,
self._connection["namespace"],
body=kubernetes.client.V1DeleteOptions(),
pretty=True,
)
self.log.info("Spark on K8s killed with response: %s", api_response)
except kube_client.ApiException:
self.log.exception("Exception when attempting to kill Spark on K8s")
| 28,672 | 42.642314 | 108 | py |
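A hypothetical sketch of driving the hook above directly, outside of an operator, in a working Airflow environment. The connection id, conf property, environment variable and application path are assumptions:

from airflow.providers.apache.spark.hooks.spark_submit import SparkSubmitHook

# If the 'spark_default' connection cannot be loaded, _resolve_connection() above
# falls back to master=yarn.
hook = SparkSubmitHook(
    conn_id="spark_default",
    conf={"spark.executor.memoryOverhead": "512"},  # arbitrary example property
    name="airflow-example-app",
    num_executors=2,
    executor_memory="2G",
    env_vars={"ENVIRONMENT": "dev"},                # assumed environment variable
    verbose=True,
)

# Blocks until spark-submit exits; in Spark standalone cluster mode it also polls
# the driver status until a terminal state is reached.
hook.submit(application="/opt/spark-apps/pi.py")    # assumed application path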
airflow | airflow-main/airflow/providers/apache/spark/hooks/spark_sql.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import subprocess
from typing import TYPE_CHECKING, Any
from airflow.exceptions import AirflowException, AirflowNotFoundException
from airflow.hooks.base import BaseHook
if TYPE_CHECKING:
from airflow.models.connection import Connection
class SparkSqlHook(BaseHook):
"""
This hook is a wrapper around the spark-sql binary; requires the "spark-sql" binary to be in the PATH.
:param sql: The SQL query to execute
:param conf: arbitrary Spark configuration property
:param conn_id: connection_id string
:param total_executor_cores: (Standalone & Mesos only) Total cores for all executors
(Default: all the available cores on the worker)
:param executor_cores: (Standalone & YARN only) Number of cores per
executor (Default: 2)
:param executor_memory: Memory per executor (e.g. 1000M, 2G) (Default: 1G)
:param keytab: Full path to the file that contains the keytab
:param master: spark://host:port, mesos://host:port, yarn, or local
(Default: The ``host`` and ``port`` set in the Connection, or ``"yarn"``)
:param name: Name of the job.
:param num_executors: Number of executors to launch
:param verbose: Whether to pass the verbose flag to spark-sql
:param yarn_queue: The YARN queue to submit to
(Default: The ``queue`` value set in the Connection, or ``"default"``)
"""
conn_name_attr = "conn_id"
default_conn_name = "spark_sql_default"
conn_type = "spark_sql"
hook_name = "Spark SQL"
def __init__(
self,
sql: str,
conf: str | None = None,
conn_id: str = default_conn_name,
total_executor_cores: int | None = None,
executor_cores: int | None = None,
executor_memory: str | None = None,
keytab: str | None = None,
principal: str | None = None,
master: str | None = None,
name: str = "default-name",
num_executors: int | None = None,
verbose: bool = True,
yarn_queue: str | None = None,
) -> None:
super().__init__()
options: dict = {}
conn: Connection | None = None
try:
conn = self.get_connection(conn_id)
except AirflowNotFoundException:
conn = None
if conn:
options = conn.extra_dejson
# Set arguments to values set in Connection if not explicitly provided.
if master is None:
if conn is None:
master = "yarn"
elif conn.port:
master = f"{conn.host}:{conn.port}"
else:
master = conn.host
if yarn_queue is None:
yarn_queue = options.get("queue", "default")
self._sql = sql
self._conf = conf
self._total_executor_cores = total_executor_cores
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._keytab = keytab
self._principal = principal
self._master = master
self._name = name
self._num_executors = num_executors
self._verbose = verbose
self._yarn_queue = yarn_queue
self._sp: Any = None
def get_conn(self) -> Any:
pass
def _prepare_command(self, cmd: str | list[str]) -> list[str]:
"""
        Construct the spark-sql command to execute. Verbose output is enabled by default.
:param cmd: command to append to the spark-sql command
:return: full command to be executed
"""
connection_cmd = ["spark-sql"]
if self._conf:
for conf_el in self._conf.split(","):
connection_cmd += ["--conf", conf_el]
if self._total_executor_cores:
connection_cmd += ["--total-executor-cores", str(self._total_executor_cores)]
if self._executor_cores:
connection_cmd += ["--executor-cores", str(self._executor_cores)]
if self._executor_memory:
connection_cmd += ["--executor-memory", self._executor_memory]
if self._keytab:
connection_cmd += ["--keytab", self._keytab]
if self._principal:
connection_cmd += ["--principal", self._principal]
if self._num_executors:
connection_cmd += ["--num-executors", str(self._num_executors)]
if self._sql:
sql = self._sql.strip()
if sql.endswith(".sql") or sql.endswith(".hql"):
connection_cmd += ["-f", sql]
else:
connection_cmd += ["-e", sql]
if self._master:
connection_cmd += ["--master", self._master]
if self._name:
connection_cmd += ["--name", self._name]
if self._verbose:
connection_cmd += ["--verbose"]
if self._yarn_queue:
connection_cmd += ["--queue", self._yarn_queue]
if isinstance(cmd, str):
connection_cmd += cmd.split()
elif isinstance(cmd, list):
connection_cmd += cmd
else:
raise AirflowException(f"Invalid additional command: {cmd}")
self.log.debug("Spark-Sql cmd: %s", connection_cmd)
return connection_cmd
def run_query(self, cmd: str = "", **kwargs: Any) -> None:
"""
Remote Popen (actually execute the Spark-sql query).
:param cmd: command to append to the spark-sql command
:param kwargs: extra arguments to Popen (see subprocess.Popen)
"""
spark_sql_cmd = self._prepare_command(cmd)
self._sp = subprocess.Popen(
spark_sql_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, **kwargs
)
for line in iter(self._sp.stdout): # type: ignore
self.log.info(line)
returncode = self._sp.wait()
if returncode:
raise AirflowException(
f"Cannot execute '{self._sql}' on {self._master} (additional parameters: '{cmd}'). "
f"Process exit code: {returncode}."
)
def kill(self) -> None:
"""Kill Spark job."""
if self._sp and self._sp.poll() is None:
self.log.info("Killing the Spark-Sql job")
self._sp.kill()
| 7,037 | 36.042105 | 110 | py |
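A minimal, hypothetical sketch of running a query with the hook above; the SQL text, master and connection id are assumptions:

from airflow.providers.apache.spark.hooks.spark_sql import SparkSqlHook

hook = SparkSqlHook(
    sql="SELECT COUNT(*) FROM analytics.orders",  # assumed query
    conn_id="spark_sql_default",
    master="local[2]",   # explicit override; otherwise taken from the connection, or 'yarn'
    name="airflow-spark-sql-example",
)

# Spawns `spark-sql -e "<sql>" --master local[2] ...` and raises AirflowException
# if the process exits with a non-zero code.
hook.run_query()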
airflow | airflow-main/airflow/providers/apache/spark/hooks/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 787 | 42.777778 | 62 | py |
airflow | airflow-main/airflow/providers/apache/spark/hooks/spark_jdbc.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import os
from typing import Any
from airflow.exceptions import AirflowException
from airflow.providers.apache.spark.hooks.spark_submit import SparkSubmitHook
class SparkJDBCHook(SparkSubmitHook):
"""
Extends the SparkSubmitHook for performing data transfers to/from JDBC-based databases with Apache Spark.
:param spark_app_name: Name of the job (default airflow-spark-jdbc)
:param spark_conn_id: The :ref:`spark connection id <howto/connection:spark>`
as configured in Airflow administration
:param spark_conf: Any additional Spark configuration properties
:param spark_py_files: Additional python files used (.zip, .egg, or .py)
:param spark_files: Additional files to upload to the container running the job
:param spark_jars: Additional jars to upload and add to the driver and
executor classpath
:param num_executors: number of executor to run. This should be set so as to manage
the number of connections made with the JDBC database
:param executor_cores: Number of cores per executor
:param executor_memory: Memory per executor (e.g. 1000M, 2G)
:param driver_memory: Memory allocated to the driver (e.g. 1000M, 2G)
:param verbose: Whether to pass the verbose flag to spark-submit for debugging
:param keytab: Full path to the file that contains the keytab
:param principal: The name of the kerberos principal used for keytab
:param cmd_type: Which way the data should flow. 2 possible values:
spark_to_jdbc: data written by spark from metastore to jdbc
jdbc_to_spark: data written by spark from jdbc to metastore
:param jdbc_table: The name of the JDBC table
:param jdbc_conn_id: Connection id used for connection to JDBC database
:param jdbc_driver: Name of the JDBC driver to use for the JDBC connection. This
driver (usually a jar) should be passed in the 'jars' parameter
:param metastore_table: The name of the metastore table,
:param jdbc_truncate: (spark_to_jdbc only) Whether or not Spark should truncate or
drop and recreate the JDBC table. This only takes effect if
'save_mode' is set to Overwrite. Also, if the schema is
different, Spark cannot truncate, and will drop and recreate
:param save_mode: The Spark save-mode to use (e.g. overwrite, append, etc.)
:param save_format: (jdbc_to_spark-only) The Spark save-format to use (e.g. parquet)
:param batch_size: (spark_to_jdbc only) The size of the batch to insert per round
trip to the JDBC database. Defaults to 1000
:param fetch_size: (jdbc_to_spark only) The size of the batch to fetch per round trip
from the JDBC database. Default depends on the JDBC driver
:param num_partitions: The maximum number of partitions that can be used by Spark
simultaneously, both for spark_to_jdbc and jdbc_to_spark
operations. This will also cap the number of JDBC connections
that can be opened
:param partition_column: (jdbc_to_spark-only) A numeric column to be used to
partition the metastore table by. If specified, you must
also specify:
num_partitions, lower_bound, upper_bound
:param lower_bound: (jdbc_to_spark-only) Lower bound of the range of the numeric
partition column to fetch. If specified, you must also specify:
num_partitions, partition_column, upper_bound
:param upper_bound: (jdbc_to_spark-only) Upper bound of the range of the numeric
partition column to fetch. If specified, you must also specify:
num_partitions, partition_column, lower_bound
:param create_table_column_types: (spark_to_jdbc-only) The database column data types
to use instead of the defaults, when creating the
table. Data type information should be specified in
the same format as CREATE TABLE columns syntax
(e.g: "name CHAR(64), comments VARCHAR(1024)").
The specified types should be valid spark sql data
types.
"""
conn_name_attr = "spark_conn_id"
default_conn_name = "spark_default"
conn_type = "spark_jdbc"
hook_name = "Spark JDBC"
def __init__(
self,
spark_app_name: str = "airflow-spark-jdbc",
spark_conn_id: str = default_conn_name,
spark_conf: dict[str, Any] | None = None,
spark_py_files: str | None = None,
spark_files: str | None = None,
spark_jars: str | None = None,
num_executors: int | None = None,
executor_cores: int | None = None,
executor_memory: str | None = None,
driver_memory: str | None = None,
verbose: bool = False,
principal: str | None = None,
keytab: str | None = None,
cmd_type: str = "spark_to_jdbc",
jdbc_table: str | None = None,
jdbc_conn_id: str = "jdbc-default",
jdbc_driver: str | None = None,
metastore_table: str | None = None,
jdbc_truncate: bool = False,
save_mode: str | None = None,
save_format: str | None = None,
batch_size: int | None = None,
fetch_size: int | None = None,
num_partitions: int | None = None,
partition_column: str | None = None,
lower_bound: str | None = None,
upper_bound: str | None = None,
create_table_column_types: str | None = None,
*args: Any,
**kwargs: Any,
):
super().__init__(*args, **kwargs)
self._name = spark_app_name
self._conn_id = spark_conn_id
self._conf = spark_conf or {}
self._py_files = spark_py_files
self._files = spark_files
self._jars = spark_jars
self._num_executors = num_executors
self._executor_cores = executor_cores
self._executor_memory = executor_memory
self._driver_memory = driver_memory
self._verbose = verbose
self._keytab = keytab
self._principal = principal
self._cmd_type = cmd_type
self._jdbc_table = jdbc_table
self._jdbc_conn_id = jdbc_conn_id
self._jdbc_driver = jdbc_driver
self._metastore_table = metastore_table
self._jdbc_truncate = jdbc_truncate
self._save_mode = save_mode
self._save_format = save_format
self._batch_size = batch_size
self._fetch_size = fetch_size
self._num_partitions = num_partitions
self._partition_column = partition_column
self._lower_bound = lower_bound
self._upper_bound = upper_bound
self._create_table_column_types = create_table_column_types
self._jdbc_connection = self._resolve_jdbc_connection()
def _resolve_jdbc_connection(self) -> dict[str, Any]:
conn_data = {"url": "", "schema": "", "conn_prefix": "", "user": "", "password": ""}
try:
conn = self.get_connection(self._jdbc_conn_id)
if "/" in conn.host:
raise ValueError("The jdbc host should not contain a '/'")
if "?" in conn.schema:
raise ValueError("The jdbc schema should not contain a '?'")
if conn.port:
conn_data["url"] = f"{conn.host}:{conn.port}"
else:
conn_data["url"] = conn.host
conn_data["schema"] = conn.schema
conn_data["user"] = conn.login
conn_data["password"] = conn.password
extra = conn.extra_dejson
conn_data["conn_prefix"] = extra.get("conn_prefix", "")
except AirflowException:
self.log.debug(
"Could not load jdbc connection string %s, defaulting to %s", self._jdbc_conn_id, ""
)
return conn_data
def _build_jdbc_application_arguments(self, jdbc_conn: dict[str, Any]) -> Any:
arguments = []
arguments += ["-cmdType", self._cmd_type]
if self._jdbc_connection["url"]:
arguments += [
"-url",
f"{jdbc_conn['conn_prefix']}{jdbc_conn['url']}/{jdbc_conn['schema']}",
]
if self._jdbc_connection["user"]:
arguments += ["-user", self._jdbc_connection["user"]]
if self._jdbc_connection["password"]:
arguments += ["-password", self._jdbc_connection["password"]]
if self._metastore_table:
arguments += ["-metastoreTable", self._metastore_table]
if self._jdbc_table:
arguments += ["-jdbcTable", self._jdbc_table]
if self._jdbc_truncate:
arguments += ["-jdbcTruncate", str(self._jdbc_truncate)]
if self._jdbc_driver:
arguments += ["-jdbcDriver", self._jdbc_driver]
if self._batch_size:
arguments += ["-batchsize", str(self._batch_size)]
if self._fetch_size:
arguments += ["-fetchsize", str(self._fetch_size)]
if self._num_partitions:
arguments += ["-numPartitions", str(self._num_partitions)]
if self._partition_column and self._lower_bound and self._upper_bound and self._num_partitions:
# these 3 parameters need to be used all together to take effect.
arguments += [
"-partitionColumn",
self._partition_column,
"-lowerBound",
self._lower_bound,
"-upperBound",
self._upper_bound,
]
if self._save_mode:
arguments += ["-saveMode", self._save_mode]
if self._save_format:
arguments += ["-saveFormat", self._save_format]
if self._create_table_column_types:
arguments += ["-createTableColumnTypes", self._create_table_column_types]
return arguments
def submit_jdbc_job(self) -> None:
"""Submit Spark JDBC job."""
self._application_args = self._build_jdbc_application_arguments(self._jdbc_connection)
self.submit(application=f"{os.path.dirname(os.path.abspath(__file__))}/spark_jdbc_script.py")
def get_conn(self) -> Any:
pass
| 11,356 | 48.164502 | 109 | py |
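For completeness, a hypothetical direct use of the hook above (the SparkJDBCOperator earlier in this dump simply builds and delegates to it); connection ids, jar path and table names are assumptions:

from airflow.providers.apache.spark.hooks.spark_jdbc import SparkJDBCHook

hook = SparkJDBCHook(
    cmd_type="jdbc_to_spark",
    jdbc_conn_id="postgres_jdbc",            # assumed JDBC connection id
    jdbc_driver="org.postgresql.Driver",
    spark_jars="/opt/jars/postgresql.jar",   # assumed path to the JDBC driver jar
    jdbc_table="public.orders",              # assumed source table
    metastore_table="analytics.orders",      # assumed target table
    save_mode="append",
    save_format="parquet",
    num_partitions=4,
)

# Builds the '-cmdType ... -jdbcTable ...' argument list shown in
# _build_jdbc_application_arguments() and spark-submits spark_jdbc_script.py.
hook.submit_jdbc_job()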
airflow | airflow-main/airflow/providers/apache/kylin/__init__.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# NOTE! THIS FILE IS AUTOMATICALLY GENERATED AND WILL BE
# OVERWRITTEN WHEN PREPARING DOCUMENTATION FOR THE PACKAGES.
#
# IF YOU WANT TO MODIFY IT, YOU SHOULD MODIFY THE TEMPLATE
# `PROVIDER__INIT__PY_TEMPLATE.py.jinja2` IN the `dev/provider_packages` DIRECTORY
#
from __future__ import annotations
import packaging.version
__all__ = ["__version__"]
__version__ = "3.2.1"
try:
from airflow import __version__ as airflow_version
except ImportError:
from airflow.version import version as airflow_version
if packaging.version.parse(airflow_version) < packaging.version.parse("2.4.0"):
raise RuntimeError(
f"The package `apache-airflow-providers-apache-kylin:{__version__}` requires Apache Airflow 2.4.0+" # NOQA: E501
)
| 1,537 | 35.619048 | 121 | py |
airflow | airflow-main/airflow/providers/apache/kylin/operators/__init__.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
| 785 | 45.235294 | 62 | py |
airflow | airflow-main/airflow/providers/apache/kylin/operators/kylin_cube.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import time
from datetime import datetime
from typing import TYPE_CHECKING, Sequence
from kylinpy import kylinpy
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.apache.kylin.hooks.kylin import KylinHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class KylinCubeOperator(BaseOperator):
"""Submit request about Kylin build/refresh/merge and track job status.
For more detail information in
`Apache Kylin <http://kylin.apache.org/>`_
:param kylin_conn_id: The connection id as configured in Airflow administration.
    :param project: kylin project name; this param will overwrite the project in kylin_conn_id
:param cube: kylin cube name
    :param dsn: (dsn url of the kylin connection, which will overwrite kylin_conn_id.
for example: kylin://ADMIN:KYLIN@sandbox/learn_kylin?timeout=60&is_debug=1)
    :param command: (kylin command, one of 'build', 'merge', 'refresh', 'delete',
'build_streaming', 'merge_streaming', 'refresh_streaming', 'disable', 'enable',
'purge', 'clone', 'drop'.
build - use /kylin/api/cubes/{cubeName}/build rest api,and buildType is 'BUILD',
and you should give start_time and end_time
refresh - use build rest api,and buildType is 'REFRESH'
merge - use build rest api,and buildType is 'MERGE'
build_streaming - use /kylin/api/cubes/{cubeName}/build2 rest api,and buildType is 'BUILD'
and you should give offset_start and offset_end
refresh_streaming - use build2 rest api,and buildType is 'REFRESH'
merge_streaming - use build2 rest api,and buildType is 'MERGE'
delete - delete segment, and you should give segment_name value
disable - disable cube
enable - enable cube
purge - purge cube
clone - clone cube,new cube name is {cube_name}_clone
drop - drop cube)
:param start_time: build segment start time
:param end_time: build segment end time
:param offset_start: streaming build segment start time
:param offset_end: streaming build segment end time
:param segment_name: segment name
    :param is_track_job: (whether to track job status. if value is True, will track job until
        job status is in ("FINISHED", "ERROR", "DISCARDED", "KILLED", "SUICIDAL",
        "STOPPED") or timeout)
    :param interval: track job status, default value is 60s
    :param timeout: timeout value, default value is 1 day, 60 * 60 * 24 s
    :param eager_error_status: (job error statuses; if the job status is in this list, this task will fail.
        default value is tuple(["ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"]))
"""
template_fields: Sequence[str] = (
"project",
"cube",
"dsn",
"command",
"start_time",
"end_time",
"segment_name",
"offset_start",
"offset_end",
)
ui_color = "#E79C46"
build_command = {
"fullbuild",
"build",
"merge",
"refresh",
"build_streaming",
"merge_streaming",
"refresh_streaming",
}
jobs_end_status = {"FINISHED", "ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"}
def __init__(
self,
*,
kylin_conn_id: str = "kylin_default",
project: str | None = None,
cube: str | None = None,
dsn: str | None = None,
command: str | None = None,
start_time: str | None = None,
end_time: str | None = None,
offset_start: str | None = None,
offset_end: str | None = None,
segment_name: str | None = None,
is_track_job: bool = False,
interval: int = 60,
timeout: int = 60 * 60 * 24,
eager_error_status=("ERROR", "DISCARDED", "KILLED", "SUICIDAL", "STOPPED"),
**kwargs,
):
super().__init__(**kwargs)
self.kylin_conn_id = kylin_conn_id
self.project = project
self.cube = cube
self.dsn = dsn
self.command = command
self.start_time = start_time
self.end_time = end_time
self.segment_name = segment_name
self.offset_start = offset_start
self.offset_end = offset_end
self.is_track_job = is_track_job
self.interval = interval
self.timeout = timeout
self.eager_error_status = eager_error_status
self.jobs_error_status = [stat.upper() for stat in eager_error_status]
def execute(self, context: Context):
_hook = KylinHook(kylin_conn_id=self.kylin_conn_id, project=self.project, dsn=self.dsn)
_support_invoke_command = kylinpy.CubeSource.support_invoke_command
if not self.command:
raise AirflowException(f"Kylin:Command {self.command} can not be empty")
if self.command.lower() not in _support_invoke_command:
raise AirflowException(
f"Kylin:Command {self.command} can not match kylin command list {_support_invoke_command}"
)
kylinpy_params = {
"start": datetime.fromtimestamp(int(self.start_time) / 1000) if self.start_time else None,
"end": datetime.fromtimestamp(int(self.end_time) / 1000) if self.end_time else None,
"name": self.segment_name,
"offset_start": int(self.offset_start) if self.offset_start else None,
"offset_end": int(self.offset_end) if self.offset_end else None,
}
rsp_data = _hook.cube_run(self.cube, self.command.lower(), **kylinpy_params)
if self.is_track_job and self.command.lower() in self.build_command:
started_at = time.monotonic()
job_id = rsp_data.get("uuid")
if job_id is None:
raise AirflowException("kylin job id is None")
self.log.info("kylin job id: %s", job_id)
job_status = None
while job_status not in self.jobs_end_status:
if time.monotonic() - started_at > self.timeout:
raise AirflowException(f"kylin job {job_id} timeout")
time.sleep(self.interval)
job_status = _hook.get_job_status(job_id)
self.log.info("Kylin job status is %s ", job_status)
if job_status in self.jobs_error_status:
raise AirflowException(f"Kylin job {job_id} status {job_status} is error ")
if self.do_xcom_push:
return rsp_data
| 7,309 | 40.771429 | 106 | py |
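A minimal, hypothetical DAG fragment using the operator above; the project, cube and epoch-millisecond time range are assumptions:

import pendulum

from airflow import DAG
from airflow.providers.apache.kylin.operators.kylin_cube import KylinCubeOperator

with DAG(
    dag_id="example_kylin_build",
    start_date=pendulum.datetime(2023, 1, 1, tz="UTC"),
    schedule=None,
    catchup=False,
):
    build_segment = KylinCubeOperator(
        task_id="build_kylin_segment",
        kylin_conn_id="kylin_default",
        project="learn_kylin",        # assumed project
        cube="kylin_sales_cube",      # assumed cube
        command="build",
        start_time="1672531200000",   # 2023-01-01 00:00:00 UTC, in milliseconds
        end_time="1672617600000",     # 2023-01-02 00:00:00 UTC, in milliseconds
        is_track_job=True,            # poll the Kylin job until a terminal status
        interval=60,
        timeout=60 * 60 * 6,
    )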