Dataset columns: response (string, 1 to 33.1k characters) and instruction (string, 22 to 582k characters).
Generate documentation; used by Sphinx argparse.
def get_parser() -> argparse.ArgumentParser: """Generate documentation; used by Sphinx argparse.""" from airflow.cli.cli_parser import AirflowHelpFormatter, _add_command parser = DefaultHelpParser(prog="airflow", formatter_class=AirflowHelpFormatter) subparsers = parser.add_subparsers(dest="subcommand", metavar="GROUP_OR_COMMAND") for group_command in AwsAuthManager.get_cli_commands(): _add_command(subparsers, group_command) return parser
Return entity type. :param resource_type: Resource type. Example: Airflow::Action, Airflow::Group, Airflow::Variable, Airflow::User.
def get_entity_type(resource_type: AvpEntities) -> str: """ Return entity type. :param resource_type: Resource type. Example: Airflow::Action, Airflow::Group, Airflow::Variable, Airflow::User. """ return AVP_PREFIX_ENTITIES + resource_type.value
Return action id. Convention for action ID is <resource_type>.<method>. Example: Variable.GET. :param resource_type: Resource type. :param method: Resource method.
def get_action_id(resource_type: AvpEntities, method: ResourceMethod | str): """ Return action id. Convention for action ID is <resource_type>.<method>. Example: Variable.GET. :param resource_type: Resource type. :param method: Resource method. """ return f"{resource_type.value}.{method}"
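A quick sketch of how these two helpers compose, assuming AVP_PREFIX_ENTITIES is the "Airflow::" prefix and AvpEntities.VARIABLE is an enum member whose value is "Variable" (both names are assumptions based on the docstring examples above):

# Entity types get the AVP prefix; action IDs follow the <resource_type>.<method> convention.
get_entity_type(AvpEntities.VARIABLE)        # -> 'Airflow::Variable'
get_action_id(AvpEntities.VARIABLE, "GET")   # -> 'Variable.GET'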
Initialize Amazon Verified Permissions resources.
def init_avp(args): """Initialize Amazon Verified Permissions resources.""" client = _get_client() # Create the policy store if needed policy_store_id, is_new_policy_store = _create_policy_store(client, args) if not is_new_policy_store: print( f"Since an existing policy store with description '{args.policy_store_description}' has been found in Amazon Verified Permissions, " "the CLI made no changes to this policy store for security reasons. " "Any modification to this policy store must be done manually.", ) else: # Set the schema _set_schema(client, policy_store_id, args) if not args.dry_run: print( "Please set configs below in Airflow configuration under AIRFLOW__AWS_AUTH_MANAGER__<config name>." ) print(json.dumps({"avp_policy_store_id": policy_store_id}, indent=4))
Update Amazon Verified Permissions policy store schema.
def update_schema(args): """Update Amazon Verified Permissions policy store schema.""" client = _get_client() _set_schema(client, args.policy_store_id, args)
Return Amazon Verified Permissions client.
def _get_client(): """Return Amazon Verified Permissions client.""" region_name = conf.get(CONF_SECTION_NAME, CONF_REGION_NAME_KEY) return boto3.client("verifiedpermissions", region_name=region_name)
Create the policy store if needed. This function returns two elements: - the policy store ID - whether the returned policy store ID refers to a newly created policy store.
def _create_policy_store(client: BaseClient, args) -> tuple[str | None, bool]: """ Create if needed the policy store. This function returns two elements: - the policy store ID - whether the policy store ID returned refers to a newly created policy store. """ paginator = client.get_paginator("list_policy_stores") pages = paginator.paginate() policy_stores = [application for page in pages for application in page["policyStores"]] existing_policy_stores = [ policy_store for policy_store in policy_stores if policy_store.get("description") == args.policy_store_description ] if args.verbose: log.debug("Policy stores found: %s", policy_stores) log.debug("Existing policy stores found: %s", existing_policy_stores) if len(existing_policy_stores) > 0: print( f"There is already a policy store with description '{args.policy_store_description}' in Amazon Verified Permissions: '{existing_policy_stores[0]['policyStoreId']}'." ) return existing_policy_stores[0]["policyStoreId"], False else: print(f"No policy store with description '{args.policy_store_description}' found, creating one.") if args.dry_run: print( f"Dry run, not creating the policy store with description '{args.policy_store_description}'." ) return None, True response = client.create_policy_store( validationSettings={ "mode": "STRICT", }, description=args.policy_store_description, ) if args.verbose: log.debug("Response from create_policy_store: %s", response) print(f"Policy store created: '{response['policyStoreId']}'") return response["policyStoreId"], True
Set the policy store schema.
def _set_schema(client: BaseClient, policy_store_id: str, args) -> None: """Set the policy store schema.""" if args.dry_run: print(f"Dry run, not updating the schema of the policy store with ID '{policy_store_id}'.") return schema_path = Path(__file__).parents[1] / "avp" / "schema.json" with open(schema_path) as schema_file: response = client.put_schema( policyStoreId=policy_store_id, definition={ "cedarJson": schema_file.read(), }, ) if args.verbose: log.debug("Response from put_schema: %s", response) print("Policy store schema updated.")
Initialize AWS IAM Identity Center resources.
def init_idc(args): """Initialize AWS IAM Identity Center resources.""" client = _get_client() # Create the instance if needed instance_arn = _create_instance(client, args) # Create the application if needed _create_application(client, instance_arn, args) if not args.dry_run: print("AWS IAM Identity Center resources created successfully.")
Return AWS IAM Identity Center client.
def _get_client(): """Return AWS IAM Identity Center client.""" region_name = conf.get(CONF_SECTION_NAME, CONF_REGION_NAME_KEY) return boto3.client("sso-admin", region_name=region_name)
Create the AWS IAM Identity Center instance if needed.
def _create_instance(client: BaseClient, args) -> str | None: """Create if needed AWS IAM Identity Center instance.""" instances = client.list_instances() if args.verbose: log.debug("Instances found: %s", instances) if len(instances["Instances"]) > 0: print( f"There is already an instance configured in AWS IAM Identity Center: '{instances['Instances'][0]['InstanceArn']}'. " "No need to create a new one." ) return instances["Instances"][0]["InstanceArn"] else: print("No instance configured in AWS IAM Identity Center, creating one.") if args.dry_run: print("Dry run, not creating the instance.") return None response = client.create_instance(Name=args.instance_name) if args.verbose: log.debug("Response from create_instance: %s", response) print(f"Instance created: '{response['InstanceArn']}'") return response["InstanceArn"]
Create the AWS IAM Identity Center application if needed.
def _create_application(client: BaseClient, instance_arn: str | None, args) -> str | None: """Create if needed AWS IAM identity Center application.""" paginator = client.get_paginator("list_applications") pages = paginator.paginate(InstanceArn=instance_arn or "") applications = [application for page in pages for application in page["Applications"]] existing_applications = [ application for application in applications if application["Name"] == args.application_name ] if args.verbose: log.debug("Applications found: %s", applications) log.debug("Existing applications found: %s", existing_applications) if len(existing_applications) > 0: print( f"There is already an application named '{args.application_name}' in AWS IAM Identity Center: '{existing_applications[0]['ApplicationArn']}'. " "Using this application." ) return existing_applications[0]["ApplicationArn"] else: print(f"No application named {args.application_name} found, creating one.") if args.dry_run: print("Dry run, not creating the application.") return None try: response = client.create_application( ApplicationProviderArn="arn:aws:sso::aws:applicationProvider/custom-saml", Description="Application automatically created through the Airflow CLI. This application is used to access Airflow environment.", InstanceArn=instance_arn, Name=args.application_name, PortalOptions={ "SignInOptions": { "Origin": "IDENTITY_CENTER", }, "Visibility": "ENABLED", }, Status="ENABLED", ) if args.verbose: log.debug("Response from create_application: %s", response) except ClientError as e: # This is needed because as of today, the create_application in AWS Identity Center does not support SAML application # Remove this part when it is supported if "is not supported for this action" in e.response["Error"]["Message"]: print( "Creation of SAML applications is only supported in AWS console today. " "Please create the application through the console." ) raise print(f"Application created: '{response['ApplicationArn']}'") return response["ApplicationArn"]
Recursively unpack a nested dict and return it as a flat dict. For example, _recursive_flatten_dict({'a': 'a', 'b': 'b', 'c': {'d': 'd'}}) returns {'a': 'a', 'b': 'b', 'd': 'd'}.
def _recursive_flatten_dict(nested_dict): """ Recursively unpack a nested dict and return it as a flat dict. For example, _recursive_flatten_dict({'a': 'a', 'b': 'b', 'c': {'d': 'd'}}) returns {'a': 'a', 'b': 'b', 'd': 'd'}. """ items = [] for key, value in nested_dict.items(): if isinstance(value, dict): items.extend(_recursive_flatten_dict(value).items()) else: items.append((key, value)) return dict(items)
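A doctest-style sketch of the flattening behaviour, using the helper defined above:

# Nested values are hoisted to the top level; non-dict values pass through unchanged.
_recursive_flatten_dict({"a": "a", "b": "b", "c": {"d": "d"}})
# -> {'a': 'a', 'b': 'b', 'd': 'd'}
# A nested key that collides with an outer one wins, because it is added later.
_recursive_flatten_dict({"x": 1, "nested": {"x": 2}})
# -> {'x': 2}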
Convert "assign_public_ip" from True/False to ENABLE/DISABLE.
def parse_assign_public_ip(assign_public_ip): """Convert "assign_public_ip" from True/False to ENABLED/DISABLED.""" return "ENABLED" if assign_public_ip == "True" else "DISABLED"
Accept a potentially nested dictionary and recursively convert all keys into camelCase.
def camelize_dict_keys(nested_dict) -> dict: """Accept a potentially nested dictionary and recursively convert all keys into camelCase.""" result = {} for key, value in nested_dict.items(): new_key = camelize(key, uppercase_first_letter=False) if isinstance(value, dict) and (key.lower() != "tags"): # The key name on tags can be whatever the user wants, and we should not mess with them. result[new_key] = camelize_dict_keys(value) else: result[new_key] = nested_dict[key] return result
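An illustrative call (assuming camelize is inflection.camelize, which the uppercase_first_letter keyword suggests); note that keys nested under "tags" are intentionally left untouched:

camelize_dict_keys({
    "network_configuration": {"assign_public_ip": "ENABLED"},
    "tags": {"my_custom_tag": "value"},
})
# -> {'networkConfiguration': {'assignPublicIp': 'ENABLED'},
#     'tags': {'my_custom_tag': 'value'}}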
Calculate the exponential backoff (in seconds) until the next attempt. :param attempt_number: Number of attempts since last success. :param max_delay: Maximum delay in seconds between retries. Default 120. :param exponent_base: Exponent base to calculate delay. Default 4.
def calculate_next_attempt_delay( attempt_number: int, max_delay: int = 60 * 2, exponent_base: int = 4, ) -> timedelta: """ Calculate the exponential backoff (in seconds) until the next attempt. :param attempt_number: Number of attempts since last success. :param max_delay: Maximum delay in seconds between retries. Default 120. :param exponent_base: Exponent base to calculate delay. Default 4. """ return timedelta(seconds=min((exponent_base**attempt_number), max_delay))
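A small sketch of the resulting delays with the defaults (exponent base 4, cap of 120 seconds):

# Delay grows as exponent_base ** attempt_number until it hits max_delay.
[calculate_next_attempt_delay(n).total_seconds() for n in range(1, 5)]
# -> [4.0, 16.0, 64.0, 120.0]   (4**4 = 256 is capped at max_delay=120)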
Retry a callable function with exponential backoff between attempts if it raises an exception. :param last_attempt_time: Timestamp of last attempt call. :param attempts_since_last_successful: Number of attempts since last success. :param callable_function: Callable function that will be called if enough time has passed. :param max_delay: Maximum delay in seconds between retries. Default 120. :param max_attempts: Maximum number of attempts before giving up. Default -1 (no limit). :param exponent_base: Exponent base to calculate delay. Default 4.
def exponential_backoff_retry( last_attempt_time: datetime, attempts_since_last_successful: int, callable_function: Callable, max_delay: int = 60 * 2, max_attempts: int = -1, exponent_base: int = 4, ) -> None: """ Retry a callable function with exponential backoff between attempts if it raises an exception. :param last_attempt_time: Timestamp of last attempt call. :param attempts_since_last_successful: Number of attempts since last success. :param callable_function: Callable function that will be called if enough time has passed. :param max_delay: Maximum delay in seconds between retries. Default 120. :param max_attempts: Maximum number of attempts before giving up. Default -1 (no limit). :param exponent_base: Exponent base to calculate delay. Default 4. """ if max_attempts != -1 and attempts_since_last_successful >= max_attempts: log.error("Max attempts reached. Exiting.") return next_retry_time = last_attempt_time + calculate_next_attempt_delay( attempt_number=attempts_since_last_successful, max_delay=max_delay, exponent_base=exponent_base ) current_time = timezone.utcnow() if current_time >= next_retry_time: try: callable_function() except Exception: log.exception("Error calling %r", callable_function.__name__) next_delay = calculate_next_attempt_delay( attempts_since_last_successful + 1, max_delay, exponent_base ) log.info("Waiting for %s seconds before retrying.", next_delay)
Resolve custom SessionFactory class.
def resolve_session_factory() -> type[BaseSessionFactory]: """Resolve custom SessionFactory class.""" clazz = conf.getimport("aws", "session_factory", fallback=None) if not clazz: return BaseSessionFactory if not issubclass(clazz, BaseSessionFactory): raise TypeError( f"Your custom AWS SessionFactory class `{clazz.__name__}` is not a subclass " f"of `{BaseSessionFactory.__name__}`." ) return clazz
For compatibility with airflow.contrib.hooks.aws_hook.
def _parse_s3_config(config_file_name: str, config_format: str | None = "boto", profile: str | None = None): """For compatibility with airflow.contrib.hooks.aws_hook.""" from airflow.providers.amazon.aws.utils.connection_wrapper import _parse_s3_config return _parse_s3_config( config_file_name=config_file_name, config_format=config_format, profile=profile, )
Check if exception is related to ECS resource quota (CPU, MEM).
def should_retry(exception: Exception): """Check if exception is related to ECS resource quota (CPU, MEM).""" if isinstance(exception, EcsOperatorError): return any( quota_reason in failure["reason"] for quota_reason in ["RESOURCE:MEMORY", "RESOURCE:CPU"] for failure in exception.failures ) return False
Check if exception is related to ENI (Elastic Network Interfaces).
def should_retry_eni(exception: Exception): """Check if exception is related to ENI (Elastic Network Interfaces).""" if isinstance(exception, EcsTaskFailToStart): return any( eni_reason in exception.message for eni_reason in ["network interface provisioning", "ResourceInitializationError"] ) return False
Provide a bucket name taken from the connection if no bucket name has been passed to the function.
def provide_bucket_name(func: Callable) -> Callable: """Provide a bucket name taken from the connection if no bucket name has been passed to the function.""" if hasattr(func, "_unify_bucket_name_and_key_wrapped"): logger.warning("`unify_bucket_name_and_key` should wrap `provide_bucket_name`.") function_signature = signature(func) @wraps(func) def wrapper(*args, **kwargs) -> Callable: bound_args = function_signature.bind(*args, **kwargs) if "bucket_name" not in bound_args.arguments: self = args[0] if "bucket_name" in self.service_config: bound_args.arguments["bucket_name"] = self.service_config["bucket_name"] elif self.conn_config and self.conn_config.schema: warnings.warn( "s3 conn_type, and the associated schema field, is deprecated. " "Please use aws conn_type instead, and specify `bucket_name` " "in `service_config.s3` within `extras`.", AirflowProviderDeprecationWarning, stacklevel=2, ) bound_args.arguments["bucket_name"] = self.conn_config.schema return func(*bound_args.args, **bound_args.kwargs) return wrapper
Provide a bucket name taken from the connection if no bucket name has been passed to the function.
def provide_bucket_name_async(func: Callable) -> Callable: """Provide a bucket name taken from the connection if no bucket name has been passed to the function.""" function_signature = signature(func) @wraps(func) async def wrapper(*args: Any, **kwargs: Any) -> Any: bound_args = function_signature.bind(*args, **kwargs) if "bucket_name" not in bound_args.arguments: self = args[0] if self.aws_conn_id: connection = await sync_to_async(self.get_connection)(self.aws_conn_id) if connection.schema: bound_args.arguments["bucket_name"] = connection.schema return await func(*bound_args.args, **bound_args.kwargs) return wrapper
Unify bucket name and key when a key, but no bucket name, has been passed to the function.
def unify_bucket_name_and_key(func: Callable) -> Callable: """Unify bucket name and key in case no bucket name and at least a key has been passed to the function.""" function_signature = signature(func) @wraps(func) def wrapper(*args, **kwargs) -> Callable: bound_args = function_signature.bind(*args, **kwargs) if "wildcard_key" in bound_args.arguments: key_name = "wildcard_key" elif "key" in bound_args.arguments: key_name = "key" else: raise ValueError("Missing key parameter!") if "bucket_name" not in bound_args.arguments: with suppress(S3HookUriParseFailure): bound_args.arguments["bucket_name"], bound_args.arguments[key_name] = S3Hook.parse_s3_url( bound_args.arguments[key_name] ) return func(*bound_args.args, **bound_args.kwargs) # set attr _unify_bucket_name_and_key_wrapped so that we can check at # class definition that unify is the first decorator applied # if provide_bucket_name is applied first, and there's a bucket defined in conn # then if user supplies full key, bucket in key is not respected wrapper._unify_bucket_name_and_key_wrapped = True # type: ignore[attr-defined] return wrapper
Given callable ``f``, find index in ``arr`` to minimize ``f(arr[i])``. None is returned if ``arr`` is empty.
def argmin(arr, f: Callable) -> int | None: """Given callable ``f``, find index in ``arr`` to minimize ``f(arr[i])``. None is returned if ``arr`` is empty. """ min_value = None min_idx = None for idx, item in enumerate(arr): if item is not None: if min_value is None or f(item) < min_value: min_value = f(item) min_idx = idx return min_idx
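A few illustrative calls against the helper above:

# Index of the element minimizing f; None items are skipped, an empty list gives None.
argmin([3, None, 1, 2], lambda x: x)        # -> 2
argmin([], lambda x: x)                     # -> None
argmin([(1, 9), (2, 4)], lambda t: t[1])    # -> 1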
Check if training job's secondary status message has changed. :param current_job_description: Current job description, returned from DescribeTrainingJob call. :param prev_job_description: Previous job description, returned from DescribeTrainingJob call. :return: Whether the secondary status message of a training job changed or not.
def secondary_training_status_changed(current_job_description: dict, prev_job_description: dict) -> bool: """Check if training job's secondary status message has changed. :param current_job_description: Current job description, returned from DescribeTrainingJob call. :param prev_job_description: Previous job description, returned from DescribeTrainingJob call. :return: Whether the secondary status message of a training job changed or not. """ current_secondary_status_transitions = current_job_description.get("SecondaryStatusTransitions") if not current_secondary_status_transitions: return False prev_job_secondary_status_transitions = ( prev_job_description.get("SecondaryStatusTransitions") if prev_job_description is not None else None ) last_message = ( prev_job_secondary_status_transitions[-1]["StatusMessage"] if prev_job_secondary_status_transitions else "" ) message = current_job_description["SecondaryStatusTransitions"][-1]["StatusMessage"] return message != last_message
Format string containing start time and the secondary training job status message. :param job_description: Returned response from DescribeTrainingJob call :param prev_description: Previous job description from DescribeTrainingJob call :return: Job status string to be printed.
def secondary_training_status_message( job_description: dict[str, list[Any]], prev_description: dict | None ) -> str: """Format string containing start time and the secondary training job status message. :param job_description: Returned response from DescribeTrainingJob call :param prev_description: Previous job description from DescribeTrainingJob call :return: Job status string to be printed. """ current_transitions = job_description.get("SecondaryStatusTransitions") if not current_transitions: return "" prev_transitions_num = 0 if prev_description is not None: if prev_description.get("SecondaryStatusTransitions") is not None: prev_transitions_num = len(prev_description["SecondaryStatusTransitions"]) transitions_to_print = ( current_transitions[-1:] if len(current_transitions) == prev_transitions_num else current_transitions[prev_transitions_num - len(current_transitions) :] ) status_strs = [] for transition in transitions_to_print: message = transition["StatusMessage"] time_utc = timezone.convert_to_utc(cast(datetime, job_description["LastModifiedTime"])) status_strs.append(f"{time_utc:%Y-%m-%d %H:%M:%S} {transition['Status']} - {message}") return "\n".join(status_strs)
Retrieve the S3 URI to EMR Serverless Job logs. Any EMR Serverless job may have a different S3 logging location (or none), which is an S3 URI. The logging location is then {s3_uri}/applications/{application_id}/jobs/{job_run_id}.
def get_serverless_log_uri(*, s3_log_uri: str, application_id: str, job_run_id: str) -> str: """ Retrieve the S3 URI to EMR Serverless Job logs. Any EMR Serverless job may have a different S3 logging location (or none), which is an S3 URI. The logging location is then {s3_uri}/applications/{application_id}/jobs/{job_run_id}. """ return f"{s3_log_uri}/applications/{application_id}/jobs/{job_run_id}"
Retrieve the URL to EMR Serverless dashboard. The URL is a one-use, ephemeral link that expires in 1 hour and is accessible without authentication. Either an AWS connection ID or existing EMR Serverless client must be passed. If the connection ID is passed, a client is generated using that connection.
def get_serverless_dashboard_url( *, aws_conn_id: str | None = None, emr_serverless_client: boto3.client = None, application_id: str, job_run_id: str, ) -> ParseResult | None: """ Retrieve the URL to EMR Serverless dashboard. The URL is a one-use, ephemeral link that expires in 1 hour and is accessible without authentication. Either an AWS connection ID or existing EMR Serverless client must be passed. If the connection ID is passed, a client is generated using that connection. """ if not exactly_one(aws_conn_id, emr_serverless_client): raise AirflowException("Requires either an AWS connection ID or an EMR Serverless Client.") if aws_conn_id: # If get_dashboard_for_job_run fails for whatever reason, fail after 1 attempt # so that the rest of the links load in a reasonable time frame. hook = EmrServerlessHook(aws_conn_id=aws_conn_id, config={"retries": {"total_max_attempts": 1}}) emr_serverless_client = hook.conn response = emr_serverless_client.get_dashboard_for_job_run( applicationId=application_id, jobRunId=job_run_id ) if "url" not in response: return None log_uri = urlparse(response["url"]) return log_uri
Retrieve the S3 URI to the EMR Job logs. Requires either the output of a describe_cluster call or both an EMR Client and a job_flow_id.
def get_log_uri( *, cluster: dict[str, Any] | None = None, emr_client: boto3.client = None, job_flow_id: str | None = None ) -> str | None: """ Retrieve the S3 URI to the EMR Job logs. Requires either the output of a describe_cluster call or both an EMR Client and a job_flow_id. """ if not exactly_one(bool(cluster), emr_client and job_flow_id): raise AirflowException( "Requires either the output of a describe_cluster call or both an EMR Client and a job_flow_id." ) cluster_info = (cluster or emr_client.describe_cluster(ClusterId=job_flow_id))["Cluster"] if "LogUri" not in cluster_info: return None log_uri = S3Hook.parse_s3_url(cluster_info["LogUri"]) return "/".join(log_uri)
JSON serializer replicating legacy watchtower behavior. The legacy watchtower json serializer serialized datetime objects as ISO format and all other non-JSON-serializable values to `null`. :param value: the object to serialize :return: string representation of `value` if it is an instance of datetime, `None` otherwise
def json_serialize_legacy(value: Any) -> str | None: """ JSON serializer replicating legacy watchtower behavior. The legacy watchtower json serializer serialized datetime objects as ISO format and all other non-JSON-serializable values to `null`. :param value: the object to serialize :return: string representation of `value` if it is an instance of datetime, `None` otherwise """ if isinstance(value, (date, datetime)): return value.isoformat() else: return None
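A minimal usage sketch (the payload below is hypothetical; the serializer is normally handed to json.dumps via default=):

import json
from datetime import datetime

payload = {"timestamp": datetime(2024, 1, 1, 12, 0), "unserializable": {1, 2, 3}}
json.dumps(payload, default=json_serialize_legacy)
# -> '{"timestamp": "2024-01-01T12:00:00", "unserializable": null}'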
JSON serializer replicating current watchtower behavior. This provides customers with an accessible import, `airflow.providers.amazon.aws.log.cloudwatch_task_handler.json_serialize` :param value: the object to serialize :return: string representation of `value`
def json_serialize(value: Any) -> str | None: """ JSON serializer replicating current watchtower behavior. This provides customers with an accessible import, `airflow.providers.amazon.aws.log.cloudwatch_task_handler.json_serialize` :param value: the object to serialize :return: string representation of `value` """ return watchtower._json_serialize_default(value)
Parse a config file for S3 credentials. Can currently parse boto, s3cmd.conf and AWS SDK config formats. :param config_file_name: path to the config file :param config_format: config type. One of "boto", "s3cmd" or "aws". Defaults to "boto" :param profile: profile name in AWS type config file
def _parse_s3_config( config_file_name: str, config_format: str | None = "boto", profile: str | None = None ) -> tuple[str | None, str | None]: """Parse a config file for S3 credentials. Can currently parse boto, s3cmd.conf and AWS SDK config formats. :param config_file_name: path to the config file :param config_format: config type. One of "boto", "s3cmd" or "aws". Defaults to "boto" :param profile: profile name in AWS type config file """ import configparser config = configparser.ConfigParser() try: if config.read(config_file_name): # pragma: no cover sections = config.sections() else: raise AirflowException(f"Couldn't read {config_file_name}") except Exception as e: raise AirflowException(f"Exception when parsing {config_file_name}: {e.__class__.__name__}") # Setting option names depending on file format if config_format is None: config_format = "boto" conf_format = config_format.lower() if conf_format == "boto": # pragma: no cover if profile is not None and "profile " + profile in sections: cred_section = "profile " + profile else: cred_section = "Credentials" elif conf_format == "aws" and profile is not None: cred_section = profile else: cred_section = "default" # Option names if conf_format in ("boto", "aws"): # pragma: no cover key_id_option = "aws_access_key_id" secret_key_option = "aws_secret_access_key" else: key_id_option = "access_key" secret_key_option = "secret_key" # Actual Parsing if cred_section not in sections: raise AirflowException("This config file format is not recognized") else: try: access_key = config.get(cred_section, key_id_option) secret_key = config.get(cred_section, secret_key_option) mask_secret(secret_key) except Exception: raise AirflowException("Option Error in parsing s3 config file") return access_key, secret_key
Email backend for SES.
def send_email( to: list[str] | str, subject: str, html_content: str, files: list | None = None, cc: list[str] | str | None = None, bcc: list[str] | str | None = None, mime_subtype: str = "mixed", mime_charset: str = "utf-8", conn_id: str = "aws_default", from_email: str | None = None, custom_headers: dict[str, Any] | None = None, **kwargs, ) -> None: """Email backend for SES.""" if from_email is None: raise RuntimeError("The `from_email' configuration has to be set for the SES emailer.") hook = SesHook(aws_conn_id=conn_id) hook.send_email( mail_from=from_email, to=to, subject=subject, html_content=html_content, files=files, cc=cc, bcc=bcc, mime_subtype=mime_subtype, mime_charset=mime_charset, custom_headers=custom_headers, )
Convert input values to a deterministic UUID string representation. This function is only intended to generate a hash which is used as an identifier, not for any security use. Generates a UUID v5 (SHA-1 + Namespace) for each value provided, and this UUID is used as the Namespace for the next element. If only one non-None value is provided to the function, then the result of the function would be the same as the result of ``uuid.uuid5``. All ``None`` values are replaced by the NIL UUID. If a single ``None`` value is provided, the NIL UUID is returned. :param namespace: Initial namespace value to pass into the ``uuid.uuid5`` function.
def generate_uuid(*values: str | None, namespace: UUID = NAMESPACE_OID) -> str: """ Convert input values to a deterministic UUID string representation. This function is only intended to generate a hash which is used as an identifier, not for any security use. Generates a UUID v5 (SHA-1 + Namespace) for each value provided, and this UUID is used as the Namespace for the next element. If only one non-None value is provided to the function, then the result of the function would be the same as the result of ``uuid.uuid5``. All ``None`` values are replaced by the NIL UUID. If a single ``None`` value is provided, the NIL UUID is returned. :param namespace: Initial namespace value to pass into the ``uuid.uuid5`` function. """ if not values: raise ValueError("Expected at least 1 argument") if len(values) == 1 and values[0] is None: return str(NIL_UUID) result = namespace for item in values: result = uuid5(result, item if item is not None else str(NIL_UUID)) return str(result)
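A few illustrative calls, showing the deterministic chaining described above:

from uuid import NAMESPACE_OID, uuid5

# Same inputs always give the same UUID string.
generate_uuid("a", "b") == generate_uuid("a", "b")     # -> True
# A single non-None value reduces to a plain uuid5 against the initial namespace.
generate_uuid("a") == str(uuid5(NAMESPACE_OID, "a"))   # -> True
# A single None value returns the NIL UUID.
generate_uuid(None)   # -> '00000000-0000-0000-0000-000000000000'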
Merge the provided template_fields with the generic AWS ones and return them in alphabetical order.
def aws_template_fields(*template_fields: str) -> tuple[str, ...]: """Merge provided template_fields with generic one and return in alphabetical order.""" if not all(isinstance(tf, str) for tf in template_fields): msg = ( "Expected that all provided arguments are strings, but got " f"{', '.join(map(repr, template_fields))}." ) raise TypeError(msg) return tuple(sorted(list({"aws_conn_id", "region_name", "verify"} | set(template_fields))))
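An illustrative call; the three generic AWS fields are always merged in and the result is sorted:

aws_template_fields("cluster_name", "verify")
# -> ('aws_conn_id', 'cluster_name', 'region_name', 'verify')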
Generate AWS credentials block for Redshift COPY and UNLOAD commands. See AWS docs for details: https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html#copy-credentials :param credentials: ReadOnlyCredentials object from `botocore`
def build_credentials_block(credentials: ReadOnlyCredentials) -> str: """Generate AWS credentials block for Redshift COPY and UNLOAD commands. See AWS docs for details: https://docs.aws.amazon.com/redshift/latest/dg/copy-parameters-authorization.html#copy-credentials :param credentials: ReadOnlyCredentials object from `botocore` """ if credentials.token: log.debug("STS token found in credentials, including it in the command") # these credentials are obtained from AWS STS # so the token must be included in the CREDENTIALS clause credentials_line = ( f"aws_access_key_id={credentials.access_key};" f"aws_secret_access_key={credentials.secret_key};" f"token={credentials.token}" ) else: credentials_line = ( f"aws_access_key_id={credentials.access_key};aws_secret_access_key={credentials.secret_key}" ) return credentials_line
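A sketch of how the credentials block is typically spliced into a Redshift COPY statement (the table name and S3 key below are placeholders; ReadOnlyCredentials is the namedtuple from botocore.credentials):

from botocore.credentials import ReadOnlyCredentials

creds = ReadOnlyCredentials(access_key="AKIA...", secret_key="...", token=None)
credentials_block = build_credentials_block(creds)
copy_query = f"""
    COPY my_table
    FROM 's3://my-bucket/my-key'
    credentials '{credentials_block}'
    csv;
"""

With temporary STS credentials, the same call also appends token=... to the block, which Redshift requires for session credentials.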
Process the response from SQS. :param response: The response from SQS :return: The processed response
def process_response( response: Any, message_filtering: MessageFilteringType | None = None, message_filtering_match_values: Any = None, message_filtering_config: Any = None, ) -> Any: """ Process the response from SQS. :param response: The response from SQS :return: The processed response """ if not isinstance(response, dict) or "Messages" not in response: return [] messages = response["Messages"] num_messages = len(messages) log.info("Received %d messages", num_messages) if num_messages and message_filtering: messages = filter_messages( messages, message_filtering, message_filtering_match_values, message_filtering_config ) num_messages = len(messages) log.info("There are %d messages left after filtering", num_messages) return messages
Suppress any ``Exception`` raised in the decorated function. The main use case is optional functionality, where the decorated function/method might raise an error that is a subclass of ``Exception`` and execution should continue regardless. .. note:: The decorator does not intend to catch ``BaseException``, e.g. ``GeneratorExit``, ``KeyboardInterrupt``, ``SystemExit`` and others. .. warning:: Only for internal usage; this decorator might be changed or removed in the future without any further notice. :param return_value: Return value if the decorated function/method raises any ``Exception``. :meta: private
def return_on_error(return_value: RT): """ Suppress any ``Exception`` raised in decorator function. Main use-case when functional is optional, however any error on functions/methods might raise any error which are subclass of ``Exception``. .. note:: Decorator doesn't intend to catch ``BaseException``, e.g. ``GeneratorExit``, ``KeyboardInterrupt``, ``SystemExit`` and others. .. warning:: Only for internal usage, this decorator might be changed or removed in the future without any further notice. :param return_value: Return value if decorated function/method raise any ``Exception``. :meta: private """ def decorator(func: Callable[PS, RT]) -> Callable[PS, RT]: @wraps(func) def wrapper(*args, **kwargs) -> RT: try: return func(*args, **kwargs) except Exception: log.debug( "Encountered error during execution function/method %r", func.__name__, exc_info=True ) return return_value return wrapper return decorator
Format tags for boto calls that expect a given format. If given a dictionary, formats it as an array of objects with a key and a value field to be passed to boto calls that expect this format. Otherwise, assumes that it's already in the right format and returns it as is. We do not validate the format here since it's done by boto anyway, and the error would not be clearer if thrown from here. :param source: a dict from which keys and values are read :param key_label: optional, the label to use for keys if not "Key" :param value_label: optional, the label to use for values if not "Value"
def format_tags(source: Any, *, key_label: str = "Key", value_label: str = "Value"): """ Format tags for boto call which expect a given format. If given a dictionary, formats it as an array of objects with a key and a value field to be passed to boto calls that expect this format. Else, assumes that it's already in the right format and returns it as is. We do not validate the format here since it's done by boto anyway, and the error would not be clearer if thrown from here. :param source: a dict from which keys and values are read :param key_label: optional, the label to use for keys if not "Key" :param value_label: optional, the label to use for values if not "Value" """ if source is None: return [] elif isinstance(source, dict): return [{key_label: kvp[0], value_label: kvp[1]} for kvp in source.items()] else: return source
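A few example calls against the helper above:

# A dict becomes a list of {Key, Value} objects; non-dict input passes through untouched.
format_tags({"env": "prod", "team": "data"})
# -> [{'Key': 'env', 'Value': 'prod'}, {'Key': 'team', 'Value': 'data'}]
format_tags({"env": "prod"}, key_label="key", value_label="value")
# -> [{'key': 'env', 'value': 'prod'}]
format_tags(None)   # -> []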
Call get_state_callable until it reaches the desired_state or the failure_states. PLEASE NOTE: While not yet deprecated, we are moving away from this method and encourage using the custom boto waiters as explained in https://github.com/apache/airflow/tree/main/airflow/providers/amazon/aws/waiters :param get_state_callable: A callable to run until it returns True :param get_state_args: Arguments to pass to get_state_callable :param parse_response: Dictionary keys to extract state from response of get_state_callable :param desired_state: Wait until the getter returns this value :param failure_states: A set of states which indicate failure and should throw an exception if any are reached before the desired_state :param object_type: Used for the reporting string. What are you waiting for? (application, job, etc) :param action: Used for the reporting string. What action are you waiting for? (created, deleted, etc) :param countdown: Number of seconds the waiter should wait for the desired state before timing out. Defaults to 25 * 60 seconds. None = infinite. :param check_interval_seconds: Number of seconds waiter should wait before attempting to retry get_state_callable. Defaults to 60 seconds.
def waiter( get_state_callable: Callable, get_state_args: dict, parse_response: list, desired_state: set, failure_states: set, object_type: str, action: str, countdown: int | float | None = 25 * 60, check_interval_seconds: int = 60, ) -> None: """ Call get_state_callable until it reaches the desired_state or the failure_states. PLEASE NOTE: While not yet deprecated, we are moving away from this method and encourage using the custom boto waiters as explained in https://github.com/apache/airflow/tree/main/airflow/providers/amazon/aws/waiters :param get_state_callable: A callable to run until it returns True :param get_state_args: Arguments to pass to get_state_callable :param parse_response: Dictionary keys to extract state from response of get_state_callable :param desired_state: Wait until the getter returns this value :param failure_states: A set of states which indicate failure and should throw an exception if any are reached before the desired_state :param object_type: Used for the reporting string. What are you waiting for? (application, job, etc) :param action: Used for the reporting string. What action are you waiting for? (created, deleted, etc) :param countdown: Number of seconds the waiter should wait for the desired state before timing out. Defaults to 25 * 60 seconds. None = infinite. :param check_interval_seconds: Number of seconds waiter should wait before attempting to retry get_state_callable. Defaults to 60 seconds. """ while True: state = get_state(get_state_callable(**get_state_args), parse_response) if state in desired_state: break if state in failure_states: raise AirflowException(f"{object_type.title()} reached failure state {state}.") if countdown is None: countdown = float("inf") if countdown > check_interval_seconds: countdown -= check_interval_seconds log.info("Waiting for %s to be %s.", object_type.lower(), action.lower()) time.sleep(check_interval_seconds) else: message = f"{object_type.title()} still not {action.lower()} after the allocated time limit." log.error(message) raise RuntimeError(message)
Use a boto waiter to poll an AWS service for the specified state. Although this function uses boto waiters to poll the state of the service, it logs the response of the service after every attempt, which is not currently supported by boto waiters. :param waiter: The boto waiter to use. :param waiter_delay: The amount of time in seconds to wait between attempts. :param waiter_max_attempts: The maximum number of attempts to be made. :param args: The arguments to pass to the waiter. :param failure_message: The message to log if a failure state is reached. :param status_message: The message logged when printing the status of the service. :param status_args: A list containing the JMESPath queries to retrieve status information from the waiter response. e.g. response = {"Cluster": {"state": "CREATING"}} status_args = ["Cluster.state"] response = { "Clusters": [{"state": "CREATING", "details": "User initiated."},] } status_args = ["Clusters[0].state", "Clusters[0].details"]
def wait( waiter: Waiter, waiter_delay: int, waiter_max_attempts: int, args: dict[str, Any], failure_message: str, status_message: str, status_args: list[str], ) -> None: """ Use a boto waiter to poll an AWS service for the specified state. Although this function uses boto waiters to poll the state of the service, it logs the response of the service after every attempt, which is not currently supported by boto waiters. :param waiter: The boto waiter to use. :param waiter_delay: The amount of time in seconds to wait between attempts. :param waiter_max_attempts: The maximum number of attempts to be made. :param args: The arguments to pass to the waiter. :param failure_message: The message to log if a failure state is reached. :param status_message: The message logged when printing the status of the service. :param status_args: A list containing the JMESPath queries to retrieve status information from the waiter response. e.g. response = {"Cluster": {"state": "CREATING"}} status_args = ["Cluster.state"] response = { "Clusters": [{"state": "CREATING", "details": "User initiated."},] } status_args = ["Clusters[0].state", "Clusters[0].details"] """ log = logging.getLogger(__name__) for attempt in range(waiter_max_attempts): if attempt: time.sleep(waiter_delay) try: waiter.wait(**args, WaiterConfig={"MaxAttempts": 1}) except WaiterError as error: if "terminal failure" in str(error): log.error("%s: %s", failure_message, _LazyStatusFormatter(status_args, error.last_response)) raise AirflowException(f"{failure_message}: {error}") log.info("%s: %s", status_message, _LazyStatusFormatter(status_args, error.last_response)) else: break else: raise AirflowException("Waiter error: max attempts reached")
Convert a datetime object to an epoch integer (seconds).
def datetime_to_epoch(date_time: datetime) -> int: """Convert a datetime object to an epoch integer (seconds).""" return int(date_time.timestamp())
Convert a datetime object to an epoch integer (milliseconds).
def datetime_to_epoch_ms(date_time: datetime) -> int: """Convert a datetime object to an epoch integer (milliseconds).""" return int(date_time.timestamp() * 1_000)
Convert a datetime object to an epoch integer (milliseconds) in UTC timezone.
def datetime_to_epoch_utc_ms(date_time: datetime) -> int: """Convert a datetime object to an epoch integer (milliseconds) in UTC timezone.""" return int(date_time.replace(tzinfo=timezone.utc).timestamp() * 1_000)
Convert a datetime object to an epoch integer (microseconds).
def datetime_to_epoch_us(date_time: datetime) -> int: """Convert a datetime object to an epoch integer (microseconds).""" return int(date_time.timestamp() * 1_000_000)
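A quick sketch comparing the conversions on the same timezone-aware datetime:

from datetime import datetime, timezone

dt = datetime(2024, 1, 1, tzinfo=timezone.utc)
datetime_to_epoch(dt)      # -> 1704067200
datetime_to_epoch_ms(dt)   # -> 1704067200000
datetime_to_epoch_us(dt)   # -> 1704067200000000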
Return formatted pipeline options from a dictionary of arguments. The logic of this method should be compatible with Apache Beam: https://github.com/apache/beam/blob/b56740f0e8cd80c2873412847d0b336837429fb9/sdks/python/ apache_beam/options/pipeline_options.py#L230-L251 :param options: Dictionary with options :return: List of arguments
def beam_options_to_args(options: dict) -> list[str]: """ Return a formatted pipeline options from a dictionary of arguments. The logic of this method should be compatible with Apache Beam: https://github.com/apache/beam/blob/b56740f0e8cd80c2873412847d0b336837429fb9/sdks/python/ apache_beam/options/pipeline_options.py#L230-L251 :param options: Dictionary with options :return: List of arguments """ if not options: return [] args: list[str] = [] for attr, value in options.items(): if value is None or (isinstance(value, bool) and value): args.append(f"--{attr}") elif isinstance(value, bool) and not value: continue elif isinstance(value, list): args.extend([f"--{attr}={v}" for v in value]) else: args.append(f"--{attr}={value}") return args
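An illustrative conversion showing how booleans, lists, and scalars are handled (the option names here are placeholders):

beam_options_to_args({
    "job_name": "my-job",        # scalar  -> --key=value
    "streaming": True,           # True    -> bare flag
    "experiments": ["a", "b"],   # list    -> one flag per element
    "dry_run": False,            # False   -> dropped
})
# -> ['--job_name=my-job', '--streaming', '--experiments=a', '--experiments=b']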
Print output to logs. :param proc: subprocess. :param fd: File descriptor. :param process_line_callback: Optional callback which can be used to process stdout and stderr to detect job id. :param log: logger.
def process_fd( proc, fd, log: logging.Logger, process_line_callback: Callable[[str], None] | None = None, check_job_status_callback: Callable[[], bool | None] | None = None, ): """ Print output to logs. :param proc: subprocess. :param fd: File descriptor. :param process_line_callback: Optional callback which can be used to process stdout and stderr to detect job id. :param log: logger. """ if fd not in (proc.stdout, proc.stderr): raise AirflowException("No data in stderr or in stdout.") fd_to_log = {proc.stderr: log.warning, proc.stdout: log.info} func_log = fd_to_log[fd] for line in iter(fd.readline, b""): line = line.decode() if process_line_callback: process_line_callback(line) func_log(line.rstrip("\n")) if check_job_status_callback and check_job_status_callback(): return
Run pipeline command in subprocess. :param cmd: Parts of the command to be run in subprocess :param process_line_callback: Optional callback which can be used to process stdout and stderr to detect job id :param working_directory: Working directory :param log: logger.
def run_beam_command( cmd: list[str], log: logging.Logger, process_line_callback: Callable[[str], None] | None = None, working_directory: str | None = None, check_job_status_callback: Callable[[], bool | None] | None = None, ) -> None: """ Run pipeline command in subprocess. :param cmd: Parts of the command to be run in subprocess :param process_line_callback: Optional callback which can be used to process stdout and stderr to detect job id :param working_directory: Working directory :param log: logger. """ log.info("Running command: %s", " ".join(shlex.quote(c) for c in cmd)) proc = subprocess.Popen( cmd, cwd=working_directory, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True, ) # Waits for Apache Beam pipeline to complete. log.info("Start waiting for Apache Beam process to complete.") reads = [proc.stderr, proc.stdout] while True: # Wait for at least one available fd. readable_fds, _, _ = select.select(reads, [], [], 5) if readable_fds is None: log.info("Waiting for Apache Beam process to complete.") continue for readable_fd in readable_fds: process_fd(proc, readable_fd, log, process_line_callback, check_job_status_callback) if check_job_status_callback and check_job_status_callback(): return if proc.poll() is not None: break # Corner case: check if more output was created between the last read and the process termination for readable_fd in reads: process_fd(proc, readable_fd, log, process_line_callback, check_job_status_callback) log.info("Process exited with return code: %s", proc.returncode) if proc.returncode != 0: raise AirflowException(f"Apache Beam process failed with return code {proc.returncode}")
Extract context from env variable, (dag_id, task_id, etc) for use in BashOperator and PythonOperator. :return: The context of interest.
def get_context_from_env_var() -> dict[Any, Any]: """ Extract context from env variable, (dag_id, task_id, etc) for use in BashOperator and PythonOperator. :return: The context of interest. """ return { format_map["default"]: os.environ.get(format_map["env_var_format"], "") for format_map in AIRFLOW_VAR_NAME_FORMAT_MAPPING.values() }
Get the max partition for a table. :param schema: The hive schema the table lives in :param table: The hive table you are interested in, supports the dot notation as in "my_database.my_table", if a dot is found, the schema param is disregarded :param metastore_conn_id: The hive connection you are interested in. If your default is set you don't need to use this parameter. :param filter_map: partition_key:partition_value map used for partition filtering, e.g. {'key1': 'value1', 'key2': 'value2'}. Only partitions matching all partition_key:partition_value pairs will be considered as candidates of max partition. :param field: the field to get the max value from. If there's only one partition field, this will be inferred >>> max_partition("airflow.static_babynames_partitioned") '2015-01-01'
def max_partition( table, schema="default", field=None, filter_map=None, metastore_conn_id="metastore_default" ): """ Get the max partition for a table. :param schema: The hive schema the table lives in :param table: The hive table you are interested in, supports the dot notation as in "my_database.my_table", if a dot is found, the schema param is disregarded :param metastore_conn_id: The hive connection you are interested in. If your default is set you don't need to use this parameter. :param filter_map: partition_key:partition_value map used for partition filtering, e.g. {'key1': 'value1', 'key2': 'value2'}. Only partitions matching all partition_key:partition_value pairs will be considered as candidates of max partition. :param field: the field to get the max value from. If there's only one partition field, this will be inferred >>> max_partition("airflow.static_babynames_partitioned") '2015-01-01' """ from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook if "." in table: schema, table = table.split(".") hive_hook = HiveMetastoreHook(metastore_conn_id=metastore_conn_id) return hive_hook.max_partition(schema=schema, table_name=table, field=field, filter_map=filter_map)
Find the date in a list closest to the target date. An optional parameter can be given to get the closest before or after. :param target_dt: The target date :param date_list: The list of dates to search :param before_target: closest before or after the target :returns: The closest date
def _closest_date(target_dt, date_list, before_target=None) -> datetime.date | None: """ Find the date in a list closest to the target date. An optional parameter can be given to get the closest before or after. :param target_dt: The target date :param date_list: The list of dates to search :param before_target: closest before or after the target :returns: The closest date """ def time_before(d): return target_dt - d if d <= target_dt else datetime.timedelta.max def time_after(d): return d - target_dt if d >= target_dt else datetime.timedelta.max def any_time(d): return target_dt - d if d < target_dt else d - target_dt if before_target is None: return min(date_list, key=any_time).date() if before_target: return min(date_list, key=time_before).date() else: return min(date_list, key=time_after).date()
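A small sketch of the three modes, using the helper above:

import datetime

dates = [datetime.datetime(2015, 1, 1), datetime.datetime(2015, 1, 4)]
target = datetime.datetime(2015, 1, 2)
_closest_date(target, dates, before_target=True)    # -> datetime.date(2015, 1, 1)
_closest_date(target, dates, before_target=False)   # -> datetime.date(2015, 1, 4)
_closest_date(target, dates)                        # either side -> datetime.date(2015, 1, 1)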
Find the date in a list closest to the target date. An optional parameter can be given to get the closest before or after. :param table: A hive table name :param ds: A datestamp ``%Y-%m-%d`` e.g. ``yyyy-mm-dd`` :param before: closest before (True), after (False) or either side of ds :param schema: table schema :param metastore_conn_id: which metastore connection to use :returns: The closest date >>> tbl = "airflow.static_babynames_partitioned" >>> closest_ds_partition(tbl, "2015-01-02") '2015-01-01'
def closest_ds_partition( table, ds, before=True, schema="default", metastore_conn_id="metastore_default" ) -> str | None: """ Find the date in a list closest to the target date. An optional parameter can be given to get the closest before or after. :param table: A hive table name :param ds: A datestamp ``%Y-%m-%d`` e.g. ``yyyy-mm-dd`` :param before: closest before (True), after (False) or either side of ds :param schema: table schema :param metastore_conn_id: which metastore connection to use :returns: The closest date >>> tbl = "airflow.static_babynames_partitioned" >>> closest_ds_partition(tbl, "2015-01-02") '2015-01-01' """ from airflow.providers.apache.hive.hooks.hive import HiveMetastoreHook if "." in table: schema, table = table.split(".") hive_hook = HiveMetastoreHook(metastore_conn_id=metastore_conn_id) partitions = hive_hook.get_partitions(schema=schema, table_name=table) if not partitions: return None part_vals = [next(iter(p.values())) for p in partitions] if ds in part_vals: return ds else: parts = [datetime.datetime.strptime(pv, "%Y-%m-%d") for pv in part_vals] target_dt = datetime.datetime.strptime(ds, "%Y-%m-%d") closest_ds = _closest_date(target_dt, parts, before_target=before) if closest_ds is not None: return closest_ds.isoformat() return None
Get Spark source from JDBC connection. :param spark_source: Spark source, here is Spark reader or writer :param url: JDBC resource url :param jdbc_table: JDBC resource table name :param user: JDBC resource user name :param password: JDBC resource password :param driver: JDBC resource driver
def set_common_options( spark_source: Any, url: str = "localhost:5432", jdbc_table: str = "default.default", user: str = "root", password: str = "root", driver: str = "driver", ) -> Any: """ Get Spark source from JDBC connection. :param spark_source: Spark source, here is Spark reader or writer :param url: JDBC resource url :param jdbc_table: JDBC resource table name :param user: JDBC resource user name :param password: JDBC resource password :param driver: JDBC resource driver """ spark_source = ( spark_source.format("jdbc") .option("url", url) .option("dbtable", jdbc_table) .option("user", user) .option("password", password) .option("driver", driver) ) return spark_source
Transfer data from Spark to JDBC source.
def spark_write_to_jdbc( spark_session: SparkSession, url: str, user: str, password: str, metastore_table: str, jdbc_table: str, driver: Any, truncate: bool, save_mode: str, batch_size: int, num_partitions: int, create_table_column_types: str, ) -> None: """Transfer data from Spark to JDBC source.""" writer = spark_session.table(metastore_table).write # first set common options writer = set_common_options(writer, url, jdbc_table, user, password, driver) # now set write-specific options if truncate: writer = writer.option("truncate", truncate) if batch_size: writer = writer.option("batchsize", batch_size) if num_partitions: writer = writer.option("numPartitions", num_partitions) if create_table_column_types: writer = writer.option("createTableColumnTypes", create_table_column_types) writer.save(mode=save_mode)
Transfer data from JDBC source to Spark.
def spark_read_from_jdbc( spark_session: SparkSession, url: str, user: str, password: str, metastore_table: str, jdbc_table: str, driver: Any, save_mode: str, save_format: str, fetch_size: int, num_partitions: int, partition_column: str, lower_bound: str, upper_bound: str, ) -> None: """Transfer data from JDBC source to Spark.""" # first set common options reader = set_common_options(spark_session.read, url, jdbc_table, user, password, driver) # now set specific read options if fetch_size: reader = reader.option("fetchsize", fetch_size) if num_partitions: reader = reader.option("numPartitions", num_partitions) if partition_column and lower_bound and upper_bound: reader = ( reader.option("partitionColumn", partition_column) .option("lowerBound", lower_bound) .option("upperBound", upper_bound) ) reader.load().write.saveAsTable(metastore_table, format=save_format, mode=save_mode)
Start Flower, Celery monitoring tool.
def flower(args): """Start Flower, Celery monitoring tool.""" # This needs to be imported locally to not trigger Providers Manager initialization from airflow.providers.celery.executors.celery_executor import app as celery_app options = [ "flower", conf.get("celery", "BROKER_URL"), f"--address={args.hostname}", f"--port={args.port}", ] if args.broker_api: options.append(f"--broker-api={args.broker_api}") if args.url_prefix: options.append(f"--url-prefix={args.url_prefix}") if args.basic_auth: options.append(f"--basic-auth={args.basic_auth}") if args.flower_conf: options.append(f"--conf={args.flower_conf}") _run_command_with_daemon_option( args=args, process_name="flower", callback=lambda: celery_app.start(options) )
Start serve_logs sub-process.
def _serve_logs(skip_serve_logs: bool = False): """Start serve_logs sub-process.""" sub_proc = None if skip_serve_logs is False: sub_proc = Process(target=serve_logs) sub_proc.start() try: yield finally: if sub_proc: sub_proc.terminate()
Reconfigure the logger. * remove any previously configured handlers * logs of severity error and above go to stderr * logs of severity lower than error go to stdout.
def logger_setup_handler(logger, **kwargs): """ Reconfigure the logger. * remove any previously configured handlers * logs of severity error, and above goes to stderr, * logs of severity lower than error goes to stdout. """ if conf.getboolean("logging", "celery_stdout_stderr_separation", fallback=False): celery_formatter = logging.Formatter(DEFAULT_TASK_LOG_FMT) class NoErrorOrAboveFilter(logging.Filter): """Allow only logs with level *lower* than ERROR to be reported.""" def filter(self, record): return record.levelno < logging.ERROR below_error_handler = logging.StreamHandler(sys.stdout) below_error_handler.addFilter(NoErrorOrAboveFilter()) below_error_handler.setFormatter(celery_formatter) from_error_handler = logging.StreamHandler(sys.stderr) from_error_handler.setLevel(logging.ERROR) from_error_handler.setFormatter(celery_formatter) logger.handlers[:] = [below_error_handler, from_error_handler]
Start Airflow Celery worker.
def worker(args): """Start Airflow Celery worker.""" # This needs to be imported locally to not trigger Providers Manager initialization from airflow.providers.celery.executors.celery_executor import app as celery_app # Disable connection pool so that celery worker does not hold an unnecessary db connection settings.reconfigure_orm(disable_connection_pool=True) if not settings.validate_session(): raise SystemExit("Worker exiting, database connection precheck failed.") autoscale = args.autoscale skip_serve_logs = args.skip_serve_logs if autoscale is None and conf.has_option("celery", "worker_autoscale"): autoscale = conf.get("celery", "worker_autoscale") if hasattr(celery_app.backend, "ResultSession"): # Pre-create the database tables now, otherwise SQLA via Celery has a # race condition where one of the subprocesses can die with "Table # already exists" error, because SQLA checks for which tables exist, # then issues a CREATE TABLE, rather than doing CREATE TABLE IF NOT # EXISTS try: session = celery_app.backend.ResultSession() session.close() except sqlalchemy.exc.IntegrityError: # At least on postgres, trying to create a table that already exist # gives a unique constraint violation or the # "pg_type_typname_nsp_index" table. If this happens we can ignore # it, we raced to create the tables and lost. pass # backwards-compatible: https://github.com/apache/airflow/pull/21506#pullrequestreview-879893763 celery_log_level = conf.get("logging", "CELERY_LOGGING_LEVEL") if not celery_log_level: celery_log_level = conf.get("logging", "LOGGING_LEVEL") # Setup Celery worker options = [ "worker", "-O", "fair", "--queues", args.queues, "--concurrency", args.concurrency, "--hostname", args.celery_hostname, "--loglevel", celery_log_level, ] if autoscale: options.extend(["--autoscale", autoscale]) if args.without_mingle: options.append("--without-mingle") if args.without_gossip: options.append("--without-gossip") if conf.has_option("celery", "pool"): pool = conf.get("celery", "pool") options.extend(["--pool", pool]) # Celery pools of type eventlet and gevent use greenlets, which # requires monkey patching the app: # https://eventlet.net/doc/patching.html#monkey-patch # Otherwise task instances hang on the workers and are never # executed. maybe_patch_concurrency(["-P", pool]) worker_pid_file_path, stdout, stderr, log_file = setup_locations( process=WORKER_PROCESS_NAME, stdout=args.stdout, stderr=args.stderr, log=args.log_file, pid=args.pid, ) def run_celery_worker(): with _serve_logs(skip_serve_logs): celery_app.worker_main(options) if args.umask: umask = args.umask else: umask = conf.get("celery", "worker_umask", fallback=settings.DAEMON_UMASK) _run_command_with_daemon_option( args=args, process_name=WORKER_PROCESS_NAME, callback=run_celery_worker, should_setup_logging=True, umask=umask, pid_file=worker_pid_file_path, )
Send SIGTERM to Celery worker.
def stop_worker(args): """Send SIGTERM to Celery worker.""" # Read PID from file if args.pid: pid_file_path = args.pid else: pid_file_path, _, _, _ = setup_locations(process=WORKER_PROCESS_NAME) pid = read_pid_from_pidfile(pid_file_path) # Send SIGTERM if pid: worker_process = psutil.Process(pid) worker_process.terminate() # Remove pid file remove_existing_pidfile(pid_file_path)
Generate documentation; used by Sphinx. :meta private:
def _get_parser() -> argparse.ArgumentParser: """ Generate documentation; used by Sphinx. :meta private: """ return CeleryExecutor._get_parser()
Init providers before importing the configuration, so the _SECRET and _CMD options work.
def _get_celery_app() -> Celery:
    """Init providers before importing the configuration, so the _SECRET and _CMD options work."""
    global celery_configuration

    if conf.has_option("celery", "celery_config_options"):
        celery_configuration = conf.getimport("celery", "celery_config_options")
    else:
        from airflow.providers.celery.executors.default_celery import DEFAULT_CELERY_CONFIG

        celery_configuration = DEFAULT_CELERY_CONFIG

    celery_app_name = conf.get("celery", "CELERY_APP_NAME")
    if celery_app_name == "airflow.executors.celery_executor":
        warnings.warn(
            "The celery.CELERY_APP_NAME configuration uses deprecated package name: "
            "'airflow.executors.celery_executor'. "
            "Change it to `airflow.providers.celery.executors.celery_executor`, and "
            "update the `--app` flag in your Celery Health Checks "
            "to use `airflow.providers.celery.executors.celery_executor.app`.",
            AirflowProviderDeprecationWarning,
            stacklevel=2,
        )

    return Celery(celery_app_name, config_source=celery_configuration)
Preload some "expensive" airflow modules once, so other task processes won't have to import them again. Loading these for each task adds 0.3-0.5s *per task* before the task can run. For long-running tasks this doesn't matter, but for short tasks it starts to have a noticeable impact.
def on_celery_import_modules(*args, **kwargs):
    """
    Preload some "expensive" airflow modules once, so other task processes won't have to import them again.

    Loading these for each task adds 0.3-0.5s *per task* before the task can run. For long-running
    tasks this doesn't matter, but for short tasks it starts to have a noticeable impact.
    """
    import jinja2.ext  # noqa: F401

    import airflow.jobs.local_task_job_runner
    import airflow.macros
    import airflow.operators.bash
    import airflow.operators.python
    import airflow.operators.subdag  # noqa: F401

    with contextlib.suppress(ImportError):
        import numpy  # noqa: F401

    with contextlib.suppress(ImportError):
        import kubernetes.client
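In upstream usage a preloading hook like this is typically connected to Celery's ``import_modules`` signal; the sketch below shows that wiring in isolation. The app name and the imported module are placeholders, and this is only an assumed illustration of the pattern, not the project's actual registration code.

from celery import Celery, signals

app = Celery("demo-worker")


@signals.import_modules.connect
def preload_expensive_modules(*args, **kwargs):
    # Import heavy modules once in the worker parent process so forked task
    # processes inherit them instead of paying the import cost per task.
    import json  # placeholder for a genuinely expensive import

    _ = json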
Execute command.
def execute_command(command_to_exec: CommandType) -> None: """Execute command.""" dag_id, task_id = BaseExecutor.validate_airflow_tasks_run_command(command_to_exec) celery_task_id = app.current_task.request.id log.info("[%s] Executing command in Celery: %s", celery_task_id, command_to_exec) with _airflow_parsing_context_manager(dag_id=dag_id, task_id=task_id): try: if settings.EXECUTE_TASKS_NEW_PYTHON_INTERPRETER: _execute_in_subprocess(command_to_exec, celery_task_id) else: _execute_in_fork(command_to_exec, celery_task_id) except Exception: Stats.incr("celery.execute_command.failure") raise
Send task to executor.
def send_task_to_executor( task_tuple: TaskInstanceInCelery, ) -> tuple[TaskInstanceKey, CommandType, AsyncResult | ExceptionWithTraceback]: """Send task to executor.""" key, command, queue, task_to_run = task_tuple try: with timeout(seconds=OPERATION_TIMEOUT): result = task_to_run.apply_async(args=[command], queue=queue) except (Exception, AirflowTaskTimeout) as e: exception_traceback = f"Celery Task ID: {key}\n{traceback.format_exc()}" result = ExceptionWithTraceback(e, exception_traceback) return key, command, result
Fetch and return the state of the given celery task. The scope of this function is global so that it can be called by subprocesses in the pool. :param async_result: the Celery AsyncResult object used to fetch the task's state :return: a tuple of the Celery task ID, the Celery state, and the Celery info of the task
def fetch_celery_task_state(async_result: AsyncResult) -> tuple[str, str | ExceptionWithTraceback, Any]:
    """
    Fetch and return the state of the given celery task.

    The scope of this function is global so that it can be called by subprocesses in the pool.

    :param async_result: the Celery AsyncResult object used to fetch the task's state
    :return: a tuple of the Celery task ID, the Celery state, and the Celery info of the task
    """
    try:
        with timeout(seconds=OPERATION_TIMEOUT):
            # Accessing the state property of a celery task will make an actual network request
            # to get the current state of the task
            info = async_result.info if hasattr(async_result, "info") else None
            return async_result.task_id, async_result.state, info
    except Exception as e:
        exception_traceback = f"Celery Task ID: {async_result}\n{traceback.format_exc()}"
        return async_result.task_id, ExceptionWithTraceback(e, exception_traceback), None
Attach additional specs to an existing pod object. :param pod: A pod to attach a list of Kubernetes objects to :param k8s_objects: an optional list of K8SModels :return: the pod with the objects attached, if any exist
def append_to_pod(pod: k8s.V1Pod, k8s_objects: list[K8SModel] | None):
    """
    Attach additional specs to an existing pod object.

    :param pod: A pod to attach a list of Kubernetes objects to
    :param k8s_objects: an optional list of K8SModels
    :return: the pod with the objects attached, if any exist
    """
    if not k8s_objects:
        return pod
    return reduce(lambda p, o: o.attach_to_pod(p), k8s_objects, pod)
Generate random lowercase alphanumeric string of length num. :meta private:
def rand_str(num): """Generate random lowercase alphanumeric string of length num. :meta private: """ return "".join(secrets.choice(alphanum_lower) for _ in range(num))
Add random string to pod or job name while staying under max length. :param name: name of the pod or job :param rand_len: length of the random string to append :param max_len: maximum length of the pod name :meta private:
def add_unique_suffix(*, name: str, rand_len: int = 8, max_len: int = POD_NAME_MAX_LENGTH) -> str: """Add random string to pod or job name while staying under max length. :param name: name of the pod or job :param rand_len: length of the random string to append :param max_len: maximum length of the pod name :meta private: """ suffix = "-" + rand_str(rand_len) return name[: max_len - len(suffix)].strip("-.") + suffix
Add random string to pod name while staying under max length. :param pod_name: name of the pod :param rand_len: length of the random string to append :param max_len: maximum length of the pod name :meta private:
def add_pod_suffix(*, pod_name: str, rand_len: int = 8, max_len: int = POD_NAME_MAX_LENGTH) -> str: """Add random string to pod name while staying under max length. :param pod_name: name of the pod :param rand_len: length of the random string to append :param max_len: maximum length of the pod name :meta private: """ warnings.warn( "This function is deprecated. Please use `add_unique_suffix`.", AirflowProviderDeprecationWarning, stacklevel=2, ) suffix = "-" + rand_str(rand_len) return pod_name[: max_len - len(suffix)].strip("-.") + suffix
Generate unique pod or job ID given a dag_id and / or task_id. :param dag_id: DAG ID :param task_id: Task ID :param max_length: max number of characters :param unique: whether a random string suffix should be added :return: A valid identifier for a kubernetes pod name
def create_unique_id( dag_id: str | None = None, task_id: str | None = None, *, max_length: int = POD_NAME_MAX_LENGTH, unique: bool = True, ) -> str: """ Generate unique pod or job ID given a dag_id and / or task_id. :param dag_id: DAG ID :param task_id: Task ID :param max_length: max number of characters :param unique: whether a random string suffix should be added :return: A valid identifier for a kubernetes pod name """ if not (dag_id or task_id): raise ValueError("Must supply either dag_id or task_id.") name = "" if dag_id: name += dag_id if task_id: if name: name += "-" name += task_id base_name = slugify(name, lowercase=True)[:max_length].strip(".-") if unique: return add_unique_suffix(name=base_name, rand_len=8, max_len=max_length) else: return base_name
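A rough standalone sketch of the same naming scheme (slugify, truncate, then append a random lowercase suffix). The 63-character cap mirrors the POD_NAME_MAX_LENGTH default used above, the helper name is illustrative only, and python-slugify is assumed to be installed.

import secrets
import string

from slugify import slugify  # python-slugify

ALPHANUM_LOWER = string.ascii_lowercase + string.digits
MAX_LEN = 63  # Kubernetes name length limit assumed here


def demo_unique_id(dag_id: str, task_id: str, rand_len: int = 8) -> str:
    base = slugify(f"{dag_id}-{task_id}", lowercase=True)[:MAX_LEN].strip(".-")
    suffix = "-" + "".join(secrets.choice(ALPHANUM_LOWER) for _ in range(rand_len))
    return base[: MAX_LEN - len(suffix)].strip("-.") + suffix


# demo_unique_id("my_dag", "extract_data") -> e.g. "my-dag-extract-data-4k2n81qz"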
Generate unique pod ID given a dag_id and / or task_id. :param dag_id: DAG ID :param task_id: Task ID :param max_length: max number of characters :param unique: whether a random string suffix should be added :return: A valid identifier for a kubernetes pod name
def create_pod_id( dag_id: str | None = None, task_id: str | None = None, *, max_length: int = POD_NAME_MAX_LENGTH, unique: bool = True, ) -> str: """ Generate unique pod ID given a dag_id and / or task_id. :param dag_id: DAG ID :param task_id: Task ID :param max_length: max number of characters :param unique: whether a random string suffix should be added :return: A valid identifier for a kubernetes pod name """ warnings.warn( "This function is deprecated. Please use `create_unique_id`.", AirflowProviderDeprecationWarning, stacklevel=2, ) if not (dag_id or task_id): raise ValueError("Must supply either dag_id or task_id.") name = "" if dag_id: name += dag_id if task_id: if name: name += "-" name += task_id base_name = slugify(name, lowercase=True)[:max_length].strip(".-") if unique: return add_pod_suffix(pod_name=base_name, rand_len=8, max_len=max_length) else: return base_name
Build a TaskInstanceKey based on pod annotations.
def annotations_to_key(annotations: dict[str, str]) -> TaskInstanceKey: """Build a TaskInstanceKey based on pod annotations.""" log.debug("Creating task key for annotations %s", annotations) dag_id = annotations["dag_id"] task_id = annotations["task_id"] try_number = int(annotations["try_number"]) annotation_run_id = annotations.get("run_id") map_index = int(annotations.get("map_index", -1)) # Compat: Look up the run_id from the TI table! from airflow.models.dagrun import DagRun from airflow.models.taskinstance import TaskInstance, TaskInstanceKey from airflow.settings import Session if not annotation_run_id and "execution_date" in annotations: execution_date = pendulum.parse(annotations["execution_date"]) # Do _not_ use create-session, we don't want to expunge session = Session() task_instance_run_id = ( session.query(TaskInstance.run_id) .join(TaskInstance.dag_run) .filter( TaskInstance.dag_id == dag_id, TaskInstance.task_id == task_id, DagRun.execution_date == execution_date, ) .scalar() ) else: task_instance_run_id = annotation_run_id return TaskInstanceKey( dag_id=dag_id, task_id=task_id, run_id=task_instance_run_id, try_number=try_number, map_index=map_index, )
Enable TCP keepalive mechanism. This prevents urllib3 connections from hanging indefinitely when an idle connection is timed out by services like cloud load balancers or firewalls. See https://github.com/apache/airflow/pull/11406 for detailed explanation. Please ping @michalmisiewicz or @dimberman in the PR if you want to modify this function.
def _enable_tcp_keepalive() -> None:
    """
    Enable TCP keepalive mechanism.

    This prevents urllib3 connections from hanging indefinitely when an idle connection
    is timed out by services like cloud load balancers or firewalls.

    See https://github.com/apache/airflow/pull/11406 for detailed explanation.
    Please ping @michalmisiewicz or @dimberman in the PR if you want to modify this function.
    """
    import socket

    from urllib3.connection import HTTPConnection, HTTPSConnection

    tcp_keep_idle = conf.getint("kubernetes_executor", "tcp_keep_idle")
    tcp_keep_intvl = conf.getint("kubernetes_executor", "tcp_keep_intvl")
    tcp_keep_cnt = conf.getint("kubernetes_executor", "tcp_keep_cnt")

    socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]

    if hasattr(socket, "TCP_KEEPIDLE"):
        socket_options.append((socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, tcp_keep_idle))
    else:
        log.debug("Unable to set TCP_KEEPIDLE on this platform")

    if hasattr(socket, "TCP_KEEPINTVL"):
        socket_options.append((socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, tcp_keep_intvl))
    else:
        log.debug("Unable to set TCP_KEEPINTVL on this platform")

    if hasattr(socket, "TCP_KEEPCNT"):
        socket_options.append((socket.IPPROTO_TCP, socket.TCP_KEEPCNT, tcp_keep_cnt))
    else:
        log.debug("Unable to set TCP_KEEPCNT on this platform")

    HTTPSConnection.default_socket_options = HTTPSConnection.default_socket_options + socket_options
    HTTPConnection.default_socket_options = HTTPConnection.default_socket_options + socket_options
Retrieve Kubernetes client. :param in_cluster: whether we are in cluster :param cluster_context: context of the cluster :param config_file: configuration file :return: kubernetes client
def get_kube_client( in_cluster: bool | None = None, cluster_context: str | None = None, config_file: str | None = None, ) -> client.CoreV1Api: """ Retrieve Kubernetes client. :param in_cluster: whether we are in cluster :param cluster_context: context of the cluster :param config_file: configuration file :return: kubernetes client """ if in_cluster is None: in_cluster = conf.getboolean("kubernetes_executor", "in_cluster") if not has_kubernetes: raise _import_err if conf.getboolean("kubernetes_executor", "enable_tcp_keepalive"): _enable_tcp_keepalive() configuration = _get_default_configuration() api_client_retry_configuration = conf.getjson( "kubernetes_executor", "api_client_retry_configuration", fallback={} ) if not conf.getboolean("kubernetes_executor", "verify_ssl"): _disable_verify_ssl() if isinstance(api_client_retry_configuration, dict): configuration.retries = urllib3.util.Retry(**api_client_retry_configuration) else: raise ValueError("api_client_retry_configuration should be a dictionary") if in_cluster: config.load_incluster_config(client_configuration=configuration) else: if cluster_context is None: cluster_context = conf.get("kubernetes_executor", "cluster_context", fallback=None) if config_file is None: config_file = conf.get("kubernetes_executor", "config_file", fallback=None) config.load_kube_config( config_file=config_file, context=cluster_context, client_configuration=configuration ) if not conf.getboolean("kubernetes_executor", "verify_ssl"): configuration.verify_ssl = False ssl_ca_cert = conf.get("kubernetes_executor", "ssl_ca_cert") if ssl_ca_cert: configuration.ssl_ca_cert = ssl_ca_cert api_client = client.ApiClient(configuration=configuration) return client.CoreV1Api(api_client)
Normalize a provided label to be of valid length and characters. Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. If the label value is greater than 63 chars once made safe, or differs in any way from the original value sent to this function, then we need to truncate to 53 chars, and append it with a unique hash.
def make_safe_label_value(string: str) -> str: """ Normalize a provided label to be of valid length and characters. Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. If the label value is greater than 63 chars once made safe, or differs in any way from the original value sent to this function, then we need to truncate to 53 chars, and append it with a unique hash. """ safe_label = re2.sub(r"^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$", "", string) if len(safe_label) > MAX_LABEL_LEN or string != safe_label: safe_hash = md5(string.encode()).hexdigest()[:9] safe_label = safe_label[: MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash return safe_label
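A standalone approximation of the label sanitization above, using the stdlib re and hashlib in place of the module's re2 and md5 wrappers; the 63-character limit is the Kubernetes label constraint mentioned in the docstring.

import hashlib
import re

MAX_LABEL_LEN = 63


def demo_safe_label_value(value: str) -> str:
    # Drop invalid leading/trailing characters and anything outside [a-zA-Z0-9_.-].
    safe = re.sub(r"^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$", "", value)
    if len(safe) > MAX_LABEL_LEN or value != safe:
        digest = hashlib.md5(value.encode()).hexdigest()[:9]
        safe = safe[: MAX_LABEL_LEN - len(digest) - 1] + "-" + digest
    return safe


# demo_safe_label_value("my dag:with spaces") -> "mydagwithspaces-<9-char-hash>"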
Transform a datetime object into a string that can be used as a label. Kubernetes doesn't permit ":" in labels; since the ISO datetime format uses ":" but not "_", replace ":" with "_". :param datetime_obj: datetime.datetime object :return: ISO-like string representing the datetime
def datetime_to_label_safe_datestring(datetime_obj: datetime.datetime) -> str:
    """
    Transform a datetime object into a string that can be used as a label.

    Kubernetes doesn't permit ":" in labels; since the ISO datetime format uses ":" but not "_",
    replace ":" with "_".

    :param datetime_obj: datetime.datetime object
    :return: ISO-like string representing the datetime
    """
    return datetime_obj.isoformat().replace(":", "_").replace("+", "_plus_")
Transform a label-safe string back into a datetime object. Kubernetes doesn't permit ":" in labels; the forward transformation replaced ":" with "_", so here "_" is turned back into ":". :param string: str :return: datetime.datetime object
def label_safe_datestring_to_datetime(string: str) -> datetime.datetime:
    """
    Transform a label-safe string back into a datetime object.

    Kubernetes doesn't permit ":" in labels; the forward transformation replaced ":" with "_",
    so here "_" is turned back into ":".

    :param string: str
    :return: datetime.datetime object
    """
    return parser.parse(string.replace("_plus_", "+").replace("_", ":"))
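A small round-trip example of the two helpers above, reimplemented inline so it runs without the surrounding module; dateutil's parser is assumed, as in the original code.

import datetime

from dateutil import parser


def to_label(dt: datetime.datetime) -> str:
    return dt.isoformat().replace(":", "_").replace("+", "_plus_")


def from_label(label: str) -> datetime.datetime:
    return parser.parse(label.replace("_plus_", "+").replace("_", ":"))


dt = datetime.datetime(2024, 1, 2, 3, 4, 5, tzinfo=datetime.timezone.utc)
label = to_label(dt)  # "2024-01-02T03_04_05_plus_00_00"
assert from_label(label) == dt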
Merge objects. :param base_obj: has the base attributes which are overwritten if they exist in the client_obj and remain if they do not exist in the client_obj :param client_obj: the object that the client wants to create. :return: the merged objects
def merge_objects(base_obj, client_obj): """ Merge objects. :param base_obj: has the base attributes which are overwritten if they exist in the client_obj and remain if they do not exist in the client_obj :param client_obj: the object that the client wants to create. :return: the merged objects """ if not base_obj: return client_obj if not client_obj: return base_obj client_obj_cp = copy.deepcopy(client_obj) if isinstance(base_obj, dict) and isinstance(client_obj_cp, dict): base_obj_cp = copy.deepcopy(base_obj) base_obj_cp.update(client_obj_cp) return base_obj_cp for base_key in base_obj.to_dict(): base_val = getattr(base_obj, base_key, None) if not getattr(client_obj, base_key, None) and base_val: if not isinstance(client_obj_cp, dict): setattr(client_obj_cp, base_key, base_val) else: client_obj_cp[base_key] = base_val return client_obj_cp
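A short usage illustration of the dict branch of merge_objects above (the label values are made up): client values win on conflicting keys, and keys present only in the base survive.

base_labels = {"app": "airflow", "component": "worker", "tier": "backend"}
client_labels = {"component": "scheduler", "release": "v2"}

merged = merge_objects(base_labels, client_labels)
# {'app': 'airflow', 'component': 'scheduler', 'tier': 'backend', 'release': 'v2'}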
Add field values to existing objects. :param base_obj: an object which has a property `field_name` that is a list :param client_obj: an object which has a property `field_name` that is a list. A copy of this object is returned with `field_name` modified :param field_name: the name of the list field :return: the client_obj with the property `field_name` being the two properties appended
def extend_object_field(base_obj, client_obj, field_name): """ Add field values to existing objects. :param base_obj: an object which has a property `field_name` that is a list :param client_obj: an object which has a property `field_name` that is a list. A copy of this object is returned with `field_name` modified :param field_name: the name of the list field :return: the client_obj with the property `field_name` being the two properties appended """ client_obj_cp = copy.deepcopy(client_obj) base_obj_field = getattr(base_obj, field_name, None) client_obj_field = getattr(client_obj, field_name, None) if (not isinstance(base_obj_field, list) and base_obj_field is not None) or ( not isinstance(client_obj_field, list) and client_obj_field is not None ): raise ValueError( f"The chosen field must be a list. Got {type(base_obj_field)} base_object_field " f"and {type(client_obj_field)} client_object_field." ) if not base_obj_field: return client_obj_cp if not client_obj_field: setattr(client_obj_cp, field_name, base_obj_field) return client_obj_cp appended_fields = base_obj_field + client_obj_field setattr(client_obj_cp, field_name, appended_fields) return client_obj_cp
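A usage illustration of extend_object_field, with types.SimpleNamespace standing in for Kubernetes model objects; the field name and values are invented for the example.

from types import SimpleNamespace

base = SimpleNamespace(volumes=["base-vol"])
client = SimpleNamespace(volumes=["client-vol"])

merged = extend_object_field(base, client, "volumes")
# merged.volumes == ["base-vol", "client-vol"]; the original `client` object is left untouched.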
Normalize a provided label to be of valid length and characters. Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. If the label value is greater than 63 chars once made safe, or differs in any way from the original value sent to this function, then we need to truncate to 53 chars, and append it with a unique hash.
def make_safe_label_value(string): """ Normalize a provided label to be of valid length and characters. Valid label values must be 63 characters or less and must be empty or begin and end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_), dots (.), and alphanumerics between. If the label value is greater than 63 chars once made safe, or differs in any way from the original value sent to this function, then we need to truncate to 53 chars, and append it with a unique hash. """ from airflow.utils.hashlib_wrapper import md5 safe_label = re2.sub(r"^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$", "", string) if len(safe_label) > MAX_LABEL_LEN or string != safe_label: safe_hash = md5(string.encode()).hexdigest()[:9] safe_label = safe_label[: MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash return safe_label
Remove @task.kubernetes or similar as well as @setup and @teardown. :param python_source: python source code :param task_decorator_name: the task decorator name
def remove_task_decorator(python_source: str, task_decorator_name: str) -> str:
    """
    Remove @task.kubernetes or similar as well as @setup and @teardown.

    :param python_source: python source code
    :param task_decorator_name: the task decorator name
    """

    def _remove_task_decorator(py_source, decorator_name):
        if decorator_name not in py_source:
            return py_source
        split = py_source.split(decorator_name)
        before_decorator, after_decorator = split[0], split[1]
        if after_decorator[0] == "(":
            after_decorator = _balance_parens(after_decorator)
        if after_decorator[0] == "\n":
            after_decorator = after_decorator[1:]
        return before_decorator + after_decorator

    decorators = ["@setup", "@teardown", task_decorator_name]
    for decorator in decorators:
        python_source = _remove_task_decorator(python_source, decorator)
    return python_source
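A usage illustration of remove_task_decorator; the decorator in this snippet takes no arguments, so the parenthesis-balancing helper is not exercised.

source = (
    "@setup\n"
    "@task.kubernetes\n"
    "def my_callable():\n"
    "    return 42\n"
)

print(remove_task_decorator(source, "@task.kubernetes"))
# def my_callable():
#     return 42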
Render the python script to a file to execute in the virtual environment. :param jinja_context: The jinja context variables to unpack and replace with its placeholders in the template file. :param filename: The name of the file to dump the rendered script to. :param render_template_as_native_obj: If ``True``, rendered Jinja template would be converted to a native Python object
def write_python_script( jinja_context: dict, filename: str, render_template_as_native_obj: bool = False, ): """ Render the python script to a file to execute in the virtual environment. :param jinja_context: The jinja context variables to unpack and replace with its placeholders in the template file. :param filename: The name of the file to dump the rendered script to. :param render_template_as_native_obj: If ``True``, rendered Jinja template would be converted to a native Python object """ template_loader = FileSystemLoader(searchpath=os.path.dirname(__file__)) template_env: Environment if render_template_as_native_obj: template_env = NativeEnvironment(loader=template_loader, undefined=StrictUndefined) else: template_env = Environment( loader=template_loader, undefined=StrictUndefined, autoescape=select_autoescape(["html", "xml"]), ) template = template_env.get_template("python_kubernetes_script.jinja2") template.stream(**jinja_context).dump(filename)
Render k8s pod yaml.
def render_k8s_pod_yaml(task_instance: TaskInstance) -> dict | None: """Render k8s pod yaml.""" kube_config = KubeConfig() pod = PodGenerator.construct_pod( dag_id=task_instance.dag_id, run_id=task_instance.run_id, task_id=task_instance.task_id, map_index=task_instance.map_index, date=None, pod_id=create_pod_id(task_instance.dag_id, task_instance.task_id), try_number=task_instance.try_number, kube_image=kube_config.kube_image, args=task_instance.command_as_list(), pod_override_object=PodGenerator.from_obj(task_instance.executor_config), scheduler_job_id="0", namespace=kube_config.executor_namespace, base_worker_pod=PodGenerator.deserialize_model_file(kube_config.pod_template_file), with_mutation_hook=True, ) sanitized_pod = ApiClient().sanitize_for_serialization(pod) return sanitized_pod
Fetch rendered template fields from DB.
def get_rendered_k8s_spec(task_instance: TaskInstance, session=NEW_SESSION) -> dict | None: """Fetch rendered template fields from DB.""" from airflow.models.renderedtifields import RenderedTaskInstanceFields rendered_k8s_spec = RenderedTaskInstanceFields.get_k8s_pod_yaml(task_instance, session=session) if not rendered_k8s_spec: try: rendered_k8s_spec = render_k8s_pod_yaml(task_instance) except (TemplateAssertionError, UndefinedError) as e: raise AirflowException(f"Unable to render a k8s spec for this taskinstance: {e}") from e return rendered_k8s_spec
Convert an airflow Volume object into a k8s.V1Volume. :param volume:
def convert_volume(volume) -> k8s.V1Volume: """ Convert an airflow Volume object into a k8s.V1Volume. :param volume: """ return _convert_kube_model_object(volume, k8s.V1Volume)
Convert an airflow VolumeMount object into a k8s.V1VolumeMount. :param volume_mount:
def convert_volume_mount(volume_mount) -> k8s.V1VolumeMount: """ Convert an airflow VolumeMount object into a k8s.V1VolumeMount. :param volume_mount: """ return _convert_kube_model_object(volume_mount, k8s.V1VolumeMount)
Convert an airflow Port object into a k8s.V1ContainerPort. :param port:
def convert_port(port) -> k8s.V1ContainerPort: """ Convert an airflow Port object into a k8s.V1ContainerPort. :param port: """ return _convert_kube_model_object(port, k8s.V1ContainerPort)
Coerce env var collection for kubernetes. If the collection is a str-str dict, convert it into a list of ``V1EnvVar``s.
def convert_env_vars(env_vars: list[k8s.V1EnvVar] | dict[str, str]) -> list[k8s.V1EnvVar]: """ Coerce env var collection for kubernetes. If the collection is a str-str dict, convert it into a list of ``V1EnvVar``s. """ if isinstance(env_vars, dict): return [k8s.V1EnvVar(name=k, value=v) for k, v in env_vars.items()] return env_vars
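A usage illustration of convert_env_vars; the variable names and values are arbitrary, and kubernetes.client is assumed to be available (it already is in this module).

from kubernetes.client import models as k8s

env = convert_env_vars({"AIRFLOW__CORE__LOAD_EXAMPLES": "False", "MY_FLAG": "1"})
# [k8s.V1EnvVar(name="AIRFLOW__CORE__LOAD_EXAMPLES", value="False"),
#  k8s.V1EnvVar(name="MY_FLAG", value="1")]

already_typed = [k8s.V1EnvVar(name="MY_FLAG", value="1")]
assert convert_env_vars(already_typed) is already_typed  # lists pass through unchanged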
Separate function to convert env var collection for kubernetes and then raise an error if it is still the wrong type. This is used after the template strings have been rendered.
def convert_env_vars_or_raise_error(env_vars: list[k8s.V1EnvVar] | dict[str, str]) -> list[k8s.V1EnvVar]: """ Separate function to convert env var collection for kubernetes and then raise an error if it is still the wrong type. This is used after the template strings have been rendered. """ env_vars = convert_env_vars(env_vars) if isinstance(env_vars, list): return env_vars raise AirflowException(f"Expected dict or list, got {type(env_vars)}")
Convert a PodRuntimeInfoEnv into a k8s.V1EnvVar. :param pod_runtime_info_envs:
def convert_pod_runtime_info_env(pod_runtime_info_envs) -> k8s.V1EnvVar:
    """
    Convert a PodRuntimeInfoEnv into a k8s.V1EnvVar.

    :param pod_runtime_info_envs:
    """
    return _convert_kube_model_object(pod_runtime_info_envs, k8s.V1EnvVar)
Convert an image pull secrets str or list into a list of k8s.V1LocalObjectReference. :param image_pull_secrets:
def convert_image_pull_secrets(image_pull_secrets) -> list[k8s.V1LocalObjectReference]:
    """
    Convert an image pull secrets str or list into a list of k8s.V1LocalObjectReference.

    :param image_pull_secrets:
    """
    if isinstance(image_pull_secrets, str):
        secrets = image_pull_secrets.split(",")
        return [k8s.V1LocalObjectReference(name=secret) for secret in secrets]
    else:
        return image_pull_secrets
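A usage illustration of convert_image_pull_secrets with a comma-separated string (the secret names are placeholders):

secrets_refs = convert_image_pull_secrets("registry-cred,backup-registry-cred")
# [k8s.V1LocalObjectReference(name="registry-cred"),
#  k8s.V1LocalObjectReference(name="backup-registry-cred")]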
Convert a str into a k8s.V1EnvFromSource. :param configmaps:
def convert_configmap(configmaps) -> k8s.V1EnvFromSource:
    """
    Convert a str into a k8s.V1EnvFromSource.

    :param configmaps:
    """
    return k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name=configmaps))
Convert a dict into a k8s.V1Affinity.
def convert_affinity(affinity) -> k8s.V1Affinity:
    """Convert a dict into a k8s.V1Affinity."""
    return _convert_from_dict(affinity, k8s.V1Affinity)
Convert a dict into a k8s.V1Toleration.
def convert_toleration(toleration) -> k8s.V1Toleration:
    """Convert a dict into a k8s.V1Toleration."""
    return _convert_from_dict(toleration, k8s.V1Toleration)
Kubernetes operator decorator. This wraps a function to be executed in K8s using KubernetesPodOperator. Also accepts any argument that KubernetesPodOperator will via ``kwargs``. Can be reused in a single DAG. :param python_callable: Function to decorate :param multiple_outputs: if set, function return value will be unrolled to multiple XCom values. Dict will unroll to xcom values with keys as XCom keys. Defaults to False.
def kubernetes_task(
    python_callable: Callable | None = None,
    multiple_outputs: bool | None = None,
    **kwargs,
) -> TaskDecorator:
    """Kubernetes operator decorator.

    This wraps a function to be executed in K8s using KubernetesPodOperator. Also accepts any
    argument that KubernetesPodOperator will via ``kwargs``. Can be reused in a single DAG.

    :param python_callable: Function to decorate
    :param multiple_outputs: if set, function return value will be unrolled to multiple XCom values.
        Dict will unroll to xcom values with keys as XCom keys. Defaults to False.
    """
    return task_decorator_factory(
        python_callable=python_callable,
        multiple_outputs=multiple_outputs,
        decorated_operator_class=_KubernetesDecoratedOperator,
        **kwargs,
    )
Generate documentation; used by Sphinx. :meta private:
def _get_parser() -> argparse.ArgumentParser: """ Generate documentation; used by Sphinx. :meta private: """ return KubernetesExecutor._get_parser()
Get base pod from template. Reads either the pod_template_file set in the executor_config or the base pod_template_file set in the airflow.cfg to craft a "base pod" that will be used by the KubernetesExecutor :param pod_template_file: absolute path to a pod_template_file.yaml or None :param kube_config: The KubeConfig class generated by airflow that contains all kube metadata :return: a V1Pod that can be used as the base pod for k8s tasks
def get_base_pod_from_template(pod_template_file: str | None, kube_config: Any) -> k8s.V1Pod: """ Get base pod from template. Reads either the pod_template_file set in the executor_config or the base pod_template_file set in the airflow.cfg to craft a "base pod" that will be used by the KubernetesExecutor :param pod_template_file: absolute path to a pod_template_file.yaml or None :param kube_config: The KubeConfig class generated by airflow that contains all kube metadata :return: a V1Pod that can be used as the base pod for k8s tasks """ if pod_template_file: return PodGenerator.deserialize_model_file(pod_template_file) else: return PodGenerator.deserialize_model_file(kube_config.pod_template_file)
Convert val to bool if it can be done with certainty; if we cannot infer intention we return None.
def _get_bool(val) -> bool | None:
    """Convert val to bool if it can be done with certainty; if we cannot infer intention we return None."""
    if isinstance(val, bool):
        return val
    elif isinstance(val, str):
        if val.strip().lower() == "true":
            return True
        elif val.strip().lower() == "false":
            return False
    return None
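A quick illustration of the tri-state parsing performed by _get_bool:

assert _get_bool(True) is True
assert _get_bool(" false ") is False
assert _get_bool("FALSE") is False
assert _get_bool("yes") is None  # intent cannot be inferred
assert _get_bool(1) is None      # non-bool, non-str values are not coerced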