Delete a fileshare
def delete_fileshare():
    """Delete a fileshare"""
    hook = AzureFileShareHook()
    hook.delete_share(NAME)
Delete a file at SFTP SERVER
def delete_sftp_file():
    """Delete a file at SFTP SERVER"""
    SFTPHook().delete_file(SFTP_FILE_COMPLETE_PATH)
### TaskFlow API Tutorial Documentation This is a simple data pipeline example which demonstrates the use of the TaskFlow API using three simple tasks for Extract, Transform, and Load. Documentation that goes along with the Airflow TaskFlow API tutorial is located [here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial_taskflow_api.html)
def example_openai_dag(): """ ### TaskFlow API Tutorial Documentation This is a simple data pipeline example which demonstrates the use of the TaskFlow API using three simple tasks for Extract, Transform, and Load. Documentation that goes along with the Airflow TaskFlow API tutorial is located [here](https://airflow.apache.org/docs/apache-airflow/stable/tutorial_taskflow_api.html) """ texts = [ "On Kernel-Target Alignment. We describe a family of global optimization procedures", " that automatically decompose optimization problems into smaller loosely coupled", " problems, then combine the solutions of these with message passing algorithms.", ] @task() def create_embeddings_using_hook(): """ #### Extract task A simple Extract task to get data ready for the rest of the data pipeline. In this case, getting data is simulated by reading from a hardcoded JSON string. """ openai_hook = OpenAIHook() embeddings = openai_hook.create_embeddings(texts[0]) return embeddings @task() def task_to_store_input_text_in_xcom(): return texts[0] # [START howto_operator_openai_embedding] OpenAIEmbeddingOperator( task_id="embedding_using_xcom_data", conn_id="openai_default", input_text=task_to_store_input_text_in_xcom(), model="text-embedding-ada-002", ) OpenAIEmbeddingOperator( task_id="embedding_using_callable", conn_id="openai_default", input_text=input_text_callable( "input_arg1_value", "input2_value", input_kwarg1="input_kwarg1_value", input_kwarg2="input_kwarg2_value", ), model="text-embedding-ada-002", ) OpenAIEmbeddingOperator( task_id="embedding_using_text", conn_id="openai_default", input_text=texts, model="text-embedding-ada-002", ) # [END howto_operator_openai_embedding] create_embeddings_using_hook()
Verify the message in the notebook
def check_notebook(output_notebook, execution_date):
    """
    Verify the message in the notebook
    """
    notebook = sb.read_notebook(output_notebook)
    message = notebook.scraps["message"]
    print(f"Message in notebook {message} for {execution_date}")

    if message.data != f"Ran from Airflow at {execution_date}!":
        return False

    return True
Verify the message in the notebook
def check_notebook(inlets, execution_date):
    """
    Verify the message in the notebook
    """
    notebook = sb.read_notebook(inlets[0].url)
    message = notebook.scraps["message"]
    print(f"Message in notebook {message} for {execution_date}")

    if message.data != f"Ran from Airflow at {execution_date}!":
        return False

    return True
Example pgvector DAG demonstrating usage of the PgVectorIngestOperator.
def example_pgvector_dag(): """Example pgvector DAG demonstrating usage of the PgVectorIngestOperator.""" @task() def create_postgres_objects(): """ Example task to create PostgreSQL objects including table and installing the vector extension using the PgVectorHook. """ pg_hook = PgVectorHook(postgres_conn_id=POSTGRES_CONN_ID) # Create a table columns = ["id SERIAL PRIMARY KEY", "name VARCHAR(255)", "value INTEGER", "vector_column vector(3)"] pg_hook.create_table(TABLE_NAME, columns) # Create vector extension extension_name = "vector" pg_hook.create_extension(extension_name) # [START howto_operator_pgvector_ingest] pgvector_ingest = PgVectorIngestOperator( task_id="pgvector_ingest", conn_id=POSTGRES_CONN_ID, sql=f"INSERT INTO {TABLE_NAME} (name, value, vector_column) " f"VALUES ('John Doe', 123, '[1.0, 2.0, 3.0]')", ) # [END howto_operator_pgvector_ingest] @task() def cleanup_postgres_objects(): """ Cleanup Postgres objects created in the earlier task. """ pg_hook = PgVectorHook(postgres_conn_id=POSTGRES_CONN_ID) pg_hook.truncate_table(TABLE_NAME) pg_hook.drop_table(TABLE_NAME) create_postgres_objects() >> pgvector_ingest >> cleanup_postgres_objects()
Example pgvector DAG demonstrating usage of the PgVectorIngestOperator.
def example_pgvector_dag(): """Example pgvector DAG demonstrating usage of the PgVectorIngestOperator.""" @task() def create_postgres_objects(): """ Example task to create PostgreSQL objects including table and installing the vector extension using the PgVectorHook. """ from airflow.providers.pgvector.hooks.pgvector import PgVectorHook pg_hook = PgVectorHook(postgres_conn_id=POSTGRES_CONN_ID) # Create a table columns = [ "id SERIAL PRIMARY KEY", "name VARCHAR(255)", "value INTEGER", "vector_column vector(1536)", ] pg_hook.create_table(TABLE_NAME, columns) # Create vector extension extension_name = "vector" pg_hook.create_extension(extension_name) embedd_query = OpenAIEmbeddingOperator( task_id="embedding_using_xcom_data", conn_id="openai_default", input_text="123", model="text-embedding-ada-002", ) pgvector_ingest = PgVectorIngestOperator( task_id="pgvector_ingest", conn_id=POSTGRES_CONN_ID, sql=f"INSERT INTO {TABLE_NAME} (name, value, vector_column) " f"VALUES ('John Doe', '123' ,'{embedd_query.output}')", ) @teardown() @task() def cleanup_postgres_objects(): """ Cleanup Postgres objects created in the earlier task. """ from airflow.providers.pgvector.hooks.pgvector import PgVectorHook pg_hook = PgVectorHook(postgres_conn_id=POSTGRES_CONN_ID) pg_hook.truncate_table(TABLE_NAME) pg_hook.drop_table(TABLE_NAME) create_postgres_objects() >> pgvector_ingest >> cleanup_postgres_objects()
Example DAG which creates embeddings using CohereEmbeddingOperator and then uses WeaviateIngestOperator to insert embeddings into Weaviate.
def example_weaviate_cohere(): """ Example DAG which creates embeddings using CohereEmbeddingOperator and the uses WeaviateIngestOperator to insert embeddings to Weaviate . """ @setup @task def create_weaviate_class(): """ Example task to create class without any Vectorizer. You're expected to provide custom vectors for your data. """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. class_obj = { "class": "Weaviate_example_class", "vectorizer": "none", } weaviate_hook.create_class(class_obj) @setup @task def get_data_to_embed(): import json from pathlib import Path data = json.load(Path("jeopardy_data_without_vectors.json").open()) return [[item["Question"]] for item in data] data_to_embed = get_data_to_embed() embed_data = CohereEmbeddingOperator.partial( task_id="embedding_using_xcom_data", ).expand(input_text=data_to_embed["return_value"]) @task def update_vector_data_in_json(**kwargs): import json from pathlib import Path ti = kwargs["ti"] data = json.load(Path("jeopardy_data_without_vectors.json").open()) embedded_data = ti.xcom_pull(task_ids="embedding_using_xcom_data", key="return_value") for i, vector in enumerate(embedded_data): data[i]["Vector"] = vector[0] return data update_vector_data_in_json = update_vector_data_in_json() perform_ingestion = WeaviateIngestOperator( task_id="perform_ingestion", conn_id="weaviate_default", class_name="Weaviate_example_class", input_json=update_vector_data_in_json["return_value"], ) embed_query = CohereEmbeddingOperator( task_id="embed_query", input_text=["biology"], ) @teardown @task def delete_weaviate_class(): """ Example task to delete a weaviate class """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. weaviate_hook.delete_classes(["Weaviate_example_class"]) ( create_weaviate_class() >> embed_data >> update_vector_data_in_json >> perform_ingestion >> embed_query >> delete_weaviate_class() )
Example DAG which uses WeaviateIngestOperator to insert embeddings to Weaviate using dynamic mapping
def example_weaviate_dynamic_mapping_dag(): """ Example DAG which uses WeaviateIngestOperator to insert embeddings to Weaviate using dynamic mapping""" @setup @task def create_weaviate_class(data): """ Example task to create class without any Vectorizer. You're expected to provide custom vectors for your data. """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. class_obj = { "class": data[0], "vectorizer": data[1], } weaviate_hook.create_class(class_obj) @setup @task def get_data_to_ingest(): import json from pathlib import Path file1 = json.load(Path("jeopardy_data_with_vectors.json").open()) file2 = json.load(Path("jeopardy_data_without_vectors.json").open()) return [file1, file2] get_data_to_ingest = get_data_to_ingest() perform_ingestion = WeaviateIngestOperator.partial( task_id="perform_ingestion", conn_id="weaviate_default", ).expand( class_name=["example1", "example2"], input_data=get_data_to_ingest["return_value"], ) @teardown @task def delete_weaviate_class(class_name): """ Example task to delete a weaviate class """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. weaviate_hook.delete_classes([class_name]) ( create_weaviate_class.expand(data=[["example1", "none"], ["example2", "text2vec-openai"]]) >> perform_ingestion >> delete_weaviate_class.expand(class_name=["example1", "example2"]) )
Example DAG which creates embeddings using OpenAIEmbeddingOperator and then uses WeaviateIngestOperator to insert embeddings into Weaviate.
def example_weaviate_openai(): """ Example DAG which creates embeddings using OpenAIEmbeddingOperator and the uses WeaviateIngestOperator to insert embeddings to Weaviate . """ @setup @task def create_weaviate_class(): """ Example task to create class without any Vectorizer. You're expected to provide custom vectors for your data. """ weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. class_obj = { "class": "Weaviate_example_class", "vectorizer": "none", } weaviate_hook.create_class(class_obj) @setup @task def get_data_to_embed(): data = json.load(Path("jeopardy_data_without_vectors.json").open()) return [item["Question"] for item in data] data_to_embed = get_data_to_embed() embed_data = OpenAIEmbeddingOperator.partial( task_id="embedding_using_xcom_data", conn_id="openai_default", model="text-embedding-ada-002", ).expand(input_text=data_to_embed["return_value"]) @task def update_vector_data_in_json(**kwargs): ti = kwargs["ti"] data = json.load(Path("jeopardy_data_without_vectors.json").open()) embedded_data = ti.xcom_pull(task_ids="embedding_using_xcom_data", key="return_value") for i, vector in enumerate(embedded_data): data[i]["Vector"] = vector return data update_vector_data_in_json = update_vector_data_in_json() perform_ingestion = WeaviateIngestOperator( task_id="perform_ingestion", conn_id="weaviate_default", class_name="Weaviate_example_class", input_json=update_vector_data_in_json["return_value"], ) embed_query = OpenAIEmbeddingOperator( task_id="embed_query", conn_id="openai_default", input_text="biology", model="text-embedding-ada-002", ) @task def query_weaviate(**kwargs): ti = kwargs["ti"] query_vector = ti.xcom_pull(task_ids="embed_query", key="return_value") weaviate_hook = WeaviateHook() properties = ["question", "answer", "category"] response = weaviate_hook.query_with_vector(query_vector, "Weaviate_example_class", *properties) assert ( "In 1953 Watson & Crick built a model" in response["data"]["Get"]["Weaviate_example_class"][0]["question"] ) @teardown @task def delete_weaviate_class(): """ Example task to delete a weaviate class """ weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. weaviate_hook.delete_classes(["Weaviate_example_class"]) ( create_weaviate_class() >> embed_data >> update_vector_data_in_json >> perform_ingestion >> embed_query >> query_weaviate() >> delete_weaviate_class() )
Example Weaviate DAG demonstrating usage of the operator.
def example_weaviate_using_operator(): """ Example Weaviate DAG demonstrating usage of the operator. """ # Example tasks to create a Weaviate class without vectorizers, store data with custom vectors in XCOM, and call # WeaviateIngestOperator to ingest data with those custom vectors. @task() def create_class_without_vectorizer(): """ Example task to create class without any Vectorizer. You're expected to provide custom vectors for your data. """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. class_obj = { "class": "QuestionWithoutVectorizerUsingOperator", "vectorizer": "none", } weaviate_hook.create_class(class_obj) @task(trigger_rule="all_done") def store_data_with_vectors_in_xcom(): return sample_data_with_vector # [START howto_operator_weaviate_embedding_and_ingest_xcom_data_with_vectors] batch_data_with_vectors_xcom_data = WeaviateIngestOperator( task_id="batch_data_with_vectors_xcom_data", conn_id="weaviate_default", class_name="QuestionWithoutVectorizerUsingOperator", input_json=store_data_with_vectors_in_xcom(), trigger_rule="all_done", ) # [END howto_operator_weaviate_embedding_and_ingest_xcom_data_with_vectors] # [START howto_operator_weaviate_embedding_and_ingest_callable_data_with_vectors] batch_data_with_vectors_callable_data = WeaviateIngestOperator( task_id="batch_data_with_vectors_callable_data", conn_id="weaviate_default", class_name="QuestionWithoutVectorizerUsingOperator", input_json=get_data_with_vectors(), trigger_rule="all_done", ) # [END howto_operator_weaviate_embedding_and_ingest_callable_data_with_vectors] # Example tasks to create class with OpenAI vectorizer, store data without vectors in XCOM, and call # WeaviateIngestOperator to ingest data by internally generating OpenAI vectors while ingesting. @task() def create_class_with_vectorizer(): """ Example task to create class with OpenAI Vectorizer responsible for vectorining data using Weaviate cluster. """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() class_obj = { "class": "QuestionWithOpenAIVectorizerUsingOperator", "description": "Information from a Jeopardy! question", # description of the class "properties": [ { "dataType": ["text"], "description": "The question", "name": "question", }, { "dataType": ["text"], "description": "The answer", "name": "answer", }, { "dataType": ["text"], "description": "The category", "name": "category", }, ], "vectorizer": "text2vec-openai", } weaviate_hook.create_class(class_obj) @task() def create_class_for_doc_data_with_vectorizer(): """ Example task to create class with OpenAI Vectorizer responsible for vectorining data using Weaviate cluster. """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() class_obj = { "class": "QuestionWithOpenAIVectorizerUsingOperatorDocs", "description": "Information from a Jeopardy! 
question", # description of the class "properties": [ { "dataType": ["text"], "description": "The question", "name": "question", }, { "dataType": ["text"], "description": "The answer", "name": "answer", }, { "dataType": ["text"], "description": "The category", "name": "category", }, { "dataType": ["text"], "description": "URL for source document", "name": "docLink", }, ], "vectorizer": "text2vec-openai", } weaviate_hook.create_class(class_obj) @task(trigger_rule="all_done") def store_data_without_vectors_in_xcom(): import json from pathlib import Path data = json.load(Path("jeopardy_data_without_vectors.json").open()) return data @task(trigger_rule="all_done") def store_doc_data_without_vectors_in_xcom(): import json from pathlib import Path data = json.load(Path("jeopardy_doc_data_without_vectors.json").open()) return data xcom_data_without_vectors = store_data_without_vectors_in_xcom() xcom_doc_data_without_vectors = store_doc_data_without_vectors_in_xcom() # [START howto_operator_weaviate_ingest_xcom_data_without_vectors] batch_data_without_vectors_xcom_data = WeaviateIngestOperator( task_id="batch_data_without_vectors_xcom_data", conn_id="weaviate_default", class_name="QuestionWithOpenAIVectorizerUsingOperator", input_json=xcom_data_without_vectors["return_value"], trigger_rule="all_done", ) # [END howto_operator_weaviate_ingest_xcom_data_without_vectors] # [START howto_operator_weaviate_ingest_callable_data_without_vectors] batch_data_without_vectors_callable_data = WeaviateIngestOperator( task_id="batch_data_without_vectors_callable_data", conn_id="weaviate_default", class_name="QuestionWithOpenAIVectorizerUsingOperator", input_json=get_data_without_vectors(), trigger_rule="all_done", ) # [END howto_operator_weaviate_ingest_callable_data_without_vectors] create_or_replace_document_objects_without_vectors = WeaviateDocumentIngestOperator( task_id="create_or_replace_document_objects_without_vectors_xcom_data", existing="replace", document_column="docLink", conn_id="weaviate_default", class_name="QuestionWithOpenAIVectorizerUsingOperatorDocs", batch_config_params={"batch_size": 1000}, input_data=xcom_doc_data_without_vectors["return_value"], trigger_rule="all_done", ) @teardown @task def delete_weaviate_class_Vector(): """ Example task to delete a weaviate class """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. weaviate_hook.delete_classes( [ "QuestionWithOpenAIVectorizerUsingOperator", ] ) @teardown @task def delete_weaviate_class_without_Vector(): """ Example task to delete a weaviate class """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. weaviate_hook.delete_classes( [ "QuestionWithoutVectorizerUsingOperator", ] ) @teardown @task def delete_weaviate_docs_class_without_Vector(): """ Example task to delete a weaviate class """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. 
weaviate_hook.delete_classes(["QuestionWithOpenAIVectorizerUsingOperatorDocs"]) ( create_class_without_vectorizer() >> [batch_data_with_vectors_xcom_data, batch_data_with_vectors_callable_data] >> delete_weaviate_class_without_Vector() ) ( create_class_for_doc_data_with_vectorizer() >> [create_or_replace_document_objects_without_vectors] >> delete_weaviate_docs_class_without_Vector() ) ( create_class_with_vectorizer() >> [ batch_data_without_vectors_xcom_data, batch_data_without_vectors_callable_data, ] >> delete_weaviate_class_Vector() )
Example Weaviate DAG demonstrating usage of the hook.
def example_weaviate_dag_using_hook(): """Example Weaviate DAG demonstrating usage of the hook.""" @task() def create_class_with_vectorizer(): """ Example task to create class with OpenAI Vectorizer responsible for vectorining data using Weaviate cluster. """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() class_obj = { "class": "QuestionWithOpenAIVectorizerUsingHook", "description": "Information from a Jeopardy! question", # description of the class "properties": [ { "dataType": ["text"], "description": "The question", "name": "question", }, { "dataType": ["text"], "description": "The answer", "name": "answer", }, { "dataType": ["text"], "description": "The category", "name": "category", }, ], "vectorizer": "text2vec-openai", } weaviate_hook.create_class(class_obj) @task() def create_class_without_vectorizer(): """ Example task to create class without any Vectorizer. You're expected to provide custom vectors for your data. """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. class_obj = { "class": "QuestionWithoutVectorizerUsingHook", "vectorizer": "none", } weaviate_hook.create_class(class_obj) @task(trigger_rule="all_done") def store_data_without_vectors_in_xcom(): import json from pathlib import Path data = json.load(Path("jeopardy_data_without_vectors.json").open()) return data @task(trigger_rule="all_done") def store_data_with_vectors_in_xcom(): import json from pathlib import Path data = json.load(Path("jeopardy_data_with_vectors.json").open()) return data @task(trigger_rule="all_done") def batch_data_without_vectors(data: list): from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() weaviate_hook.batch_data("QuestionWithOpenAIVectorizerUsingHook", data) @task(trigger_rule="all_done") def batch_data_with_vectors(data: list): from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() weaviate_hook.batch_data("QuestionWithoutVectorizerUsingHook", data) @teardown @task def delete_weaviate_class_Vector(): """ Example task to delete a weaviate class """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. weaviate_hook.delete_classes(["QuestionWithOpenAIVectorizerUsingHook"]) @teardown @task def delete_weaviate_class_without_Vector(): """ Example task to delete a weaviate class """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. weaviate_hook.delete_classes(["QuestionWithoutVectorizerUsingHook"]) data_with_vectors = store_data_with_vectors_in_xcom() ( create_class_without_vectorizer() >> batch_data_with_vectors(data_with_vectors["return_value"]) >> delete_weaviate_class_Vector() ) data_without_vectors = store_data_without_vectors_in_xcom() ( create_class_with_vectorizer() >> batch_data_without_vectors(data_without_vectors["return_value"]) >> delete_weaviate_class_without_Vector() )
Example DAG which uses WeaviateIngestOperator to insert embeddings to Weaviate with vectorizer and then query to verify the response .
def example_weaviate_vectorizer_dag(): """ Example DAG which uses WeaviateIngestOperator to insert embeddings to Weaviate with vectorizer and then query to verify the response . """ @setup @task def create_weaviate_class(): """ Example task to create class without any Vectorizer. You're expected to provide custom vectors for your data. """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. class_obj = { "class": class_name, "vectorizer": "text2vec-openai", } weaviate_hook.create_class(class_obj) @setup @task def get_data_to_ingest(): import json from pathlib import Path data = json.load(Path("jeopardy_data_without_vectors.json").open()) return data data_to_ingest = get_data_to_ingest() perform_ingestion = WeaviateIngestOperator( task_id="perform_ingestion", conn_id="weaviate_default", class_name=class_name, input_data=data_to_ingest["return_value"], ) @task def query_weaviate(): from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() properties = ["question", "answer", "category"] response = weaviate_hook.query_without_vector( "biology", "Weaviate_with_vectorizer_example_class", *properties ) assert "In 1953 Watson & Crick built a model" in response["data"]["Get"][class_name][0]["question"] @teardown @task def delete_weaviate_class(): """ Example task to delete a weaviate class """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. weaviate_hook.delete_classes([class_name]) delete_weaviate_class = delete_weaviate_class() create_weaviate_class() >> perform_ingestion >> query_weaviate() >> delete_weaviate_class
Example DAG which uses WeaviateIngestOperator to insert embeddings to Weaviate without vectorizer and then query to verify the response
def example_weaviate_without_vectorizer_dag(): """ Example DAG which uses WeaviateIngestOperator to insert embeddings to Weaviate without vectorizer and then query to verify the response """ @setup @task def create_weaviate_class(): """ Example task to create class without any Vectorizer. You're expected to provide custom vectors for your data. """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. class_obj = { "class": class_name, "vectorizer": "none", } weaviate_hook.create_class(class_obj) @setup @task def get_data_without_vectors(): import json from pathlib import Path data = json.load(Path("jeopardy_data_with_vectors.json").open()) return data data_to_ingest = get_data_without_vectors() perform_ingestion = WeaviateIngestOperator( task_id="perform_ingestion", conn_id="weaviate_default", class_name=class_name, input_data=data_to_ingest["return_value"], ) embedd_query = OpenAIEmbeddingOperator( task_id="embedd_query", conn_id="openai_default", input_text="biology", model="text-embedding-ada-002", ) @task def query_weaviate(**kwargs): from airflow.providers.weaviate.hooks.weaviate import WeaviateHook ti = kwargs["ti"] query_vector = ti.xcom_pull(task_ids="embedd_query", key="return_value") weaviate_hook = WeaviateHook() properties = ["question", "answer", "category"] response = weaviate_hook.query_with_vector( query_vector, "Weaviate_example_without_vectorizer_class", *properties ) assert "In 1953 Watson & Crick built a model" in response["data"]["Get"][class_name][0]["question"] @teardown @task def delete_weaviate_class(): """ Example task to delete a weaviate class """ from airflow.providers.weaviate.hooks.weaviate import WeaviateHook weaviate_hook = WeaviateHook() # Class definition object. Weaviate's autoschema feature will infer properties when importing. weaviate_hook.delete_classes([class_name]) ( create_weaviate_class() >> perform_ingestion >> embedd_query >> query_weaviate() >> delete_weaviate_class() )
Watcher task raises an AirflowException and is used to 'watch' tasks for failures and propagates fail status to the whole DAG Run
def watcher():
    """Watcher task raises an AirflowException and is used to 'watch' tasks for failures
    and propagates fail status to the whole DAG Run"""
    raise AirflowException("Failing task because one or more upstream tasks failed.")
Initializing ``BaseTaskRunner`` might have side effects on other tests. This fixture resets logging back to the default after each separate module in this test package has run.
def reset_to_default_logging():
    """
    Initializing ``BaseTaskRunner`` might have side effects on other tests.

    This fixture resets logging back to the default after each separate module in this test package has run.
    """
    yield

    airflow_logger = logging.getLogger("airflow")
    airflow_logger.handlers = []
    dictConfig(DEFAULT_LOGGING_CONFIG)
    get_listener_manager().clear()
Set `airflow.task` logger to propagate. Apparently, caplog doesn't work if you don't propagate messages to root. But the normal behavior of the `airflow.task` logger is not to propagate. When freshly configured, the logger is set to propagate. However, ordinarily when set_context is called, this is set to False. To override this behavior, so that the messages make it to caplog, we must tell the handler to maintain its current setting.
def propagate_task_logger():
    """
    Set `airflow.task` logger to propagate.

    Apparently, caplog doesn't work if you don't propagate messages to root.
    But the normal behavior of the `airflow.task` logger is not to propagate.

    When freshly configured, the logger is set to propagate. However, ordinarily
    when set_context is called, this is set to False. To override this behavior,
    so that the messages make it to caplog, we must tell the handler to maintain
    its current setting.
    """
    logger = logging.getLogger("airflow.task")
    h = logger.handlers[0]
    assert isinstance(h, FileTaskHandler)  # just to make sure / document
    _propagate = h.maintain_propagate
    if _propagate is False:
        h.maintain_propagate = True
    try:
        yield
    finally:
        if _propagate is False:
            h.maintain_propagate = _propagate
Helper function to create a client with a temporary user which will be deleted once done
def create_test_client(app, user_name, role_name, permissions):
    """
    Helper function to create a client with a temporary user which will be deleted once done
    """
    client = app.test_client()
    with create_user_scope(app, username=user_name, role_name=role_name, permissions=permissions) as _:
        resp = client.post("/login/", data={"username": user_name, "password": user_name})
        assert resp.status_code == 302
        yield client
Helper function designed mainly to be used with a pytest fixture. It creates a user, provides it to the fixture via yield (generator), and then tidies up once the test is complete.
def create_user_scope(app, username, **kwargs):
    """
    Helper function designed mainly to be used with a pytest fixture.

    It creates a user, provides it to the fixture via yield (generator),
    and then tidies up once the test is complete.
    """
    test_user = create_user(app, username, **kwargs)
    try:
        yield test_user
    finally:
        delete_user(app, username)
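A minimal usage sketch of this helper as a pytest fixture (assuming it is wrapped with ``contextlib.contextmanager``, as the yield suggests; the ``app`` fixture, user name, and role name are hypothetical):

import pytest

@pytest.fixture
def viewer_client(app):
    # Hypothetical fixture: a throwaway "Viewer" user exists only for the duration of the test.
    with create_user_scope(app, username="temp_viewer", role_name="Viewer") as user:
        yield app.test_client(), user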
Asserts that the number of queries is as expected, with the margin applied. The margin is helpful for complex cases where we do not want to change the expected count every time the queries change, but we still want to catch cases where the query count spins out of control. :param expected_count: expected number of queries :param message_fmt: message printed optionally if the number is exceeded :param margin: margin to add to expected number of calls
def assert_queries_count(expected_count: int, message_fmt: str | None = None, margin: int = 0):
    """
    Asserts that the number of queries is as expected, with the margin applied.

    The margin is helpful for complex cases where we do not want to change the expected
    count every time the queries change, but we still want to catch cases where the
    query count spins out of control.

    :param expected_count: expected number of queries
    :param message_fmt: message printed optionally if the number is exceeded
    :param margin: margin to add to expected number of calls
    """
    with count_queries() as result:
        yield None

    count = sum(result.values())
    if count > expected_count + margin:
        message_fmt = (
            message_fmt
            or "The expected number of db queries is {expected_count} with extra margin: {margin}. "
            "The current number is {current_count}.\n\n"
            "Recorded query locations:"
        )
        message = message_fmt.format(current_count=count, expected_count=expected_count, margin=margin)

        for location, count in result.items():
            message += f"\n\t{location}:\t{count}"

        raise AssertionError(message)
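A minimal usage sketch (assuming the helper is wrapped with ``contextlib.contextmanager``; the SQLAlchemy ``session`` and the ``DagRun`` query inside the block are only illustrative):

with assert_queries_count(expected_count=3, margin=1):
    # Any code that touches the database goes here; the block raises AssertionError
    # if more than expected_count + margin queries were recorded.
    session.query(DagRun).all()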
Context manager to provide a temporary value for wasb_default connection :param key_file_path: Path to file with wasb_default credentials .json file.
def provide_wasb_default_connection(key_file_path: str):
    """
    Context manager to provide a temporary value for wasb_default connection

    :param key_file_path: Path to file with wasb_default credentials .json file.
    """
    if not key_file_path.endswith(".json"):
        raise AirflowException("Use a JSON key file.")
    with open(key_file_path) as credentials:
        creds = json.load(credentials)
    conn = Connection(
        conn_id=WASB_CONNECTION_ID,
        conn_type="wasb",
        host=creds.get("host", None),
        login=creds.get("login", None),
        password=creds.get("password", None),
        extra=json.dumps(creds.get("extra", None)),
    )
    with patch_environ({f"AIRFLOW_CONN_{conn.conn_id.upper()}": conn.get_uri()}):
        yield
Context manager to provide a temporary value for azure_data_lake_default connection :param key_file_path: Path to file with azure_data_lake_default credentials .json file.
def provide_azure_data_lake_default_connection(key_file_path: str):
    """
    Context manager to provide a temporary value for azure_data_lake_default connection

    :param key_file_path: Path to file with azure_data_lake_default credentials .json file.
    """
    required_fields = {"login", "password", "extra"}
    if not key_file_path.endswith(".json"):
        raise AirflowException("Use a JSON key file.")
    with open(key_file_path) as credentials:
        creds = json.load(credentials)
    missing_keys = required_fields - creds.keys()
    if missing_keys:
        message = f"{missing_keys} fields are missing"
        raise AirflowException(message)
    conn = Connection(
        conn_id=DATA_LAKE_CONNECTION_ID,
        conn_type=DATA_LAKE_CONNECTION_TYPE,
        host=creds.get("host", None),
        login=creds.get("login", None),
        password=creds.get("password", None),
        extra=json.dumps(creds.get("extra", None)),
    )
    with patch_environ({f"AIRFLOW_CONN_{conn.conn_id.upper()}": conn.get_uri()}):
        yield
Temporarily patches env vars, restoring env as it was after context exit. Example: with env_vars({'AIRFLOW_CONN_AWS_DEFAULT': 's3://@'}): # now we have an aws default connection available
def env_vars(overrides):
    """
    Temporarily patches env vars, restoring env as it was after context exit.

    Example:
        with env_vars({'AIRFLOW_CONN_AWS_DEFAULT': 's3://@'}):
            # now we have an aws default connection available
    """
    orig_vars = {}
    new_vars = []
    for env, value in overrides.items():
        if env in os.environ:
            orig_vars[env] = os.environ.pop(env, "")
        else:
            new_vars.append(env)
        os.environ[env] = value
    try:
        yield
    finally:
        for env, value in orig_vars.items():
            os.environ[env] = value
        for env in new_vars:
            os.environ.pop(env)
Returns the full path to the provided GCP key. :param key: Name of the GCP key, for example ``my_service.json`` :returns: Full path to the key
def resolve_full_gcp_key_path(key: str) -> str:
    """
    Returns the full path to the provided GCP key.

    :param key: Name of the GCP key, for example ``my_service.json``
    :returns: Full path to the key
    """
    path = os.environ.get("CREDENTIALS_DIR", "/files/airflow-breeze-config/keys")
    key = os.path.join(path, key)
    return key
Context manager that provides: - GCP credentials for application supporting `Application Default Credentials (ADC) strategy <https://cloud.google.com/docs/authentication/production>`__. - temporary value of :envvar:`AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT` variable - the ``gcloud`` config directory isolated from user configuration Moreover it resolves full path to service keys so user can pass ``myservice.json`` as ``key_file_path``. :param key_file_path: Path to file with GCP credentials .json file. :param scopes: OAuth scopes for the connection :param project_id: The id of GCP project for the connection. Default: ``os.environ["GCP_PROJECT_ID"]`` or None
def provide_gcp_context(
    key_file_path: str | None = None,
    scopes: Sequence | None = None,
    project_id: str | None = None,
):
    """
    Context manager that provides:

    - GCP credentials for application supporting `Application Default Credentials (ADC)
      strategy <https://cloud.google.com/docs/authentication/production>`__.
    - temporary value of :envvar:`AIRFLOW_CONN_GOOGLE_CLOUD_DEFAULT` variable
    - the ``gcloud`` config directory isolated from user configuration

    Moreover it resolves full path to service keys so user can pass ``myservice.json``
    as ``key_file_path``.

    :param key_file_path: Path to file with GCP credentials .json file.
    :param scopes: OAuth scopes for the connection
    :param project_id: The id of GCP project for the connection.
        Default: ``os.environ["GCP_PROJECT_ID"]`` or None
    """
    key_file_path = resolve_full_gcp_key_path(key_file_path)  # type: ignore
    if project_id is None:
        project_id = os.environ.get("GCP_PROJECT_ID")
    with provide_gcp_conn_and_credentials(
        key_file_path, scopes, project_id
    ), tempfile.TemporaryDirectory() as gcloud_config_tmp, mock.patch.dict(
        "os.environ", {CLOUD_SDK_CONFIG_DIR: gcloud_config_tmp}
    ):
        executor = CommandExecutor()

        if key_file_path:
            executor.execute_cmd(
                [
                    "gcloud",
                    "auth",
                    "activate-service-account",
                    f"--key-file={key_file_path}",
                ]
            )
        if project_id:
            executor.execute_cmd(["gcloud", "config", "set", "core/project", project_id])
        yield
Replaces the last n occurrences of the old string with the new one within the provided string :param s: string in which to replace occurrences :param old: old string :param new: new string :param number_of_occurrences: how many occurrences should be replaced :return: string with last n occurrences replaced
def last_replace(s, old, new, number_of_occurrences):
    """
    Replaces the last n occurrences of the old string with the new one within the provided string.

    :param s: string in which to replace occurrences
    :param old: old string
    :param new: new string
    :param number_of_occurrences: how many occurrences should be replaced
    :return: string with last n occurrences replaced
    """
    list_of_components = s.rsplit(old, number_of_occurrences)
    return new.join(list_of_components)
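For example, replacing only the last separator converts a dotted test path into the ``module:Class`` form used by ``print_all_cases`` below:

last_replace("tests.utils.test_db.TestDb", ".", ":", 1)
# -> "tests.utils.test_db:TestDb"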
Prints all test cases read from the xunit test file :param xunit_test_file_path: path of the xunit file :return: None
def print_all_cases(xunit_test_file_path):
    """
    Prints all test cases read from the xunit test file

    :param xunit_test_file_path: path of the xunit file
    :return: None
    """
    with open(xunit_test_file_path) as file:
        text = file.read()

    root = ElementTree.fromstring(text)
    test_cases = root.findall(".//testcase")
    classes = set()
    modules = set()

    for test_case in test_cases:
        # Element attributes are read with .get(); subscripting an Element by name is not supported.
        the_module = test_case.get("classname").rpartition(".")[0]
        the_class = last_replace(test_case.get("classname"), ".", ":", 1)
        test_method = test_case.get("name")
        modules.add(the_module)
        classes.add(the_class)
        print(the_class + "." + test_method)

    for the_class in classes:
        print(the_class)

    for the_module in modules:
        print(the_module)
Protects the initial state and sets the default state for the airflow.plugins module. You can also overwrite variables by passing a keyword argument. airflow.plugins_manager uses many global variables. To avoid side effects, this decorator performs the following operations: 1. saves variables state, 2. set variables to default value, 3. executes context code, 4. restores the state of variables to the state from point 1. Use this context if you want your test to not have side effects in airflow.plugins_manager, and other tests do not affect the results of this test.
def mock_plugin_manager(plugins=None, **kwargs):
    """
    Protects the initial state and sets the default state for the airflow.plugins module.

    You can also overwrite variables by passing a keyword argument.

    airflow.plugins_manager uses many global variables. To avoid side effects, this decorator performs
    the following operations:

    1. saves variables state,
    2. set variables to default value,
    3. executes context code,
    4. restores the state of variables to the state from point 1.

    Use this context if you want your test to not have side effects in airflow.plugins_manager, and
    other tests do not affect the results of this test.
    """
    illegal_arguments = set(kwargs.keys()) - set(PLUGINS_MANAGER_NULLABLE_ATTRIBUTES) - {"import_errors"}
    if illegal_arguments:
        raise TypeError(
            f"TypeError: mock_plugin_manager got an unexpected keyword arguments: {illegal_arguments}"
        )
    # Handle plugins specially
    with ExitStack() as exit_stack:

        def mock_loaded_plugins():
            exit_stack.enter_context(mock.patch("airflow.plugins_manager.plugins", plugins or []))

        exit_stack.enter_context(
            mock.patch(
                "airflow.plugins_manager.load_plugins_from_plugin_directory",
                side_effect=mock_loaded_plugins,
            )
        )

        for attr in PLUGINS_MANAGER_NULLABLE_ATTRIBUTES:
            exit_stack.enter_context(mock.patch(f"airflow.plugins_manager.{attr}", kwargs.get(attr)))

        # Always start the block with an empty plugins, so ensure_plugins_loaded runs.
        exit_stack.enter_context(mock.patch("airflow.plugins_manager.plugins", None))
        exit_stack.enter_context(
            mock.patch("airflow.plugins_manager.import_errors", kwargs.get("import_errors", {}))
        )

        yield
Returns true if the importable Python object exists.
def object_exists(path: str):
    """Returns true if the importable Python object exists."""
    from airflow.utils.module_loading import import_string

    try:
        import_string(path)
        return True
    except ImportError:
        return False
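A small usage sketch; the dotted path below is just an example of the strings ``import_string`` accepts:

# Skip optional checks when the SSH provider cannot be imported.
if not object_exists("airflow.providers.ssh.hooks.ssh.SSHHook"):
    print("SSH provider is not installed; skipping the related checks")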
Returns provider version given provider package name. Example:: if get_provider_version("apache-airflow-providers-cncf-kubernetes") >= (6, 0): raise Exception( "You must now remove `get_kube_client` from PodManager " "and make kube_client a required argument." )
def get_provider_version(provider_name):
    """
    Returns provider version given provider package name.

    Example::

        if get_provider_version("apache-airflow-providers-cncf-kubernetes") >= (6, 0):
            raise Exception(
                "You must now remove `get_kube_client` from PodManager "
                "and make kube_client a required argument."
            )
    """
    from airflow.providers_manager import ProvidersManager

    info = ProvidersManager().providers[provider_name]
    return semver.VersionInfo.parse(info.version)
Initializes authentication backend
def init_app(_):
    """Initializes authentication backend"""
Decorator for functions that require authentication
def requires_authentication(function: T):
    """Decorator for functions that require authentication"""

    @wraps(function)
    def decorated(*args, **kwargs):
        user_id = request.remote_user
        if not user_id:
            log.debug("Missing REMOTE_USER.")
            return Response("Forbidden", 403)

        log.debug("Looking for user: %s", user_id)
        user = _lookup_user(user_id)
        if not user:
            return Response("Forbidden", 403)

        log.debug("Found user: %s", user)

        login_user(user, remember=False)
        return function(*args, **kwargs)

    return cast(T, decorated)
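A minimal usage sketch; the view function below is hypothetical and only illustrates how the decorator wraps an endpoint:

@requires_authentication
def health_check():
    # Only reached when REMOTE_USER resolves to a known user; otherwise a 403 is returned.
    return Response("OK", 200)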
Context manager that provides a temporary value of SALESFORCE_DEFAULT connection. :param key_file_path: Path to file with SALESFORCE credentials .json file.
def provide_salesforce_connection(key_file_path: str):
    """
    Context manager that provides a temporary value of SALESFORCE_DEFAULT connection.

    :param key_file_path: Path to file with SALESFORCE credentials .json file.
    """
    if not key_file_path.endswith(".json"):
        raise AirflowException("Use a JSON key file.")
    with open(key_file_path) as credentials:
        creds = json.load(credentials)
    missing_keys = CONFIG_REQUIRED_FIELDS - creds.keys()
    if missing_keys:
        message = f"{missing_keys} fields are missing"
        raise AirflowException(message)
    conn = Connection(
        conn_id=SALESFORCE_CONNECTION_ID,
        conn_type=CONNECTION_TYPE,
        host=creds["host"],
        login=creds["login"],
        password=creds["password"],
        extra=json.dumps({"security_token": creds["security_token"]}),
    )
    with patch_environ({f"AIRFLOW_CONN_{conn.conn_id.upper()}": conn.get_uri()}):
        yield
Context manager to provide a temporary value for sftp_default connection :param key_file_path: Path to file with sftp_default credentials .json file.
def provide_sftp_default_connection(key_file_path: str):
    """
    Context manager to provide a temporary value for sftp_default connection

    :param key_file_path: Path to file with sftp_default credentials .json file.
    """
    if not key_file_path.endswith(".json"):
        raise AirflowException("Use a JSON key file.")
    with open(key_file_path) as credentials:
        creds = json.load(credentials)
    conn = Connection(
        conn_id=SFTP_CONNECTION_ID,
        conn_type="ssh",
        port=creds.get("port", None),
        host=creds.get("host", None),
        login=creds.get("login", None),
        password=creds.get("password", None),
        extra=json.dumps(creds.get("extra", None)),
    )
    with patch_environ({f"AIRFLOW_CONN_{conn.conn_id.upper()}": conn.get_uri()}):
        yield
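A usage sketch for these connection helpers (assuming they are wrapped with ``contextlib.contextmanager``; the key file path is hypothetical):

with provide_sftp_default_connection("/files/keys/sftp_default.json"):
    # Inside the block AIRFLOW_CONN_SFTP_DEFAULT points at the temporary connection.
    SFTPHook().describe_directory("/tmp")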
Returns LOGS folder specified in current Airflow config.
def resolve_logs_folder() -> str:
    """
    Returns LOGS folder specified in current Airflow config.
    """
    config_file = get_airflow_config(AIRFLOW_HOME)
    conf = AirflowConfigParser()
    conf.read(config_file)
    try:
        return get_default_logs_if_none(conf.get("logging", "base_log_folder"))
    except AirflowException:
        try:
            return get_default_logs_if_none(conf.get("core", "base_log_folder"))
        except AirflowException:
            pass
    return get_default_logs_if_none(None)
Calculates the difference in free memory before and after the code block executes. In other words, how much memory the code snippet used. :param human_readable: If yes, the result will be displayed in human readable units. If no, the result will be displayed as bytes. :param gc_collect: If yes, the garbage collector will be started before checking used memory.
def trace_memory(human_readable=True, gc_collect=False):
    """
    Calculates the difference in free memory before and after the code block executes.
    In other words, how much memory the code snippet used.

    :param human_readable: If yes, the result will be displayed in human readable units.
        If no, the result will be displayed as bytes.
    :param gc_collect: If yes, the garbage collector will be started before checking used memory.
    """
    if gc_collect:
        gc.collect()
    before = _get_process_memory()
    result = TraceMemoryResult()
    try:
        yield result
    finally:
        if gc_collect:
            gc.collect()
        after = _get_process_memory()
        diff = after - before

        result.before = before
        result.after = after
        result.value = diff

        if human_readable:
            human_diff = _human_readable_size(diff)
            print(f"Memory: {human_diff}")
        else:
            print(f"Memory: {diff} bytes")
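A minimal usage sketch (assuming the function is wrapped with ``contextlib.contextmanager``):

with trace_memory(human_readable=False) as result:
    payload = [0] * 1_000_000  # allocate something measurable
print(f"Used about {result.value} bytes")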
This context manager provides sampling-based profiling: it generates a flame graph and saves it to a file. It uses ``py-spy`` internally. Running py-spy inside of a docker container will also usually bring up a permissions denied error even when running as root. This error is caused by docker restricting the process_vm_readv system call we are using. This can be overridden by setting --cap-add SYS_PTRACE when starting the docker container. Alternatively you can edit the docker-compose yaml file .. code-block:: yaml your_service: cap_add: - SYS_PTRACE In the case of Airflow Breeze, you should modify the ``tests/utils/perf/perf_kit/python.py`` file.
def pyspy():
    """
    This context manager provides sampling-based profiling: it generates a flame graph
    and saves it to a file. It uses ``py-spy`` internally.

    Running py-spy inside of a docker container will also usually bring up a permissions denied
    error even when running as root. This error is caused by docker restricting the
    process_vm_readv system call we are using. This can be overridden by setting
    --cap-add SYS_PTRACE when starting the docker container.

    Alternatively you can edit the docker-compose yaml file

    .. code-block:: yaml

        your_service:
          cap_add:
            - SYS_PTRACE

    In the case of Airflow Breeze, you should modify the ``tests/utils/perf/perf_kit/python.py`` file.
    """
    pid = str(os.getpid())
    suffix = datetime.datetime.now().isoformat()
    filename = f"{PYSPY_OUTPUT}/flame-{suffix}-{pid}.html"
    pyspy_pid = os.spawnlp(
        os.P_NOWAIT, "sudo", "sudo", "py-spy", "record", "--idle", "-o", filename, "-p", pid
    )
    try:
        yield
    finally:
        os.kill(pyspy_pid, signal.SIGINT)
        print(f"Report saved to: {filename}")
This context manager provides deterministic profiling. It uses ``cProfile`` internally. It generates statistics and prints them on the screen.
def profiled(print_callers=False):
    """
    This context manager provides deterministic profiling. It uses ``cProfile`` internally.
    It generates statistics and prints them on the screen.
    """
    profile = cProfile.Profile()
    profile.enable()
    try:
        yield
    finally:
        profile.disable()
        stat = StringIO()
        pstatistics = pstats.Stats(profile, stream=stat).sort_stats("cumulative")
        if print_callers:
            pstatistics.print_callers()
        else:
            pstatistics.print_stats()
        print(stat.getvalue())
Measures code execution time. :param repeat_count: If passed, the result will be divided by the value.
def timing(repeat_count: int = 1):
    """
    Measures code execution time.

    :param repeat_count: If passed, the result will be divided by the value.
    """
    result = TimingResult()
    result.start_time = time.monotonic()
    try:
        yield result
    finally:
        end_time = time.monotonic()
        diff = (end_time - result.start_time) * 1000.0
        result.end_time = end_time
        if repeat_count == 1:
            result.value = diff
            print(f"Loop time: {diff:.3f} ms")
        else:
            average_time = diff / repeat_count
            result.value = average_time
            print(f"Average time: {average_time:.3f} ms")
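A minimal usage sketch (assuming the function is wrapped with ``contextlib.contextmanager``):

with timing(repeat_count=10) as t:
    for _ in range(10):
        sum(range(100_000))
# t.value now holds the average time per iteration, in milliseconds.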
Function decorator that repeats the wrapped function many times. :param repeat_count: The repeat count
def repeat(repeat_count=5):
    """
    Function decorator that repeats the wrapped function many times.

    :param repeat_count: The repeat count
    """

    def repeat_decorator(f):
        @functools.wraps(f)
        def wrap(*args, **kwargs):
            last_result = None
            for _ in range(repeat_count):
                last_result = f(*args, **kwargs)
            return last_result

        return wrap

    return repeat_decorator
Executes code for only a limited number of seconds. If the code does not finish during this time, it will be interrupted. :param seconds: Number of seconds
def timeout(seconds=1):
    """
    Executes code for only a limited number of seconds. If the code does not finish
    during this time, it will be interrupted.

    :param seconds: Number of seconds
    """

    def handle_timeout(signum, frame):
        raise TimeoutException("Process timed out.")

    try:
        signal.signal(signal.SIGALRM, handle_timeout)
        signal.alarm(seconds)
    except ValueError:
        raise Exception("timeout can't be used in the current context")

    try:
        yield
    except TimeoutException:
        print("Process timed out.")
    finally:
        try:
            signal.alarm(0)
        except ValueError:
            raise Exception("timeout can't be used in the current context")
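A minimal usage sketch (assuming the function is wrapped with ``contextlib.contextmanager``; ``SIGALRM`` is Unix-only and must be used from the main thread):

import time

with timeout(seconds=2):
    time.sleep(10)  # interrupted after ~2 seconds; "Process timed out." is printed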
Mock serialization function for Timetable objects. :param timetable: The Timetable object to serialize.
def serialize_timetable(timetable: Timetable) -> str:
    """
    Mock serialization function for Timetable objects.

    :param timetable: The Timetable object to serialize.
    """
    return "serialized_timetable"
Mock deserialization function for Timetable objects. :param serialized: The serialized data of the timetable.
def deserialize_timetable(serialized: str) -> MockTimetable:
    """
    Mock deserialization function for Timetable objects.

    :param serialized: The serialized data of the timetable.
    """
    return MockTimetable()
Pytest fixture for creating a MockTimetable object.
def test_timetable() -> MockTimetable:
    """Pytest fixture for creating a MockTimetable object."""
    return MockTimetable()
Pytest fixture for creating a list of Dataset objects.
def test_datasets() -> list[Dataset]:
    """Pytest fixture for creating a list of Dataset objects."""
    return [Dataset("test_dataset")]
Pytest fixture for creating a DatasetTimetable object. :param test_timetable: The test timetable instance. :param test_datasets: A list of Dataset instances.
def dataset_timetable(test_timetable: MockTimetable, test_datasets: list[Dataset]) -> DatasetOrTimeSchedule:
    """
    Pytest fixture for creating a DatasetTimetable object.

    :param test_timetable: The test timetable instance.
    :param test_datasets: A list of Dataset instances.
    """
    return DatasetOrTimeSchedule(timetable=test_timetable, datasets=test_datasets)
Tests the serialization method of DatasetTimetable. :param dataset_timetable: The DatasetTimetable instance to test. :param monkeypatch: The monkeypatch fixture from pytest.
def test_serialization(dataset_timetable: DatasetOrTimeSchedule, monkeypatch: Any) -> None:
    """
    Tests the serialization method of DatasetTimetable.

    :param dataset_timetable: The DatasetTimetable instance to test.
    :param monkeypatch: The monkeypatch fixture from pytest.
    """
    monkeypatch.setattr(
        "airflow.serialization.serialized_objects.encode_timetable", lambda x: "mock_serialized_timetable"
    )
    serialized = dataset_timetable.serialize()
    assert serialized == {
        "timetable": "mock_serialized_timetable",
    }
Tests the deserialization method of DatasetTimetable. :param monkeypatch: The monkeypatch fixture from pytest.
def test_deserialization(monkeypatch: Any) -> None:
    """
    Tests the deserialization method of DatasetTimetable.

    :param monkeypatch: The monkeypatch fixture from pytest.
    """
    monkeypatch.setattr(
        "airflow.serialization.serialized_objects.decode_timetable", lambda x: MockTimetable()
    )
    mock_serialized_data = {"timetable": "mock_serialized_timetable", "datasets": [{"uri": "test_dataset"}]}
    deserialized = DatasetOrTimeSchedule.deserialize(mock_serialized_data)
    assert isinstance(deserialized, DatasetOrTimeSchedule)
Tests the infer_manual_data_interval method of DatasetTimetable. :param dataset_timetable: The DatasetTimetable instance to test.
def test_infer_manual_data_interval(dataset_timetable: DatasetOrTimeSchedule) -> None:
    """
    Tests the infer_manual_data_interval method of DatasetTimetable.

    :param dataset_timetable: The DatasetTimetable instance to test.
    """
    run_after = DateTime.now()
    result = dataset_timetable.infer_manual_data_interval(run_after=run_after)
    assert isinstance(result, DataInterval)
Tests the next_dagrun_info method of DatasetTimetable. :param dataset_timetable: The DatasetTimetable instance to test.
def test_next_dagrun_info(dataset_timetable: DatasetOrTimeSchedule) -> None:
    """
    Tests the next_dagrun_info method of DatasetTimetable.

    :param dataset_timetable: The DatasetTimetable instance to test.
    """
    last_interval = DataInterval.exact(DateTime.now())
    restriction = TimeRestriction(earliest=DateTime.now(), latest=None, catchup=True)
    result = dataset_timetable.next_dagrun_info(
        last_automated_data_interval=last_interval, restriction=restriction
    )
    assert result is None or isinstance(result, DagRunInfo)
Tests the generate_run_id method of DatasetTimetable. :param dataset_timetable: The DatasetTimetable instance to test.
def test_generate_run_id(dataset_timetable: DatasetOrTimeSchedule) -> None:
    """
    Tests the generate_run_id method of DatasetTimetable.

    :param dataset_timetable: The DatasetTimetable instance to test.
    """
    run_id = dataset_timetable.generate_run_id(
        run_type=DagRunType.MANUAL, extra_args="test", logical_date=DateTime.now(), data_interval=None
    )
    assert isinstance(run_id, str)
Pytest fixture for creating mock DatasetEvent objects.
def dataset_events(mocker) -> list[DatasetEvent]:
    """Pytest fixture for creating mock DatasetEvent objects."""
    now = DateTime.now()
    earlier = now.subtract(days=1)
    later = now.add(days=1)

    # Create mock source_dag_run objects
    mock_dag_run_earlier = mocker.MagicMock()
    mock_dag_run_earlier.data_interval_start = earlier
    mock_dag_run_earlier.data_interval_end = now

    mock_dag_run_later = mocker.MagicMock()
    mock_dag_run_later.data_interval_start = now
    mock_dag_run_later.data_interval_end = later

    # Create DatasetEvent objects with mock source_dag_run
    event_earlier = DatasetEvent(timestamp=earlier, dataset_id=1)
    event_later = DatasetEvent(timestamp=later, dataset_id=1)

    # Use mocker to set the source_dag_run attribute to avoid SQLAlchemy's instrumentation
    mocker.patch.object(event_earlier, "source_dag_run", new=mock_dag_run_earlier)
    mocker.patch.object(event_later, "source_dag_run", new=mock_dag_run_later)

    return [event_earlier, event_later]
Tests the data_interval_for_events method of DatasetTimetable. :param dataset_timetable: The DatasetTimetable instance to test. :param dataset_events: A list of mock DatasetEvent instances.
def test_data_interval_for_events(
    dataset_timetable: DatasetOrTimeSchedule, dataset_events: list[DatasetEvent]
) -> None:
    """
    Tests the data_interval_for_events method of DatasetTimetable.

    :param dataset_timetable: The DatasetTimetable instance to test.
    :param dataset_events: A list of mock DatasetEvent instances.
    """
    data_interval = dataset_timetable.data_interval_for_events(
        logical_date=DateTime.now(), events=dataset_events
    )
    assert data_interval.start == min(
        event.timestamp for event in dataset_events
    ), "Data interval start does not match"
    assert data_interval.end == max(
        event.timestamp for event in dataset_events
    ), "Data interval end does not match"
Tests that DatasetOrTimeSchedule inherits run_ordering from its parent class correctly. :param dataset_timetable: The DatasetTimetable instance to test.
def test_run_ordering_inheritance(dataset_timetable: DatasetOrTimeSchedule) -> None:
    """
    Tests that DatasetOrTimeSchedule inherits run_ordering from its parent class correctly.

    :param dataset_timetable: The DatasetTimetable instance to test.
    """
    assert hasattr(
        dataset_timetable, "run_ordering"
    ), "DatasetOrTimeSchedule should have 'run_ordering' attribute"
    parent_run_ordering = getattr(DatasetTriggeredTimetable, "run_ordering", None)
    assert (
        dataset_timetable.run_ordering == parent_run_ordering
    ), "run_ordering does not match the parent class"
When not using strict event dates, manual runs have run_after as the data interval
def test_manual_with_unrestricted(unrestricted_timetable: Timetable, restriction: TimeRestriction):
    """When not using strict event dates, manual runs have run_after as the data interval"""
    manual_run_data_interval = unrestricted_timetable.infer_manual_data_interval(run_after=NON_EVENT_DATE)
    expected_data_interval = DataInterval.exact(NON_EVENT_DATE)
    assert expected_data_interval == manual_run_data_interval
Test that when using strict event dates, manual runs after the first event use the most recent event's date as the data interval start
def test_manual_with_restricted_middle(restricted_timetable: Timetable, restriction: TimeRestriction):
    """
    Test that when using strict event dates, manual runs after the first event use the most recent
    event's date as the data interval start
    """
    manual_run_data_interval = restricted_timetable.infer_manual_data_interval(run_after=NON_EVENT_DATE)
    expected_data_interval = DataInterval.exact(MOST_RECENT_EVENT)
    assert expected_data_interval == manual_run_data_interval
Test that when using strict event dates, manual runs before the first event use the first event's date as the data interval start
def test_manual_with_restricted_before(restricted_timetable: Timetable, restriction: TimeRestriction):
    """
    Test that when using strict event dates, manual runs before the first event use the first event's
    date as the data interval start
    """
    manual_run_data_interval = restricted_timetable.infer_manual_data_interval(run_after=BEFORE_DATE)
    expected_data_interval = DataInterval.exact(EVENT_DATES[0])
    assert expected_data_interval == manual_run_data_interval
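The unrestricted_timetable and restricted_timetable fixtures above presumably wrap Airflow's EventsTimetable, with restrict_to_events toggling the strict behaviour; the sketch below is an assumed construction with illustrative event dates, not the fixtures' actual definitions.

# Hedged sketch: assumed fixture shape, with illustrative event dates.
import pendulum

from airflow.timetables.events import EventsTimetable

event_dates = [pendulum.datetime(2021, 9, 6, tz="UTC"), pendulum.datetime(2021, 9, 21, tz="UTC")]
unrestricted = EventsTimetable(event_dates=event_dates)
restricted = EventsTimetable(event_dates=event_dates, restrict_to_events=True)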
Each of the next four subsequent runs covers one of the next four weekdays.
def test_subsequent_weekday_schedule(
    unrestricted_timetable: Timetable,
    restriction: TimeRestriction,
    last_automated_data_interval: DataInterval,
    expected_next_info: DagRunInfo,
):
    """Each of the next four subsequent runs covers one of the next four weekdays."""
    next_info = unrestricted_timetable.next_dagrun_info(
        last_automated_data_interval=last_automated_data_interval,
        restriction=restriction,
    )
    assert next_info == expected_next_info
If ``catchup=False`` and the start date is a day before, the first run covers the interval from yesterday's scheduled time to today's scheduled time.
def test_no_catchup_first_starts_at_current_time(
    last_automated_data_interval: DataInterval | None,
) -> None:
    """
    If ``catchup=False`` and the start date is a day before, the first run covers the interval from
    yesterday's scheduled time to today's scheduled time.
    """
    next_info = CRON_TIMETABLE.next_dagrun_info(
        last_automated_data_interval=last_automated_data_interval,
        restriction=TimeRestriction(earliest=YESTERDAY, latest=None, catchup=False),
    )
    expected_start = YESTERDAY + DELTA_FROM_MIDNIGHT
    assert next_info == DagRunInfo.interval(start=expected_start, end=CURRENT_TIME + DELTA_FROM_MIDNIGHT)
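To reproduce this catchup behaviour outside the fixture constants, here is a hedged sketch assuming CRON_TIMETABLE is a CronDataIntervalTimetable pinned at 16:30 UTC (matching DELTA_FROM_MIDNIGHT); the exact constants live in the test module.

# Hedged sketch of the behaviour exercised above; the cron expression is an assumption.
import pendulum

from airflow.timetables.base import TimeRestriction
from airflow.timetables.interval import CronDataIntervalTimetable

timetable = CronDataIntervalTimetable("30 16 * * *", timezone=pendulum.UTC)
yesterday = pendulum.today("UTC").subtract(days=1)

info = timetable.next_dagrun_info(
    last_automated_data_interval=None,
    restriction=TimeRestriction(earliest=yesterday, latest=None, catchup=False),
)
# With catchup disabled the returned interval is anchored near "now" instead of
# backfilling every 16:30 slot since `yesterday`.
print(info)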
First run after DAG has new schedule interval.
def test_new_schedule_interval_next_info_starts_at_new_time( earliest: pendulum.DateTime | None, catchup: bool, ) -> None: """First run after DAG has new schedule interval.""" next_info = CRON_TIMETABLE.next_dagrun_info( last_automated_data_interval=OLD_INTERVAL, restriction=TimeRestriction(earliest=earliest, latest=None, catchup=catchup), ) expected_start = YESTERDAY + datetime.timedelta(hours=16, minutes=30) expected_end = CURRENT_TIME + datetime.timedelta(hours=16, minutes=30) assert next_info == DagRunInfo.interval(start=expected_start, end=expected_end)
If ``catchup=False``, the next data interval ends at the current time.
def test_no_catchup_next_info_starts_at_current_time( timetable: Timetable, last_automated_data_interval: DataInterval | None, ) -> None: """If ``catchup=False``, the next data interval ends at the current time.""" next_info = timetable.next_dagrun_info( last_automated_data_interval=last_automated_data_interval, restriction=TimeRestriction(earliest=START_DATE, latest=None, catchup=False), ) expected_start = CURRENT_TIME - datetime.timedelta(hours=1) assert next_info == DagRunInfo.interval(start=expected_start, end=CURRENT_TIME)
If ``catchup=True``, the next data interval starts at the end of the previous one.
def test_catchup_next_info_starts_at_previous_interval_end(timetable: Timetable) -> None:
    """If ``catchup=True``, the next data interval starts at the end of the previous one."""
    next_info = timetable.next_dagrun_info(
        last_automated_data_interval=PREV_DATA_INTERVAL,
        restriction=TimeRestriction(earliest=START_DATE, latest=None, catchup=True),
    )
    expected_end = PREV_DATA_INTERVAL_END + datetime.timedelta(hours=1)
    assert next_info == DagRunInfo.interval(start=PREV_DATA_INTERVAL_END, end=expected_end)
If ``catchup=False`` and the start date is a day before, the first run is scheduled at the next cron trigger time instead of catching up from the start date.
def test_daily_cron_trigger_no_catchup_first_starts_at_next_schedule(
    last_automated_data_interval: DataInterval | None,
    next_start_time: pendulum.DateTime,
) -> None:
    """
    If ``catchup=False`` and the start date is a day before, the first run is scheduled at the next
    cron trigger time instead of catching up from the start date.
    """
    timetable = CronTriggerTimetable("30 16 * * *", timezone=utc)
    next_info = timetable.next_dagrun_info(
        last_automated_data_interval=last_automated_data_interval,
        restriction=TimeRestriction(earliest=YESTERDAY, latest=None, catchup=False),
    )
    assert next_info == DagRunInfo.exact(next_start_time)
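The key difference exercised here is that CronTriggerTimetable schedules point-in-time runs, so next_dagrun_info returns a DagRunInfo whose data interval collapses to the trigger moment. A hedged sketch:

# Hedged sketch: CronTriggerTimetable produces exact-moment runs (DagRunInfo.exact),
# unlike CronDataIntervalTimetable, which produces interval-based runs.
import pendulum

from airflow.timetables.base import TimeRestriction
from airflow.timetables.trigger import CronTriggerTimetable

timetable = CronTriggerTimetable("30 16 * * *", timezone=pendulum.UTC)
info = timetable.next_dagrun_info(
    last_automated_data_interval=None,
    restriction=TimeRestriction(
        earliest=pendulum.today("UTC").subtract(days=1), latest=None, catchup=False
    ),
)
if info is not None:
    # The data interval collapses to a single point in time.
    assert info.data_interval.start == info.data_interval.end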
Since the DAG starts on Saturday and the first Monday is a holiday, the first-ever run covers the following Tuesday and is scheduled on Wednesday.
def test_first_schedule(timetable: Timetable, restriction: TimeRestriction):
    """Since the DAG starts on Saturday and the first Monday is a holiday, the first-ever run covers
    the following Tuesday and is scheduled on Wednesday."""
    next_info = timetable.next_dagrun_info(last_automated_data_interval=None, restriction=restriction)
    assert next_info == DagRunInfo.interval(WEEK_1_WEEKDAYS[1], WEEK_1_WEEKDAYS[2])
Each of the next four subsequent runs covers one of the next four weekdays.
def test_subsequent_weekday_schedule(
    timetable: Timetable,
    restriction: TimeRestriction,
    last_automated_data_interval: DataInterval,
    expected_next_info: DagRunInfo,
):
    """Each of the next four subsequent runs covers one of the next four weekdays."""
    next_info = timetable.next_dagrun_info(
        last_automated_data_interval=last_automated_data_interval,
        restriction=restriction,
    )
    assert next_info == expected_next_info
The run after Friday's run covers Monday.
def test_next_schedule_after_friday(timetable: Timetable, restriction: TimeRestriction): """The run after Friday's run covers Monday.""" last_automated_data_interval = DataInterval(WEEK_1_WEEKDAYS[-1], WEEK_1_SATURDAY) expected_next_info = DagRunInfo.interval(WEEK_2_MONDAY, WEEK_2_TUESDAY) next_info = timetable.next_dagrun_info( last_automated_data_interval=last_automated_data_interval, restriction=restriction, ) assert next_info == expected_next_info
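The three tests above exercise a custom workday timetable. As a rough illustration of what such a timetable has to implement, here is a hedged, simplified sketch that only skips weekends; the real fixture also handles holidays, and the class below is an assumption rather than the code under test.

# Hedged sketch of a weekday-only custom timetable (holiday handling omitted).
from __future__ import annotations

import datetime

from pendulum import UTC, DateTime, Time

from airflow.timetables.base import DagRunInfo, DataInterval, TimeRestriction, Timetable


class WeekdayOnlyTimetable(Timetable):
    """One run per weekday; Saturday and Sunday are skipped."""

    def infer_manual_data_interval(self, *, run_after: DateTime) -> DataInterval:
        # A manual run covers the previous full day.
        start = DateTime.combine(
            (run_after - datetime.timedelta(days=1)).date(), Time.min
        ).replace(tzinfo=UTC)
        return DataInterval(start=start, end=start + datetime.timedelta(days=1))

    def next_dagrun_info(
        self,
        *,
        last_automated_data_interval: DataInterval | None,
        restriction: TimeRestriction,
    ) -> DagRunInfo | None:
        if last_automated_data_interval is not None:
            next_start = last_automated_data_interval.end
        elif restriction.earliest is not None:
            next_start = DateTime.combine(restriction.earliest.date(), Time.min).replace(tzinfo=UTC)
        else:
            return None  # No start date, so nothing to schedule.
        while next_start.weekday() >= 5:  # 5 = Saturday, 6 = Sunday
            next_start = next_start + datetime.timedelta(days=1)
        if restriction.latest is not None and next_start > restriction.latest:
            return None  # Past the DAG's end date.
        return DagRunInfo.interval(start=next_start, end=next_start + datetime.timedelta(days=1))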
A simple DAG with a single task. NotPreviouslySkippedDep is met.
def test_no_parent(session, dag_maker): """ A simple DAG with a single task. NotPreviouslySkippedDep is met. """ start_date = pendulum.datetime(2020, 1, 1) with dag_maker( "test_test_no_parent_dag", schedule=None, start_date=start_date, session=session, ): op1 = EmptyOperator(task_id="op1") (ti1,) = dag_maker.create_dagrun(execution_date=start_date).task_instances ti1.refresh_from_task(op1) dep = NotPreviouslySkippedDep() assert len(list(dep.get_dep_statuses(ti1, session, DepContext()))) == 0 assert dep.is_met(ti1, session) assert ti1.state != State.SKIPPED
A simple DAG with no branching. Both op1 and op2 are EmptyOperator. NotPreviouslySkippedDep is met.
def test_no_skipmixin_parent(session, dag_maker): """ A simple DAG with no branching. Both op1 and op2 are EmptyOperator. NotPreviouslySkippedDep is met. """ start_date = pendulum.datetime(2020, 1, 1) with dag_maker( "test_no_skipmixin_parent_dag", schedule=None, start_date=start_date, session=session, ): op1 = EmptyOperator(task_id="op1") op2 = EmptyOperator(task_id="op2") op1 >> op2 _, ti2 = dag_maker.create_dagrun().task_instances ti2.refresh_from_task(op2) dep = NotPreviouslySkippedDep() assert len(list(dep.get_dep_statuses(ti2, session, DepContext()))) == 0 assert dep.is_met(ti2, session) assert ti2.state != State.SKIPPED
A simple DAG with a BranchPythonOperator that follows op2. NotPreviouslySkippedDep is met.
def test_parent_follow_branch(session, dag_maker): """ A simple DAG with a BranchPythonOperator that follows op2. NotPreviouslySkippedDep is met. """ start_date = pendulum.datetime(2020, 1, 1) with dag_maker( "test_parent_follow_branch_dag", schedule=None, start_date=start_date, session=session, ): op1 = BranchPythonOperator(task_id="op1", python_callable=lambda: "op2") op2 = EmptyOperator(task_id="op2") op1 >> op2 dagrun = dag_maker.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING) ti, ti2 = dagrun.task_instances ti.run() dep = NotPreviouslySkippedDep() assert len(list(dep.get_dep_statuses(ti2, session, DepContext()))) == 0 assert dep.is_met(ti2, session) assert ti2.state != State.SKIPPED
A simple DAG with a BranchPythonOperator that does not follow op2. NotPreviouslySkippedDep is not met.
def test_parent_skip_branch(session, dag_maker): """ A simple DAG with a BranchPythonOperator that does not follow op2. NotPreviouslySkippedDep is not met. """ start_date = pendulum.datetime(2020, 1, 1) with dag_maker( "test_parent_skip_branch_dag", schedule=None, start_date=start_date, session=session, ): op1 = BranchPythonOperator(task_id="op1", python_callable=lambda: "op3") op2 = EmptyOperator(task_id="op2") op3 = EmptyOperator(task_id="op3") op1 >> [op2, op3] tis = { ti.task_id: ti for ti in dag_maker.create_dagrun(run_type=DagRunType.MANUAL, state=State.RUNNING).task_instances } tis["op1"].run() dep = NotPreviouslySkippedDep() assert len(list(dep.get_dep_statuses(tis["op2"], session, DepContext()))) == 1 assert not dep.is_met(tis["op2"], session) assert tis["op2"].state == State.SKIPPED
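For reference, the branching pattern that NotPreviouslySkippedDep reacts to looks like this in a plain DAG file; the DAG id and dates below are illustrative.

# Hedged sketch: the task id returned by the branch callable is followed, every other
# direct downstream task is skipped.
import pendulum

from airflow.models.dag import DAG
from airflow.operators.empty import EmptyOperator
from airflow.operators.python import BranchPythonOperator

with DAG(dag_id="branching_sketch", schedule=None, start_date=pendulum.datetime(2020, 1, 1)):
    branch = BranchPythonOperator(task_id="branch", python_callable=lambda: "followed")
    followed = EmptyOperator(task_id="followed")
    skipped = EmptyOperator(task_id="skipped")  # ends up SKIPPED, which the dep detects downstream
    branch >> [followed, skipped]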
A simple DAG with a BranchPythonOperator that does not follow op2. Parent task is not yet executed (no xcom data). NotPreviouslySkippedDep is met (no decision).
def test_parent_not_executed(session, dag_maker): """ A simple DAG with a BranchPythonOperator that does not follow op2. Parent task is not yet executed (no xcom data). NotPreviouslySkippedDep is met (no decision). """ start_date = pendulum.datetime(2020, 1, 1) with dag_maker( "test_parent_not_executed_dag", schedule=None, start_date=start_date, session=session, ): op1 = BranchPythonOperator(task_id="op1", python_callable=lambda: "op3") op2 = EmptyOperator(task_id="op2") op3 = EmptyOperator(task_id="op3") op1 >> [op2, op3] _, ti2, _ = dag_maker.create_dagrun().task_instances ti2.refresh_from_task(op2) dep = NotPreviouslySkippedDep() assert len(list(dep.get_dep_statuses(ti2, session, DepContext()))) == 0 assert dep.is_met(ti2, session) assert ti2.state == State.NONE
If the dag's execution date is in the future and either ``allow_trigger_in_future=False`` or the DAG has no schedule, this dep should fail
def test_exec_date_dep(
    dag_maker,
    session,
    create_dummy_dag,
    allow_trigger_in_future,
    schedule,
    execution_date,
    is_met,
):
    """
    If the dag's execution date is in the future and either ``allow_trigger_in_future=False`` or the
    DAG has no schedule, this dep should fail
    """
    with patch.object(settings, "ALLOW_FUTURE_EXEC_DATES", allow_trigger_in_future):
        create_dummy_dag(
            "test_localtaskjob_heartbeat",
            start_date=datetime(2015, 1, 1),
            end_date=datetime(2016, 11, 5),
            schedule=schedule,
            with_dagrun_type=DagRunType.MANUAL,
            session=session,
        )
        (ti,) = dag_maker.create_dagrun(execution_date=execution_date).task_instances
        assert RunnableExecDateDep().is_met(ti=ti) == is_met
If the dag's execution date is in the future this dep should fail
def test_exec_date_after_end_date(session, dag_maker, create_dummy_dag): """ If the dag's execution date is in the future this dep should fail """ create_dummy_dag( "test_localtaskjob_heartbeat", start_date=datetime(2015, 1, 1), end_date=datetime(2016, 11, 5), schedule=None, with_dagrun_type=DagRunType.MANUAL, session=session, ) (ti,) = dag_maker.create_dagrun(execution_date=datetime(2016, 11, 2)).task_instances assert not RunnableExecDateDep().is_met(ti=ti)
t3 depends on t2, which depends on t1 for expansion. Since t1 has not yet run, t2 has not expanded yet, and we need to guarantee this lack of expansion does not fail the dependency-checking logic.
def test_mapped_task_check_before_expand(dag_maker, session, flag_upstream_failed): """ t3 depends on t2, which depends on t1 for expansion. Since t1 has not yet run, t2 has not expanded yet, and we need to guarantee this lack of expansion does not fail the dependency-checking logic. """ with dag_maker(session=session): @task def t(x): return x @task_group def tg(a): b = t.override(task_id="t2")(a) c = t.override(task_id="t3")(b) return c tg.expand(a=t([1, 2, 3])) dr: DagRun = dag_maker.create_dagrun() _test_trigger_rule( ti=next(ti for ti in dr.task_instances if ti.task_id == "tg.t3" and ti.map_index == -1), session=session, flag_upstream_failed=flag_upstream_failed, expected_reason="requires all upstream tasks to have succeeded, but found 1", )
t3 depends on t2, which was skipped before it was expanded. We need to guarantee this lack of expansion does not fail the dependency-checking logic.
def test_mapped_task_group_finished_upstream_before_expand( dag_maker, session, flag_upstream_failed, expected_ti_state ): """ t3 depends on t2, which was skipped before it was expanded. We need to guarantee this lack of expansion does not fail the dependency-checking logic. """ with dag_maker(session=session): @task def t(x): return x @task_group def tg(x): return t.override(task_id="t3")(x=x) t.override(task_id="t2").expand(x=t.override(task_id="t1")([1, 2])) >> tg.expand(x=[1, 2]) dr: DagRun = dag_maker.create_dagrun() tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)} tis["t2"].set_state(SKIPPED, session=session) session.flush() _test_trigger_rule( ti=tis["tg.t3"], session=session, flag_upstream_failed=flag_upstream_failed, expected_reason="requires all upstream tasks to have succeeded, but found 1", expected_ti_state=expected_ti_state, )
Dynamically mapped setup task with successful and removed upstream tasks. Expect the rule to succeed. The state is set to REMOVED for map indexes greater than or equal to the number of successful upstream tasks.
def test_setup_constraint_mapped_task_upstream_removed_and_success(
    dag_maker,
    session,
    get_mapped_task_dagrun,
    map_index,
    flag_upstream_failed,
    expected_ti_state,
):
    """
    Dynamically mapped setup task with successful and removed upstream tasks.
    Expect the rule to succeed.
    The state is set to REMOVED for map indexes greater than or equal to the number of successful
    upstream tasks.
    """
    dr, _, setup_task = get_mapped_task_dagrun(add_setup_tasks=True)

    ti = dr.get_task_instance(task_id="setup_3", map_index=map_index, session=session)
    ti.task = setup_task

    _test_trigger_rule(
        ti=ti,
        session=session,
        flag_upstream_failed=flag_upstream_failed,
        expected_ti_state=expected_ti_state,
    )
Setup task with a skipped upstream task.
* If flag_upstream_failed is False, expect neither a failure nor a modified state.
* If flag_upstream_failed is True and wait_for_past_depends_before_skipping is False, expect the state to be set to SKIPPED but no failure.
* If both flag_upstream_failed and wait_for_past_depends_before_skipping are True, then if the past depends are met the state is expected to be SKIPPED with no failure; otherwise the state is not expected to change but the trigger rule should fail.
def test_setup_constraint_wait_for_past_depends_before_skipping( dag_maker, session, get_task_instance, monkeypatch, flag_upstream_failed, wait_for_past_depends_before_skipping, past_depends_met, expected_ti_state, expect_failure, ): """ Setup task with a skipped upstream task. * If flag_upstream_failed is False then do not expect either a failure nor a modified state. * If flag_upstream_failed is True and wait_for_past_depends_before_skipping is False then expect the state to be set to SKIPPED but no failure. * If both flag_upstream_failed and wait_for_past_depends_before_skipping are True then if the past depends are met the state is expected to be SKIPPED and no failure, otherwise the state is not expected to change but the trigger rule should fail. """ ti = get_task_instance( trigger_rule=TriggerRule.ALL_DONE, success=1, skipped=1, failed=0, removed=0, upstream_failed=0, done=2, setup_tasks=["FakeTaskID", "OtherFakeTaskID"], ) ti.task.xcom_pull.return_value = None xcom_mock = Mock(return_value=True if past_depends_met else None) with mock.patch("airflow.models.taskinstance.TaskInstance.xcom_pull", xcom_mock): _test_trigger_rule( ti=ti, session=session, flag_upstream_failed=flag_upstream_failed, wait_for_past_depends_before_skipping=wait_for_past_depends_before_skipping, expected_ti_state=expected_ti_state, expected_reason=( "Task should be skipped but the past depends are not met" if expect_failure else "" ), )
t3 indirectly depends on t1, which was skipped before it was expanded. We need to guarantee this lack of expansion does not fail the dependency-checking logic.
def test_setup_mapped_task_group_finished_upstream_before_expand( dag_maker, session, flag_upstream_failed, expected_ti_state ): """ t3 indirectly depends on t1, which was skipped before it was expanded. We need to guarantee this lack of expansion does not fail the dependency-checking logic. """ with dag_maker(session=session): @task(trigger_rule=TriggerRule.ALL_DONE) def t(x): return x @task_group def tg(x): return t.override(task_id="t3")(x=x) vals = t.override(task_id="t1")([1, 2]).as_setup() t.override(task_id="t2").expand(x=vals).as_setup() >> tg.expand(x=[1, 2]).as_setup() dr: DagRun = dag_maker.create_dagrun() tis = {ti.task_id: ti for ti in dr.get_task_instances(session=session)} tis["t1"].set_state(SKIPPED, session=session) tis["t2"].set_state(SUCCESS, session=session) session.flush() _test_trigger_rule( ti=tis["tg.t3"], session=session, flag_upstream_failed=flag_upstream_failed, expected_reason="All setup tasks must complete successfully.", expected_ti_state=expected_ti_state, )
Tests that the DateTimeTrigger validates the ``moment`` argument; it should only accept a datetime.
def test_input_validation():
    """
    Tests that the DateTimeTrigger validates the ``moment`` argument; it should only accept a datetime.
    """
    with pytest.raises(TypeError, match="Expected datetime.datetime type for moment. Got <class 'str'>"):
        DateTimeTrigger("2012-01-01T03:03:03+00:00")
Tests that the DateTimeTrigger validates the ``moment`` argument; it should not accept a naive datetime.
def test_input_validation_tz():
    """
    Tests that the DateTimeTrigger validates the ``moment`` argument; it should not accept a naive datetime.
    """
    moment = datetime.datetime(2013, 3, 31, 0, 59, 59)
    with pytest.raises(ValueError, match="You cannot pass naive datetimes"):
        DateTimeTrigger(moment)
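For contrast with the two rejection cases above, a valid construction passes a timezone-aware datetime object:

# Hedged sketch of the accepted input: an aware datetime, not a string or a naive datetime.
import pendulum

from airflow.triggers.temporal import DateTimeTrigger

trigger = DateTimeTrigger(pendulum.datetime(2012, 1, 1, 3, 3, 3, tz="UTC"))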
Tests that the DateTimeTrigger correctly serializes its arguments and classpath.
def test_datetime_trigger_serialization(): """ Tests that the DateTimeTrigger correctly serializes its arguments and classpath. """ moment = pendulum.instance(datetime.datetime(2020, 4, 1, 13, 0), pendulum.UTC) trigger = DateTimeTrigger(moment) classpath, kwargs = trigger.serialize() assert classpath == "airflow.triggers.temporal.DateTimeTrigger" assert kwargs == {"moment": moment}
Tests that the TimeDeltaTrigger correctly serializes its arguments and classpath (it turns into a DateTimeTrigger).
def test_timedelta_trigger_serialization(): """ Tests that the TimeDeltaTrigger correctly serializes its arguments and classpath (it turns into a DateTimeTrigger). """ trigger = TimeDeltaTrigger(datetime.timedelta(seconds=10)) expected_moment = timezone.utcnow() + datetime.timedelta(seconds=10) classpath, kwargs = trigger.serialize() assert classpath == "airflow.triggers.temporal.DateTimeTrigger" # We need to allow for a little time difference to avoid this test being # flaky if it runs over the boundary of a single second assert -2 < (kwargs["moment"] - expected_moment).total_seconds() < 2
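In normal use these triggers are handed to the triggerer by deferring a task; a hedged sketch of that pattern follows, where the operator name and resume method are illustrative assumptions.

# Hedged sketch of deferring onto a TimeDeltaTrigger; the operator is illustrative.
import datetime

from airflow.models.baseoperator import BaseOperator
from airflow.triggers.temporal import TimeDeltaTrigger


class WaitFiveMinutesOperator(BaseOperator):
    def execute(self, context):
        # Free the worker slot; the triggerer resumes the task in `execute_complete`
        # once the trigger fires.
        self.defer(
            trigger=TimeDeltaTrigger(datetime.timedelta(minutes=5)),
            method_name="execute_complete",
        )

    def execute_complete(self, context, event=None):
        self.log.info("Done waiting")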
Register a failing pre-execution callback and restore the original callbacks when closed.
def fail_action_logger_callback():
    """Register a failing pre-execution callback and restore the original callbacks when closed."""
    tmp = cli_action_loggers.__pre_exec_callbacks[:]

    def fail_callback(**_):
        raise NotImplementedError

    cli_action_loggers.register_pre_exec_callback(fail_callback)
    yield
    cli_action_loggers.__pre_exec_callbacks = tmp
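For comparison, registering a well-behaved pre-execution callback uses the same API the fixture above exercises with a deliberately failing one:

# Hedged sketch of a non-failing callback; the printed message is illustrative.
from airflow.utils import cli_action_loggers


def audit_cli_call(**kwargs):
    # kwargs carry metadata about the CLI invocation being executed.
    print(f"about to execute CLI command: {kwargs}")


cli_action_loggers.register_pre_exec_callback(audit_cli_call)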
Temporarily load the deprecated test configuration.
def use_config(config: str):
    """
    Temporarily load the deprecated test configuration.
    """
    sections, proxies = remove_all_configurations()
    conf.read(str(Path(__file__).parents[1] / "config_templates" / config))
    try:
        yield
    finally:
        restore_all_configurations(sections, proxies)
Temporarily replace deprecated options with the ones provided.
def set_deprecated_options(deprecated_options: dict[tuple[str, str], tuple[str, str, str]]):
    """
    Temporarily replace deprecated options with the ones provided.
    """
    old_deprecated_options = conf.deprecated_options
    conf.deprecated_options = deprecated_options
    try:
        yield
    finally:
        conf.deprecated_options = old_deprecated_options
Temporarily replace sensitive config values with the ones provided.
def set_sensitive_config_values(sensitive_config_values: set[tuple[str, str]]):
    """
    Temporarily replace sensitive config values with the ones provided.
    """
    old_sensitive_config_values = conf.sensitive_config_values
    conf.sensitive_config_values = sensitive_config_values
    try:
        yield
    finally:
        conf.sensitive_config_values = old_sensitive_config_values
Fixture that cleans the database before and after every test.
def clean_database(): """Fixture that cleans the database before and after every test.""" clear_db_runs() clear_db_datasets() clear_db_dags() yield # Test runs here clear_db_dags() clear_db_datasets() clear_db_runs()
Creates a test DAG with a few operators to test on.
def test_dag(): """Creates a test DAG with a few operators to test on.""" def f(task_id): return f"OP:{task_id}" with DAG(dag_id="test_xcom_dag", default_args=DEFAULT_ARGS) as dag: operators = [PythonOperator(python_callable=f, task_id=f"test_op_{i}") for i in range(4)] return dag, operators
Creates a test DAG with a few operators to test on, with some in a task group.
def test_taskgroup_dag(): """Creates a test DAG with a few operators to test on, with some in a task group.""" def f(task_id): return f"OP:{task_id}" with DAG(dag_id="test_xcom_dag", default_args=DEFAULT_ARGS) as dag: op1 = PythonOperator(python_callable=f, task_id="test_op_1") op4 = PythonOperator(python_callable=f, task_id="test_op_4") with TaskGroup("group_1") as group: op2 = PythonOperator(python_callable=f, task_id="test_op_2") op3 = PythonOperator(python_callable=f, task_id="test_op_3") return dag, group, (op1, op2, op3, op4)
Creates a test DAG with many operators and a task group.
def test_complex_taskgroup_dag(): """Creates a test DAG with many operators and a task group.""" def f(task_id): return f"OP:{task_id}" with DAG(dag_id="test_complex_dag", default_args=DEFAULT_ARGS) as dag: with TaskGroup("group_1") as group: group_emp1 = EmptyOperator(task_id="group_empty1") group_emp2 = EmptyOperator(task_id="group_empty2") group_emp3 = EmptyOperator(task_id="group_empty3") emp_in1 = EmptyOperator(task_id="empty_in1") emp_in2 = EmptyOperator(task_id="empty_in2") emp_in3 = EmptyOperator(task_id="empty_in3") emp_in4 = EmptyOperator(task_id="empty_in4") emp_out1 = EmptyOperator(task_id="empty_out1") emp_out2 = EmptyOperator(task_id="empty_out2") emp_out3 = EmptyOperator(task_id="empty_out3") emp_out4 = EmptyOperator(task_id="empty_out4") op_in1 = PythonOperator(python_callable=f, task_id="op_in1") op_out1 = PythonOperator(python_callable=f, task_id="op_out1") return ( dag, group, ( group_emp1, group_emp2, group_emp3, emp_in1, emp_in2, emp_in3, emp_in4, emp_out1, emp_out2, emp_out3, emp_out4, op_in1, op_out1, ), )
Creates a test DAG with many operators and multiple task groups.
def test_multiple_taskgroups_dag(): """Creates a test DAG with many operators and multiple task groups.""" def f(task_id): return f"OP:{task_id}" with DAG(dag_id="test_multiple_task_group_dag", default_args=DEFAULT_ARGS) as dag: with TaskGroup("group1") as group1: group1_emp1 = EmptyOperator(task_id="group1_empty1") group1_emp2 = EmptyOperator(task_id="group1_empty2") group1_emp3 = EmptyOperator(task_id="group1_empty3") with TaskGroup("group2") as group2: group2_emp1 = EmptyOperator(task_id="group2_empty1") group2_emp2 = EmptyOperator(task_id="group2_empty2") group2_emp3 = EmptyOperator(task_id="group2_empty3") group2_op1 = PythonOperator(python_callable=f, task_id="group2_op1") group2_op2 = PythonOperator(python_callable=f, task_id="group2_op2") with TaskGroup("group3") as group3: group3_emp1 = EmptyOperator(task_id="group3_empty1") group3_emp2 = EmptyOperator(task_id="group3_empty2") group3_emp3 = EmptyOperator(task_id="group3_empty3") emp_in1 = EmptyOperator(task_id="empty_in1") emp_in2 = EmptyOperator(task_id="empty_in2") emp_in3 = EmptyOperator(task_id="empty_in3") emp_in4 = EmptyOperator(task_id="empty_in4") emp_out1 = EmptyOperator(task_id="empty_out1") emp_out2 = EmptyOperator(task_id="empty_out2") emp_out3 = EmptyOperator(task_id="empty_out3") emp_out4 = EmptyOperator(task_id="empty_out4") op_in1 = PythonOperator(python_callable=f, task_id="op_in1") op_out1 = PythonOperator(python_callable=f, task_id="op_out1") return ( dag, group1, group2, group3, ( group1_emp1, group1_emp2, group1_emp3, group2_emp1, group2_emp2, group2_emp3, group2_op1, group2_op2, group3_emp1, group3_emp2, group3_emp3, emp_in1, emp_in2, emp_in3, emp_in4, emp_out1, emp_out2, emp_out3, emp_out4, op_in1, op_out1, ), )
Notice there are two messages with timestamp `2023-01-17T12:47:11.883-0800`. In this case, they should appear in the correct order and be deduplicated in the result.
def test_interleave_logs_correct_ordering(): """ Notice there are two messages with timestamp `2023-01-17T12:47:11.883-0800`. In this case, these should appear in correct order and be deduped in result. """ sample_with_dupe = """[2023-01-17T12:46:55.868-0800] {temporal.py:62} INFO - trigger starting [2023-01-17T12:46:55.868-0800] {temporal.py:71} INFO - sleeping 1 second... [2023-01-17T12:47:09.882-0800] {temporal.py:71} INFO - sleeping 1 second... [2023-01-17T12:47:10.882-0800] {temporal.py:71} INFO - sleeping 1 second... [2023-01-17T12:47:11.883-0800] {temporal.py:74} INFO - yielding event with payload DateTime(2023, 1, 17, 20, 47, 11, 254388, tzinfo=Timezone('UTC')) [2023-01-17T12:47:11.883-0800] {triggerer_job.py:540} INFO - Trigger <airflow.triggers.temporal.DateTimeTrigger moment=2023-01-17T20:47:11.254388+00:00> (ID 1) fired: TriggerEvent<DateTime(2023, 1, 17, 20, 47, 11, 254388, tzinfo=Timezone('UTC'))> """ assert sample_with_dupe == "\n".join(_interleave_logs(sample_with_dupe, "", sample_with_dupe))
Make sure DagRunState.QUEUED is converted to string 'queued' when referenced in DB query
def test_dagrun_state_enum_escape(): """ Make sure DagRunState.QUEUED is converted to string 'queued' when referenced in DB query """ with create_session() as session: dag = DAG(dag_id="test_dagrun_state_enum_escape", start_date=DEFAULT_DATE) dag.create_dagrun( run_type=DagRunType.SCHEDULED, state=DagRunState.QUEUED, execution_date=DEFAULT_DATE, start_date=DEFAULT_DATE, data_interval=dag.timetable.infer_manual_data_interval(run_after=DEFAULT_DATE), session=session, ) query = session.query( DagRun.dag_id, DagRun.state, DagRun.run_type, ).filter( DagRun.dag_id == dag.dag_id, # make sure enum value can be used in filter queries DagRun.state == DagRunState.QUEUED, ) assert str(query.statement.compile(compile_kwargs={"literal_binds": True})) == ( "SELECT dag_run.dag_id, dag_run.state, dag_run.run_type \n" "FROM dag_run \n" "WHERE dag_run.dag_id = 'test_dagrun_state_enum_escape' AND dag_run.state = 'queued'" ) rows = query.all() assert len(rows) == 1 assert rows[0].dag_id == dag.dag_id # make sure value in db is stored as `queued`, not `DagRunType.QUEUED` assert rows[0].state == "queued" session.rollback()
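The escaping works because DagRunState is a str-backed enum, so its members compare equal to, and bind as, their plain string values; a minimal check:

# Hedged sketch: DagRunState subclasses str, so enum members behave like plain strings
# in comparisons and in SQLAlchemy filters.
from airflow.utils.state import DagRunState

assert DagRunState.QUEUED == "queued"
assert DagRunState.QUEUED.value == "queued"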
This is an alternative syntax to use TaskGroup. It should result in the same TaskGroup as using context manager.
def test_build_task_group(): """ This is an alternative syntax to use TaskGroup. It should result in the same TaskGroup as using context manager. """ execution_date = pendulum.parse("20200101") dag = DAG("test_build_task_group", start_date=execution_date) task1 = EmptyOperator(task_id="task1", dag=dag) group234 = TaskGroup("group234", dag=dag) _ = EmptyOperator(task_id="task2", dag=dag, task_group=group234) group34 = TaskGroup("group34", dag=dag, parent_group=group234) _ = EmptyOperator(task_id="task3", dag=dag, task_group=group34) _ = EmptyOperator(task_id="task4", dag=dag, task_group=group34) task5 = EmptyOperator(task_id="task5", dag=dag) task1 >> group234 group34 >> task5 assert task_group_to_dict(dag.task_group) == EXPECTED_JSON
Tests that prefix_group_id turns on/off prefixing of task_id with group_id.
def test_build_task_group_with_prefix(): """ Tests that prefix_group_id turns on/off prefixing of task_id with group_id. """ execution_date = pendulum.parse("20200101") with DAG("test_build_task_group_with_prefix", start_date=execution_date) as dag: task1 = EmptyOperator(task_id="task1") with TaskGroup("group234", prefix_group_id=False) as group234: task2 = EmptyOperator(task_id="task2") with TaskGroup("group34") as group34: task3 = EmptyOperator(task_id="task3") with TaskGroup("group4", prefix_group_id=False) as group4: task4 = EmptyOperator(task_id="task4") task5 = EmptyOperator(task_id="task5") task1 >> group234 group34 >> task5 assert task2.task_id == "task2" assert group34.group_id == "group34" assert task3.task_id == "group34.task3" assert group4.group_id == "group34.group4" assert task4.task_id == "task4" assert task5.task_id == "task5" assert group234.get_child_by_label("task2") == task2 assert group234.get_child_by_label("group34") == group34 assert group4.get_child_by_label("task4") == task4 assert extract_node_id(task_group_to_dict(dag.task_group), include_label=True) == { "id": None, "label": None, "children": [ { "id": "group234", "label": "group234", "children": [ { "id": "group34", "label": "group34", "children": [ { "id": "group34.group4", "label": "group4", "children": [{"id": "task4", "label": "task4"}], }, {"id": "group34.task3", "label": "task3"}, {"id": "group34.downstream_join_id", "label": ""}, ], }, {"id": "task2", "label": "task2"}, {"id": "group234.upstream_join_id", "label": ""}, ], }, {"id": "task1", "label": "task1"}, {"id": "task5", "label": "task5"}, ], }
Test that TaskGroup can be used with the @task decorator.
def test_build_task_group_with_task_decorator(): """ Test that TaskGroup can be used with the @task decorator. """ from airflow.decorators import task @task def task_1(): print("task_1") @task def task_2(): return "task_2" @task def task_3(): return "task_3" @task def task_4(task_2_output, task_3_output): print(task_2_output, task_3_output) @task def task_5(): print("task_5") execution_date = pendulum.parse("20200101") with DAG("test_build_task_group_with_task_decorator", start_date=execution_date) as dag: tsk_1 = task_1() with TaskGroup("group234") as group234: tsk_2 = task_2() tsk_3 = task_3() tsk_4 = task_4(tsk_2, tsk_3) tsk_5 = task_5() tsk_1 >> group234 >> tsk_5 assert tsk_1.operator in tsk_2.operator.upstream_list assert tsk_1.operator in tsk_3.operator.upstream_list assert tsk_5.operator in tsk_4.operator.downstream_list assert extract_node_id(task_group_to_dict(dag.task_group)) == { "id": None, "children": [ { "id": "group234", "children": [ {"id": "group234.task_2"}, {"id": "group234.task_3"}, {"id": "group234.task_4"}, {"id": "group234.upstream_join_id"}, {"id": "group234.downstream_join_id"}, ], }, {"id": "task_1"}, {"id": "task_5"}, ], } edges = dag_edges(dag) assert sorted((e["source_id"], e["target_id"]) for e in edges) == [ ("group234.downstream_join_id", "task_5"), ("group234.task_2", "group234.task_4"), ("group234.task_3", "group234.task_4"), ("group234.task_4", "group234.downstream_join_id"), ("group234.upstream_join_id", "group234.task_2"), ("group234.upstream_join_id", "group234.task_3"), ("task_1", "group234.upstream_join_id"), ]
Tests dag.partial_subset() updates task_group correctly.
def test_sub_dag_task_group(): """ Tests dag.partial_subset() updates task_group correctly. """ execution_date = pendulum.parse("20200101") with DAG("test_test_task_group_sub_dag", start_date=execution_date) as dag: task1 = EmptyOperator(task_id="task1") with TaskGroup("group234") as group234: _ = EmptyOperator(task_id="task2") with TaskGroup("group34") as group34: _ = EmptyOperator(task_id="task3") _ = EmptyOperator(task_id="task4") with TaskGroup("group6") as group6: _ = EmptyOperator(task_id="task6") task7 = EmptyOperator(task_id="task7") task5 = EmptyOperator(task_id="task5") task1 >> group234 group34 >> task5 group234 >> group6 group234 >> task7 subdag = dag.partial_subset(task_ids_or_regex="task5", include_upstream=True, include_downstream=False) assert extract_node_id(task_group_to_dict(subdag.task_group)) == { "id": None, "children": [ { "id": "group234", "children": [ { "id": "group234.group34", "children": [ {"id": "group234.group34.task3"}, {"id": "group234.group34.task4"}, {"id": "group234.group34.downstream_join_id"}, ], }, {"id": "group234.upstream_join_id"}, ], }, {"id": "task1"}, {"id": "task5"}, ], } edges = dag_edges(subdag) assert sorted((e["source_id"], e["target_id"]) for e in edges) == [ ("group234.group34.downstream_join_id", "task5"), ("group234.group34.task3", "group234.group34.downstream_join_id"), ("group234.group34.task4", "group234.group34.downstream_join_id"), ("group234.upstream_join_id", "group234.group34.task3"), ("group234.upstream_join_id", "group234.group34.task4"), ("task1", "group234.upstream_join_id"), ] subdag_task_groups = subdag.task_group.get_task_group_dict() assert subdag_task_groups.keys() == {None, "group234", "group234.group34"} included_group_ids = {"group234", "group234.group34"} included_task_ids = {"group234.group34.task3", "group234.group34.task4", "task1", "task5"} for task_group in subdag_task_groups.values(): assert task_group.upstream_group_ids.issubset(included_group_ids) assert task_group.downstream_group_ids.issubset(included_group_ids) assert task_group.upstream_task_ids.issubset(included_task_ids) assert task_group.downstream_task_ids.issubset(included_task_ids) for task in subdag.task_group: assert task.upstream_task_ids.issubset(included_task_ids) assert task.downstream_task_ids.issubset(included_task_ids)
Test that if a task doesn't have a DAG when it's being set as the relative of another task which has a DAG, the task should be added to the root TaskGroup of the other task's DAG.
def test_task_without_dag(): """ Test that if a task doesn't have a DAG when it's being set as the relative of another task which has a DAG, the task should be added to the root TaskGroup of the other task's DAG. """ dag = DAG(dag_id="test_task_without_dag", start_date=pendulum.parse("20200101")) op1 = EmptyOperator(task_id="op1", dag=dag) op2 = EmptyOperator(task_id="op2") op3 = EmptyOperator(task_id="op3") op1 >> op2 op3 >> op2 assert op1.dag == op2.dag == op3.dag assert dag.task_group.children.keys() == {"op1", "op2", "op3"} assert dag.task_group.children.keys() == dag.task_dict.keys()
Tests the following: 1. Nested TaskGroup creation using the taskgroup decorator should create the same TaskGroup as using the TaskGroup context manager. 2. A TaskGroup consisting of tasks created using the task decorator. 3. Node IDs of DAGs created with the taskgroup decorator.
def test_build_task_group_deco_context_manager(): """ Tests Following : 1. Nested TaskGroup creation using taskgroup decorator should create same TaskGroup which can be created using TaskGroup context manager. 2. TaskGroup consisting Tasks created using task decorator. 3. Node Ids of dags created with taskgroup decorator. """ from airflow.decorators import task # Creating Tasks @task def task_start(): """Dummy Task which is First Task of Dag""" return "[Task_start]" @task def task_end(): """Dummy Task which is Last Task of Dag""" print("[ Task_End ]") @task def task_1(value): """Dummy Task1""" return f"[ Task1 {value} ]" @task def task_2(value): """Dummy Task2""" print(f"[ Task2 {value} ]") @task def task_3(value): """Dummy Task3""" return f"[ Task3 {value} ]" @task def task_4(value): """Dummy Task3""" print(f"[ Task4 {value} ]") # Creating TaskGroups @task_group_decorator def section_1(value): """TaskGroup for grouping related Tasks""" @task_group_decorator() def section_2(value2): """TaskGroup for grouping related Tasks""" return task_4(task_3(value2)) op1 = task_2(task_1(value)) return section_2(op1) execution_date = pendulum.parse("20201109") with DAG( dag_id="example_nested_task_group_decorator", start_date=execution_date, tags=["example"] ) as dag: t_start = task_start() sec_1 = section_1(t_start) sec_1.set_downstream(task_end()) # Testing TaskGroup created using taskgroup decorator assert set(dag.task_group.children.keys()) == {"task_start", "task_end", "section_1"} assert set(dag.task_group.children["section_1"].children.keys()) == { "section_1.task_1", "section_1.task_2", "section_1.section_2", } # Testing TaskGroup consisting Tasks created using task decorator assert dag.task_dict["task_start"].downstream_task_ids == {"section_1.task_1"} assert dag.task_dict["section_1.task_2"].downstream_task_ids == {"section_1.section_2.task_3"} assert dag.task_dict["section_1.section_2.task_4"].downstream_task_ids == {"task_end"} # Node IDs test node_ids = { "id": None, "children": [ { "id": "section_1", "children": [ { "id": "section_1.section_2", "children": [ {"id": "section_1.section_2.task_3"}, {"id": "section_1.section_2.task_4"}, ], }, {"id": "section_1.task_1"}, {"id": "section_1.task_2"}, ], }, {"id": "task_end"}, {"id": "task_start"}, ], } assert extract_node_id(task_group_to_dict(dag.task_group)) == node_ids
A decorator-based task group should be able to be used as a relative to operators.
def test_build_task_group_depended_by_task(): """A decorator-based task group should be able to be used as a relative to operators.""" from airflow.decorators import dag as dag_decorator, task @dag_decorator(start_date=pendulum.now()) def build_task_group_depended_by_task(): @task def task_start(): return "[Task_start]" @task def task_end(): return "[Task_end]" @task def task_thing(value): return f"[Task_thing {value}]" @task_group_decorator def section_1(): task_thing(1) task_thing(2) task_start() >> section_1() >> task_end() dag = build_task_group_depended_by_task() task_thing_1 = dag.task_dict["section_1.task_thing"] task_thing_2 = dag.task_dict["section_1.task_thing__1"] # Tasks in the task group don't depend on each other; they both become # downstreams to task_start, and upstreams to task_end. assert task_thing_1.upstream_task_ids == task_thing_2.upstream_task_ids == {"task_start"} assert task_thing_1.downstream_task_ids == task_thing_2.downstream_task_ids == {"task_end"}