# File: promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_concurent_execution.py
import re
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._utils.exception_utils import ErrorResponse
from promptflow._utils.logger_utils import LogContext
from promptflow.contracts.run_info import Status
from promptflow.contracts.run_mode import RunMode
from promptflow.executor._flow_nodes_scheduler import RUN_FLOW_NODES_LINEARLY
from promptflow.executor._result import LineResult
from promptflow.executor.flow_executor import FlowExecutor
from ..utils import get_flow_inputs, get_yaml_file, load_content
TEST_ROOT = Path(__file__).parent.parent.parent
FLOWS_ROOT = TEST_ROOT / "test_configs/flows"
FLOW_FOLDER = "concurrent_execution_flow"
@pytest.mark.e2etest
class TestConcurrentExecution:
def test_concurrent_run(self):
logs_directory = Path(mkdtemp())
executor = FlowExecutor.create(get_yaml_file(FLOW_FOLDER), {})
flow_run_log_path = str(logs_directory / "test_flow_run.log")
# flow run: test exec_line
with LogContext(flow_run_log_path, run_mode=RunMode.Test):
results = executor.exec_line(get_flow_inputs(FLOW_FOLDER))
log_content = load_content(flow_run_log_path)
pattern = r"\[wait_(\d+) in line None.*Thread (\d+)"
matches = re.findall(pattern, log_content)
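# Verify that each wait_<n> node logged from a single thread; one node spanning multiple thread ids would mean the scheduler split a node across threads.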
wait_thread_mapping = {}
for wait, thread in matches:
if wait in wait_thread_mapping:
if wait_thread_mapping[wait] != thread:
raise Exception(f"wait_{wait} corresponds to more than one thread number")
else:
wait_thread_mapping[wait] = thread
self.assert_run_result(results)
assert (
results.run_info.system_metrics["duration"] < 10
), "running nodes concurrently should decrease the total run time."
def test_concurrent_run_with_exception(self):
executor = FlowExecutor.create(get_yaml_file(FLOW_FOLDER), {}, raise_ex=False)
flow_result = executor.exec_line({"input1": "True", "input2": "False", "input3": "False", "input4": "False"})
assert 2 < flow_result.run_info.system_metrics["duration"] < 4, "Should at least finish the running job."
error_response = ErrorResponse.from_error_dict(flow_result.run_info.error)
assert error_response.error_code_hierarchy == "UserError/ToolExecutionError"
def test_linear_run(self):
executor = FlowExecutor.create(get_yaml_file(FLOW_FOLDER), {})
# flow run: test exec_line run linearly
results = executor.exec_line(get_flow_inputs(FLOW_FOLDER), node_concurrency=RUN_FLOW_NODES_LINEARLY)
self.assert_run_result(results)
assert 15 > results.run_info.system_metrics["duration"] > 10, "running nodes linearly consumes more time."
def assert_run_result(self, result: LineResult):
# Validate the flow status
assert result.run_info.status == Status.Completed
# Validate the flow output
assert isinstance(result.output, dict)
# Validate the flow node run infos
assert len(result.node_run_infos) == 5
# File: promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_package_tool.py
import sys
from pathlib import Path
from unittest.mock import patch
import pytest
from promptflow._core._errors import PackageToolNotFoundError, ToolLoadError
from promptflow.contracts.run_info import Status
from promptflow.executor import FlowExecutor
from promptflow.executor._errors import NodeInputValidationError, ResolveToolError
from promptflow.executor._result import LineResult
from ..utils import WRONG_FLOW_ROOT, get_flow_package_tool_definition, get_flow_sample_inputs, get_yaml_file
PACKAGE_TOOL_BASE = Path(__file__).parent.parent / "package_tools"
PACKAGE_TOOL_ENTRY = "promptflow._core.tools_manager.collect_package_tools"
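# Make the local package_tools test fixtures importable as top-level packages.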
sys.path.insert(0, str(PACKAGE_TOOL_BASE.resolve()))
@pytest.mark.e2etest
class TestPackageTool:
def get_line_inputs(self, flow_folder=""):
if flow_folder:
inputs = self.get_bulk_inputs(flow_folder=flow_folder)
return inputs[0]
return {
"url": "https://www.microsoft.com/en-us/windows/",
"text": "some_text",
}
def get_bulk_inputs(self, nlines=4, flow_folder=""):
if flow_folder:
inputs = get_flow_sample_inputs(flow_folder)
if isinstance(inputs, list) and len(inputs) > 0:
return inputs
elif isinstance(inputs, dict):
return [inputs]
else:
raise Exception(f"Invalid type of bulk input: {inputs}")
return [self.get_line_inputs() for _ in range(nlines)]
def test_executor_package_tool_with_conn(self, mocker):
flow_folder = PACKAGE_TOOL_BASE / "tool_with_connection"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
mocker.patch(
"promptflow.tools.list.list_package_tools",
return_value=package_tool_definition,
)
name, secret = "dummy_name", "dummy_secret"
connections = {
"test_conn": {
"type": "TestConnection",
"value": {"name": name, "secret": secret},
}
}
executor = FlowExecutor.create(get_yaml_file(flow_folder), connections, raise_ex=True)
flow_result = executor.exec_line({})
assert flow_result.run_info.status == Status.Completed
assert len(flow_result.node_run_infos) == 1
for _, v in flow_result.node_run_infos.items():
assert v.status == Status.Completed
assert v.output == name + secret
@pytest.mark.skipif(sys.platform == "darwin", reason="Skip on Mac")
def test_executor_package_with_prompt_tool(self, dev_connections, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
with mocker.patch(PACKAGE_TOOL_ENTRY, return_value=package_tool_definition):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections, raise_ex=True)
bulk_inputs = self.get_bulk_inputs(flow_folder=flow_folder)
for i in bulk_inputs:
line_result = executor.exec_line(i)
assert isinstance(line_result, LineResult)
msg = f"Got {line_result.run_info.status} for input {i}"
assert line_result.run_info.status == Status.Completed, msg
def test_custom_llm_tool_with_duplicated_inputs(self, dev_connections, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool_with_duplicated_inputs"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
with mocker.patch(PACKAGE_TOOL_ENTRY, return_value=package_tool_definition):
msg = (
"Invalid inputs {'api'} in prompt template of node custom_llm_tool_with_duplicated_inputs. "
"These inputs are duplicated with the inputs of custom llm tool."
)
with pytest.raises(ResolveToolError, match=msg) as e:
FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
assert isinstance(e.value.inner_exception, NodeInputValidationError)
@pytest.mark.parametrize(
"flow_folder, error_class, inner_class, error_message",
[
(
"wrong_tool_in_package_tools",
ResolveToolError,
PackageToolNotFoundError,
"Tool load failed in 'search_by_text': (PackageToolNotFoundError) "
"Package tool 'promptflow.tools.serpapi.SerpAPI.search_11' is not found in the current environment. "
"All available package tools are: "
"['promptflow.tools.azure_content_safety.AzureContentSafety.analyze_text', "
"'promptflow.tools.azure_detect.AzureDetect.get_language'].",
),
(
"wrong_package_in_package_tools",
ResolveToolError,
PackageToolNotFoundError,
"Tool load failed in 'search_by_text': (PackageToolNotFoundError) "
"Package tool 'promptflow.tools.serpapi11.SerpAPI.search' is not found in the current environment. "
"All available package tools are: "
"['promptflow.tools.azure_content_safety.AzureContentSafety.analyze_text', "
"'promptflow.tools.azure_detect.AzureDetect.get_language'].",
),
],
)
def test_package_tool_execution(self, flow_folder, error_class, inner_class, error_message, dev_connections):
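# Pretend only these two package tools are installed so that resolving a wrong tool or package raises PackageToolNotFoundError.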
def mock_collect_package_tools(keys=None):
return {
"promptflow.tools.azure_content_safety.AzureContentSafety.analyze_text": None,
"promptflow.tools.azure_detect.AzureDetect.get_language": None,
}
with patch(PACKAGE_TOOL_ENTRY, side_effect=mock_collect_package_tools):
with pytest.raises(error_class) as exc_info:
FlowExecutor.create(get_yaml_file(flow_folder, WRONG_FLOW_ROOT), dev_connections)
if isinstance(exc_info.value, ResolveToolError):
assert isinstance(exc_info.value.inner_exception, inner_class)
assert error_message == exc_info.value.message
@pytest.mark.parametrize(
"flow_folder, error_message",
[
(
"tool_with_init_error",
"Tool load failed in 'tool_with_init_error': "
"(ToolLoadError) Failed to load package tool 'Tool with init error': (Exception) Tool load error.",
)
],
)
def test_package_tool_load_error(self, flow_folder, error_message, dev_connections, mocker):
flow_folder = PACKAGE_TOOL_BASE / flow_folder
package_tool_definition = get_flow_package_tool_definition(flow_folder)
with mocker.patch(PACKAGE_TOOL_ENTRY, return_value=package_tool_definition):
with pytest.raises(ResolveToolError) as exc_info:
FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
assert isinstance(exc_info.value.inner_exception, ToolLoadError)
assert exc_info.value.message == error_message
# File: promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_batch_engine.py
import asyncio
import multiprocessing
import os
import uuid
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._utils.utils import dump_list_to_jsonl
from promptflow.batch._batch_engine import OUTPUT_FILE_NAME, BatchEngine
from promptflow.batch._errors import EmptyInputsData
from promptflow.batch._result import BatchResult
from promptflow.contracts.run_info import Status
from promptflow.executor._errors import InputNotFound
from ..utils import (
MemoryRunStorage,
get_flow_expected_metrics,
get_flow_expected_status_summary,
get_flow_folder,
get_flow_inputs_file,
get_flow_sample_inputs,
get_yaml_file,
load_jsonl,
)
SAMPLE_FLOW = "web_classification_no_variants"
SAMPLE_EVAL_FLOW = "classification_accuracy_evaluation"
SAMPLE_FLOW_WITH_PARTIAL_FAILURE = "python_tool_partial_failure"
async def async_submit_batch_run(flow_folder, inputs_mapping, connections):
batch_result = submit_batch_run(flow_folder, inputs_mapping, connections=connections)
await asyncio.sleep(1)
return batch_result
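# Runs in a child process: pins the multiprocessing start method via PF_BATCH_METHOD, submits a batch run, and validates its outputs.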
def run_batch_with_start_method(multiprocessing_start_method, flow_folder, inputs_mapping, dev_connections):
os.environ["PF_BATCH_METHOD"] = multiprocessing_start_method
batch_result, output_dir = submit_batch_run(
flow_folder, inputs_mapping, connections=dev_connections, return_output_dir=True
)
assert isinstance(batch_result, BatchResult)
nlines = get_batch_inputs_line(flow_folder)
assert batch_result.total_lines == nlines
assert batch_result.completed_lines == nlines
assert batch_result.start_time < batch_result.end_time
assert batch_result.system_metrics.duration > 0
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
assert len(outputs) == nlines
for i, output in enumerate(outputs):
assert isinstance(output, dict)
assert "line_number" in output, f"line_number is not in {i}th output {output}"
assert output["line_number"] == i, f"line_number is not correct in {i}th output {output}"
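# Shared helper: builds a BatchEngine for the flow folder and submits a run; with return_output_dir=True it also returns the temporary output directory.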
def submit_batch_run(
flow_folder,
inputs_mapping,
*,
input_dirs={},
input_file_name="samples.json",
run_id=None,
connections={},
storage=None,
return_output_dir=False,
):
batch_engine = BatchEngine(
get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections=connections, storage=storage
)
if not input_dirs and inputs_mapping:
input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name=input_file_name)}
output_dir = Path(mkdtemp())
if return_output_dir:
return batch_engine.run(input_dirs, inputs_mapping, output_dir, run_id=run_id), output_dir
return batch_engine.run(input_dirs, inputs_mapping, output_dir, run_id=run_id)
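# Line count of the flow's sample inputs file, used to assert totals in batch results.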
def get_batch_inputs_line(flow_folder, sample_inputs_file="samples.json"):
inputs = get_flow_sample_inputs(flow_folder, sample_inputs_file=sample_inputs_file)
return len(inputs)
@pytest.mark.usefixtures("use_secrets_config_file", "dev_connections")
@pytest.mark.e2etest
class TestBatch:
def test_batch_storage(self, dev_connections):
mem_run_storage = MemoryRunStorage()
run_id = str(uuid.uuid4())
inputs_mapping = {"url": "${data.url}"}
batch_result = submit_batch_run(
SAMPLE_FLOW, inputs_mapping, run_id=run_id, connections=dev_connections, storage=mem_run_storage
)
nlines = get_batch_inputs_line(SAMPLE_FLOW)
assert batch_result.total_lines == nlines
assert batch_result.completed_lines == nlines
assert len(mem_run_storage._flow_runs) == nlines
assert all(flow_run_info.status == Status.Completed for flow_run_info in mem_run_storage._flow_runs.values())
assert all(node_run_info.status == Status.Completed for node_run_info in mem_run_storage._node_runs.values())
@pytest.mark.parametrize(
"flow_folder, inputs_mapping",
[
(
SAMPLE_FLOW,
{"url": "${data.url}"},
),
(
"prompt_tools",
{"text": "${data.text}"},
),
(
"script_with___file__",
{"text": "${data.text}"},
),
(
"sample_flow_with_functions",
{"question": "${data.question}"},
),
],
)
def test_batch_run(self, flow_folder, inputs_mapping, dev_connections):
batch_result, output_dir = submit_batch_run(
flow_folder, inputs_mapping, connections=dev_connections, return_output_dir=True
)
assert isinstance(batch_result, BatchResult)
nlines = get_batch_inputs_line(flow_folder)
assert batch_result.total_lines == nlines
assert batch_result.completed_lines == nlines
assert batch_result.start_time < batch_result.end_time
assert batch_result.system_metrics.duration > 0
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
assert len(outputs) == nlines
for i, output in enumerate(outputs):
assert isinstance(output, dict)
assert "line_number" in output, f"line_number is not in {i}th output {output}"
assert output["line_number"] == i, f"line_number is not correct in {i}th output {output}"
@pytest.mark.parametrize(
"flow_folder, inputs_mapping",
[
(
SAMPLE_FLOW,
{"url": "${data.url}"},
),
(
"prompt_tools",
{"text": "${data.text}"},
),
(
"script_with___file__",
{"text": "${data.text}"},
),
(
"sample_flow_with_functions",
{"question": "${data.question}"},
),
],
)
def test_spawn_mode_batch_run(self, flow_folder, inputs_mapping, dev_connections):
if "spawn" not in multiprocessing.get_all_start_methods():
pytest.skip("Unsupported start method: spawn")
p = multiprocessing.Process(
target=run_batch_with_start_method, args=("spawn", flow_folder, inputs_mapping, dev_connections)
)
p.start()
p.join()
assert p.exitcode == 0
@pytest.mark.parametrize(
"flow_folder, inputs_mapping",
[
(
SAMPLE_FLOW,
{"url": "${data.url}"},
),
(
"prompt_tools",
{"text": "${data.text}"},
),
(
"script_with___file__",
{"text": "${data.text}"},
),
(
"sample_flow_with_functions",
{"question": "${data.question}"},
),
],
)
def test_forkserver_mode_batch_run(self, flow_folder, inputs_mapping, dev_connections):
if "forkserver" not in multiprocessing.get_all_start_methods():
pytest.skip("Unsupported start method: forkserver")
p = multiprocessing.Process(
target=run_batch_with_start_method, args=("forkserver", flow_folder, inputs_mapping, dev_connections)
)
p.start()
p.join()
assert p.exitcode == 0
def test_batch_run_then_eval(self, dev_connections):
batch_results, output_dir = submit_batch_run(
SAMPLE_FLOW, {"url": "${data.url}"}, connections=dev_connections, return_output_dir=True
)
nlines = get_batch_inputs_line(SAMPLE_FLOW)
assert batch_results.completed_lines == nlines
input_dirs = {"data": get_flow_inputs_file(SAMPLE_FLOW, file_name="samples.json"), "run.outputs": output_dir}
inputs_mapping = {
"variant_id": "baseline",
"groundtruth": "${data.url}",
"prediction": "${run.outputs.category}",
}
eval_result = submit_batch_run(SAMPLE_EVAL_FLOW, inputs_mapping, input_dirs=input_dirs)
assert eval_result.completed_lines == nlines, f"Only returned {eval_result.completed_lines}/{nlines} outputs."
assert len(eval_result.metrics) > 0, "No metrics are returned."
assert eval_result.metrics["accuracy"] == 0, f"Accuracy should be 0, got {eval_result.metrics}."
def test_batch_with_metrics(self, dev_connections):
flow_folder = SAMPLE_EVAL_FLOW
inputs_mapping = {
"variant_id": "${data.variant_id}",
"groundtruth": "${data.groundtruth}",
"prediction": "${data.prediction}",
}
batch_results = submit_batch_run(flow_folder, inputs_mapping, connections=dev_connections)
assert isinstance(batch_results, BatchResult)
assert isinstance(batch_results.metrics, dict)
assert batch_results.metrics == get_flow_expected_metrics(flow_folder)
assert batch_results.total_lines == batch_results.completed_lines
assert batch_results.node_status == get_flow_expected_status_summary(flow_folder)
def test_batch_with_partial_failure(self, dev_connections):
flow_folder = SAMPLE_FLOW_WITH_PARTIAL_FAILURE
inputs_mapping = {"idx": "${data.idx}", "mod": "${data.mod}", "mod_2": "${data.mod_2}"}
batch_results = submit_batch_run(flow_folder, inputs_mapping, connections=dev_connections)
assert isinstance(batch_results, BatchResult)
assert batch_results.total_lines == 10
assert batch_results.completed_lines == 5
assert batch_results.failed_lines == 5
assert batch_results.node_status == get_flow_expected_status_summary(flow_folder)
def test_batch_with_line_number(self, dev_connections):
flow_folder = SAMPLE_FLOW_WITH_PARTIAL_FAILURE
input_dirs = {"data": "inputs/data.jsonl", "output": "inputs/output.jsonl"}
inputs_mapping = {"idx": "${output.idx}", "mod": "${data.mod}", "mod_2": "${data.mod_2}"}
batch_results, output_dir = submit_batch_run(
flow_folder, inputs_mapping, input_dirs=input_dirs, connections=dev_connections, return_output_dir=True
)
assert isinstance(batch_results, BatchResult)
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
assert len(outputs) == 2
assert outputs == [
{"line_number": 0, "output": 1},
{"line_number": 6, "output": 7},
]
def test_batch_with_openai_metrics(self, dev_connections):
inputs_mapping = {"url": "${data.url}"}
batch_result, output_dir = submit_batch_run(
SAMPLE_FLOW, inputs_mapping, connections=dev_connections, return_output_dir=True
)
nlines = get_batch_inputs_line(SAMPLE_FLOW)
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
assert len(outputs) == nlines
assert batch_result.system_metrics.total_tokens > 0
assert batch_result.system_metrics.prompt_tokens > 0
assert batch_result.system_metrics.completion_tokens > 0
def test_batch_with_default_input(self):
mem_run_storage = MemoryRunStorage()
default_input_value = "input value from default"
inputs_mapping = {"text": "${data.text}"}
batch_result, output_dir = submit_batch_run(
"default_input", inputs_mapping, storage=mem_run_storage, return_output_dir=True
)
assert batch_result.total_lines == batch_result.completed_lines
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
assert len(outputs) == 1
assert outputs[0]["output"] == default_input_value
assert all(
node_run_info.status == Status.Completed and node_run_info.output == [default_input_value]
for node_run_info in mem_run_storage._node_runs.values()
if node_run_info.node == "aggregate_node"
)
@pytest.mark.parametrize(
"flow_folder, batch_input, expected_type",
[
("simple_aggregation", [{"text": 4}], str),
("simple_aggregation", [{"text": 4.5}], str),
("simple_aggregation", [{"text": "3.0"}], str),
],
)
def test_batch_run_line_result(self, flow_folder, batch_input, expected_type):
mem_run_storage = MemoryRunStorage()
input_file = Path(mkdtemp()) / "inputs.jsonl"
dump_list_to_jsonl(input_file, batch_input)
input_dirs = {"data": input_file}
inputs_mapping = {"text": "${data.text}"}
batch_results = submit_batch_run(flow_folder, inputs_mapping, input_dirs=input_dirs, storage=mem_run_storage)
assert isinstance(batch_results, BatchResult)
assert all(
type(flow_run_info.inputs["text"]) is expected_type for flow_run_info in mem_run_storage._flow_runs.values()
)
@pytest.mark.parametrize(
"flow_folder, input_mapping, error_class, error_message",
[
(
"connection_as_input",
{},
InputNotFound,
"The input for flow cannot be empty in batch mode. Please review your flow and provide valid inputs.",
),
(
"script_with___file__",
{"text": "${data.text}"},
EmptyInputsData,
"Couldn't find any inputs data at the given input paths. Please review the provided path "
"and consider resubmitting.",
),
],
)
def test_batch_run_failure(self, flow_folder, input_mapping, error_class, error_message):
with pytest.raises(error_class) as e:
submit_batch_run(flow_folder, input_mapping, input_file_name="empty_inputs.jsonl")
assert error_message in e.value.message
def test_batch_run_in_existing_loop(self, dev_connections):
flow_folder = "prompt_tools"
inputs_mapping = {"text": "${data.text}"}
batch_result = asyncio.run(async_submit_batch_run(flow_folder, inputs_mapping, dev_connections))
assert isinstance(batch_result, BatchResult)
assert batch_result.total_lines == batch_result.completed_lines
def test_batch_run_with_aggregation_failure(self, dev_connections):
flow_folder = "aggregation_node_failed"
inputs_mapping = {"groundtruth": "${data.groundtruth}", "prediction": "${data.prediction}"}
batch_result = submit_batch_run(flow_folder, inputs_mapping, connections=dev_connections)
assert isinstance(batch_result, BatchResult)
assert batch_result.total_lines == batch_result.completed_lines
assert batch_result.node_status == get_flow_expected_status_summary(flow_folder)
# assert aggregation node error summary
assert batch_result.failed_lines == 0
aggre_node_error = batch_result.error_summary.aggr_error_dict["aggregate"]
assert aggre_node_error["message"] == "Execution failure in 'aggregate': (ZeroDivisionError) division by zero"
assert aggre_node_error["code"] == "UserError"
assert aggre_node_error["innerError"] == {"code": "ToolExecutionError", "innerError": None}
# File: promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_traces.py
from types import GeneratorType
import pytest
from promptflow._utils.dataclass_serializer import serialize
from promptflow.contracts.run_info import Status
from promptflow.executor import FlowExecutor
from ..utils import get_yaml_file
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestExecutorTraces:
def validate_openai_apicall(self, apicall: dict):
"""Validates an apicall dict.
Ensures that the trace output of the OpenAI API is a list of dicts.
Args:
apicall (dict): A dictionary representing an API call.
Raises:
AssertionError: If the API call is invalid.
"""
get_trace = False
if apicall.get("name", "") in (
"openai.api_resources.chat_completion.ChatCompletion.create",
"openai.api_resources.completion.Completion.create",
"openai.api_resources.embedding.Embedding.create",
"openai.resources.completions.Completions.create", # openai>=1.0.0
"openai.resources.chat.completions.Completions.create", # openai>=1.0.0
):
get_trace = True
output = apicall.get("output")
assert not isinstance(output, str)
assert isinstance(output, (list, dict))
if isinstance(output, list):
assert all(isinstance(item, dict) for item in output)
children = apicall.get("children", [])
if children is not None:
for child in children:
get_trace = get_trace or self.validate_openai_apicall(child)
return get_trace
def get_chat_input(stream):
return {
"question": "What is the capital of the United States of America?",
"chat_history": [],
"stream": stream,
}
def get_completion_input(stream):
return {"prompt": "What is the capital of the United States of America?", "stream": stream}
@pytest.mark.parametrize(
"flow_folder, inputs",
[
("openai_chat_api_flow", get_chat_input(False)),
("openai_chat_api_flow", get_chat_input(True)),
("openai_completion_api_flow", get_completion_input(False)),
("openai_completion_api_flow", get_completion_input(True)),
("llm_tool", {"topic": "Hello", "stream": False}),
("llm_tool", {"topic": "Hello", "stream": True}),
],
)
def test_executor_openai_api_flow(self, flow_folder, inputs, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
assert "total_tokens" in flow_result.run_info.system_metrics
assert flow_result.run_info.system_metrics["total_tokens"] > 0
get_traced = False
for api_call in flow_result.run_info.api_calls:
get_traced = get_traced or self.validate_openai_apicall(serialize(api_call))
assert get_traced is True
def test_executor_generator_tools(self, dev_connections):
executor = FlowExecutor.create(get_yaml_file("generator_tools"), dev_connections)
inputs = {"text": "This is a test"}
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
tool_trace = flow_result.run_info.api_calls[0]["children"][0]
generator_trace = tool_trace.get("children")[0]
assert generator_trace is not None
output = generator_trace.get("output")
assert isinstance(output, list)
@pytest.mark.parametrize("allow_generator_output", [False, True])
def test_trace_behavior_with_generator_node(self, dev_connections, allow_generator_output):
"""Test to verify the trace output list behavior for a flow with a generator node.
This test checks the trace output list in two scenarios based on the 'allow_generator_output' flag:
- When 'allow_generator_output' is True, the output list should initially be empty until the generator is
consumed.
- When 'allow_generator_output' is False, the output list should contain items produced by the generator node.
The test ensures that the trace accurately reflects the generator's consumption status and helps in monitoring
and debugging flow execution.
"""
# Set up executor with a flow that contains a generator node
executor = FlowExecutor.create(get_yaml_file("generator_nodes"), dev_connections)
inputs = {"text": "This is a test"}
# Execute the flow with the given inputs and 'allow_generator_output' setting
flow_result = executor.exec_line(inputs, allow_generator_output=allow_generator_output)
# Verify that the flow execution result is a dictionary and the flow has completed successfully
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
# Extract the trace for the generator node
tool_trace = flow_result.run_info.api_calls[0]["children"][0]
generator_output_trace = tool_trace.get("output")
# Verify that the trace output is a list
assert isinstance(generator_output_trace, list)
if allow_generator_output:
# If generator output is allowed, the trace list should be empty before consumption
assert not generator_output_trace
# Obtain the generator from the flow result
answer_gen = flow_result.output.get("answer")
assert isinstance(answer_gen, GeneratorType)
# Consume the generator and check that it yields text
try:
generated_text = next(answer_gen)
assert isinstance(generated_text, str)
# Verify the trace list contains the most recently generated item
assert generator_output_trace[-1] == generated_text
except StopIteration:
assert False, "Generator did not generate any text"
else:
# If generator output is not allowed, the trace list should contain generated items
assert generator_output_trace
assert all(isinstance(item, str) for item in generator_output_trace)
@pytest.mark.parametrize("flow_file", ["flow_with_trace", "flow_with_trace_async"])
def test_flow_with_trace(self, flow_file, dev_connections):
"""Tests to verify flows that contain @trace marks.
They should generate traces with the "Function" type, nested inside the "Tool" traces.
This test case verifies a flow with the following structure, in both sync and async mode:
.. code-block::
flow (Flow, 1.5s)
greetings (Tool, 1.5s)
get_user_name (Function, 1.0s)
is_valid_name (Function, 0.5s)
format_greeting (Function, 0.5s)
"""
executor = FlowExecutor.create(get_yaml_file(flow_file), dev_connections)
inputs = {"user_id": 1}
flow_result = executor.exec_line(inputs)
# Assert the run status is completed
assert flow_result.output == {"output": "Hello, User 1!"}
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
# Verify the traces are as expected
api_calls = flow_result.run_info.api_calls
assert len(api_calls) == 1
# Assert the "flow" root level trace
flow_trace = api_calls[0]
assert flow_trace["name"] == "flow"
assert flow_trace["type"] == "Flow"
assert flow_trace["end_time"] - flow_trace["start_time"] == pytest.approx(1.5, abs=0.3)
assert len(flow_trace["children"]) == 1
assert flow_trace["system_metrics"]["duration"] == pytest.approx(1.5, abs=0.3)
assert flow_trace["system_metrics"]["prompt_tokens"] == 0
assert flow_trace["system_metrics"]["completion_tokens"] == 0
assert flow_trace["system_metrics"]["total_tokens"] == 0
# TODO: These assertions should be fixed after adding these fields to the top level trace
assert "inputs" not in flow_trace
assert "output" not in flow_trace
assert "error" not in flow_trace
# Assert the "greetings" tool
greetings_trace = flow_trace["children"][0]
assert greetings_trace["name"] == "greetings"
assert greetings_trace["type"] == "Tool"
assert greetings_trace["inputs"] == inputs
assert greetings_trace["output"] == {"greeting": "Hello, User 1!"}
assert greetings_trace["error"] is None
assert greetings_trace["children"] is not None
assert greetings_trace["end_time"] - greetings_trace["start_time"] == pytest.approx(1.5, abs=0.3)
assert len(greetings_trace["children"]) == 2
# TODO: verify the system metrics. This might need to be fixed.
assert greetings_trace["system_metrics"] == {}
# Assert the "get_user_name" function
get_user_name_trace = greetings_trace["children"][0]
assert get_user_name_trace["name"] == "get_user_name"
assert get_user_name_trace["type"] == "Function"
assert get_user_name_trace["inputs"] == {"user_id": 1}
assert get_user_name_trace["output"] == "User 1"
assert get_user_name_trace["error"] is None
assert get_user_name_trace["end_time"] - get_user_name_trace["start_time"] == pytest.approx(1.0, abs=0.2)
assert len(get_user_name_trace["children"]) == 1
# TODO: verify the system metrics. This might need to be fixed.
assert get_user_name_trace["system_metrics"] == {}
# Assert the "get_user_name/is_valid_name" function
is_valid_name_trace = get_user_name_trace["children"][0]
assert is_valid_name_trace["name"] == "is_valid_name"
assert is_valid_name_trace["type"] == "Function"
assert is_valid_name_trace["inputs"] == {"name": "User 1"}
assert is_valid_name_trace["output"] is True
assert is_valid_name_trace["error"] is None
# When running tests on macOS, it takes longer, so we adjust abs to 0.15 and will see if it needs to be extended.
assert is_valid_name_trace["end_time"] - is_valid_name_trace["start_time"] == pytest.approx(0.5, abs=0.15)
assert is_valid_name_trace["children"] == []
# TODO: verify the system metrics. This might need to be fixed.
assert is_valid_name_trace["system_metrics"] == {}
# Assert the "format_greeting" function
format_greeting_trace = greetings_trace["children"][1]
assert format_greeting_trace["name"] == "format_greeting"
assert format_greeting_trace["type"] == "Function"
assert format_greeting_trace["inputs"] == {"user_name": "User 1"}
assert format_greeting_trace["output"] == "Hello, User 1!"
assert format_greeting_trace["error"] is None
# When running tests on macOS, it takes longer, so we adjust abs to 0.15 and will see if it needs to be extended.
assert format_greeting_trace["end_time"] - format_greeting_trace["start_time"] == pytest.approx(0.5, abs=0.15)
assert format_greeting_trace["children"] == []
# TODO: verify the system metrics. This might need to be fixed.
assert format_greeting_trace["system_metrics"] == {}
# File: promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_csharp_executor_proxy.py
import json
import multiprocessing
import threading
from pathlib import Path
from tempfile import mkdtemp
from typing import Optional, Tuple, Union
import pytest
from promptflow._constants import FlowLanguage
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow.batch._batch_engine import BatchEngine
from promptflow.batch._csharp_executor_proxy import CSharpExecutorProxy
from promptflow.batch._result import BatchResult
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, ValidationException
from promptflow.executor._errors import ConnectionNotFound
from promptflow.storage._run_storage import AbstractRunStorage
from ..mock_execution_server import run_executor_server
from ..utils import MemoryRunStorage, get_flow_folder, get_flow_inputs_file, get_yaml_file
@pytest.mark.unittest
class TestCSharpExecutorProxy:
def setup_method(self):
BatchEngine.register_executor(FlowLanguage.CSharp, MockCSharpExecutorProxy)
def test_batch(self):
# submit a batch run
_, batch_result = self._submit_batch_run()
assert batch_result.status == Status.Completed
assert batch_result.completed_lines == batch_result.total_lines
assert batch_result.system_metrics.duration > 0
assert batch_result.completed_lines > 0
def test_batch_execution_error(self):
# submit a batch run
_, batch_result = self._submit_batch_run(has_error=True)
assert batch_result.status == Status.Completed
assert batch_result.total_lines == 3
assert batch_result.failed_lines == 1
assert batch_result.system_metrics.duration > 0
def test_batch_validation_error(self):
# prepare the init error file to mock the validation error
error_message = "'test_connection' not found."
test_exception = ConnectionNotFound(message=error_message)
error_dict = ExceptionPresenter.create(test_exception).to_dict()
init_error_file = Path(mkdtemp()) / "init_error.json"
with open(init_error_file, "w") as file:
json.dump(error_dict, file)
# submit a batch run
with pytest.raises(ValidationException) as e:
self._submit_batch_run(init_error_file=init_error_file)
assert error_message in e.value.message
assert e.value.error_codes == ["UserError", "ValidationError"]
assert e.value.target == ErrorTarget.BATCH
def test_batch_cancel(self):
# use a thread to submit a batch run
batch_engine, batch_run_thread = self._submit_batch_run(run_in_thread=True)
assert batch_engine._is_canceled is False
batch_run_thread.start()
# cancel the batch run
batch_engine.cancel()
batch_run_thread.join()
assert batch_engine._is_canceled is True
assert batch_result_global.status == Status.Canceled
assert batch_result_global.system_metrics.duration > 0
def _submit_batch_run(
self, run_in_thread=False, has_error=False, init_error_file=None
) -> Union[Tuple[BatchEngine, threading.Thread], Tuple[BatchEngine, BatchResult]]:
flow_folder = "csharp_flow"
mem_run_storage = MemoryRunStorage()
# init the batch engine
batch_engine = BatchEngine(
get_yaml_file(flow_folder),
get_flow_folder(flow_folder),
storage=mem_run_storage,
has_error=has_error,
init_error_file=init_error_file,
)
# prepare the inputs
input_dirs = {"data": get_flow_inputs_file(flow_folder)}
inputs_mapping = {"question": "${data.question}"}
output_dir = Path(mkdtemp())
if run_in_thread:
return batch_engine, threading.Thread(
target=self._batch_run_in_thread, args=(batch_engine, input_dirs, inputs_mapping, output_dir)
)
else:
return batch_engine, batch_engine.run(input_dirs, inputs_mapping, output_dir)
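# Worker target for the cancel test: stores the batch result in a module-level global so the main thread can inspect it after join().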
def _batch_run_in_thread(self, batch_engine: BatchEngine, input_dirs, inputs_mapping, output_dir):
global batch_result_global
batch_result_global = batch_engine.run(input_dirs, inputs_mapping, output_dir)
class MockCSharpExecutorProxy(CSharpExecutorProxy):
def __init__(self, process: multiprocessing.Process, port: str):
self._process = process
self._port = port
@classmethod
async def create(
cls,
flow_file: Path,
working_dir: Optional[Path] = None,
*,
connections: Optional[dict] = None,
storage: Optional[AbstractRunStorage] = None,
**kwargs,
) -> "MockCSharpExecutorProxy":
"""Create a new executor"""
has_error = kwargs.get("has_error", False)
init_error_file = kwargs.get("init_error_file", None)
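# Start the mock execution server in a separate process on a free port instead of launching a real C# executor.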
port = cls.find_available_port()
process = multiprocessing.Process(
target=run_executor_server,
args=(
int(port),
has_error,
init_error_file,
),
)
process.start()
executor_proxy = cls(process, port)
await executor_proxy.ensure_executor_startup(init_error_file)
return executor_proxy
async def destroy(self):
"""Destroy the executor"""
if self._process and self._process.is_alive():
self._process.terminate()
try:
self._process.join(timeout=5)
except TimeoutError:
self._process.kill()
def _is_executor_active(self):
return self._process and self._process.is_alive()
# File: promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_batch_timeout.py
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow.batch import BatchEngine
from promptflow.batch._result import BatchResult, LineError
from promptflow.contracts.run_info import Status
from promptflow.executor._errors import BatchExecutionTimeoutError, LineExecutionTimeoutError
from ..utils import MemoryRunStorage, get_flow_folder, get_flow_inputs_file, get_yaml_file
SAMPLE_FLOW = "web_classification_no_variants"
ONE_LINE_OF_BULK_TEST_TIMEOUT = "one_line_of_bulktest_timeout"
@pytest.mark.usefixtures("use_secrets_config_file", "dev_connections")
@pytest.mark.e2etest
class TestBatchTimeout:
@pytest.mark.parametrize(
"flow_folder",
[
ONE_LINE_OF_BULK_TEST_TIMEOUT,
],
)
def test_batch_with_line_timeout(self, flow_folder, dev_connections):
# set the line timeout to 5 seconds for testing
mem_run_storage = MemoryRunStorage()
batch_engine = BatchEngine(
get_yaml_file(flow_folder),
get_flow_folder(flow_folder),
connections=dev_connections,
storage=mem_run_storage,
)
batch_engine._line_timeout_sec = 5
# prepare input file and output dir
input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name="samples_all_timeout.json")}
output_dir = Path(mkdtemp())
inputs_mapping = {"idx": "${data.idx}"}
batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)
assert isinstance(batch_results, BatchResult)
assert batch_results.completed_lines == 0
assert batch_results.failed_lines == 2
assert batch_results.total_lines == 2
assert batch_results.node_status == {
"my_python_tool_with_failed_line.canceled": 2,
"my_python_tool.completed": 2,
}
# assert mem_run_storage persists run infos correctly
assert len(mem_run_storage._flow_runs) == 2, "Flow runs are persisted in memory storage."
assert len(mem_run_storage._node_runs) == 4, "Node runs are persisted in memory storage."
msg = "Tool execution is canceled because of the error: Line execution timeout after 5 seconds."
for run in mem_run_storage._node_runs.values():
if run.node == "my_python_tool_with_failed_line":
assert run.status == Status.Canceled
assert run.error["message"] == msg
else:
assert run.status == Status.Completed
assert batch_results.status == Status.Completed
assert batch_results.total_lines == 2
assert batch_results.completed_lines == 0
assert batch_results.failed_lines == 2
assert batch_results.error_summary.failed_user_error_lines == 2
assert batch_results.error_summary.failed_system_error_lines == 0
for i, line_error in enumerate(batch_results.error_summary.error_list):
assert isinstance(line_error, LineError)
assert line_error.error["message"] == f"Line {i} execution timeout for exceeding 5 seconds"
assert line_error.error["code"] == "UserError"
@pytest.mark.parametrize(
"flow_folder",
[
ONE_LINE_OF_BULK_TEST_TIMEOUT,
],
)
def test_batch_with_one_line_timeout(self, flow_folder, dev_connections):
mem_run_storage = MemoryRunStorage()
batch_engine = BatchEngine(
get_yaml_file(flow_folder),
get_flow_folder(flow_folder),
connections=dev_connections,
storage=mem_run_storage,
)
# set the line timeout to 5 seconds for testing
batch_engine._line_timeout_sec = 5
# prepare the input file and output dir
input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name="samples.json")}
output_dir = Path(mkdtemp())
inputs_mapping = {"idx": "${data.idx}"}
batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)
assert isinstance(batch_results, BatchResult)
# assert the line status in batch result
assert batch_results.status == Status.Completed
assert batch_results.total_lines == 3
assert batch_results.completed_lines == 2
assert batch_results.failed_lines == 1
assert batch_results.node_status == {
"my_python_tool_with_failed_line.completed": 2,
"my_python_tool_with_failed_line.canceled": 1,
"my_python_tool.completed": 3,
}
# assert the error summary in batch result
assert batch_results.error_summary.failed_user_error_lines == 1
assert batch_results.error_summary.failed_system_error_lines == 0
assert isinstance(batch_results.error_summary.error_list[0], LineError)
assert batch_results.error_summary.error_list[0].line_number == 2
assert batch_results.error_summary.error_list[0].error["code"] == "UserError"
assert batch_results.error_summary.error_list[0].error["referenceCode"] == "Executor"
assert batch_results.error_summary.error_list[0].error["innerError"]["code"] == "LineExecutionTimeoutError"
assert (
batch_results.error_summary.error_list[0].error["message"]
== "Line 2 execution timeout for exceeding 5 seconds"
)
# assert mem_run_storage persists run infos correctly
assert len(mem_run_storage._flow_runs) == 3, "Flow runs are persisted in memory storage."
assert len(mem_run_storage._node_runs) == 6, "Node runs are persisted in memory storage."
@pytest.mark.parametrize(
"flow_folder, line_timeout_sec, batch_timeout_sec, expected_error",
[
(ONE_LINE_OF_BULK_TEST_TIMEOUT, 600, 5, BatchExecutionTimeoutError(2, 5)),
(ONE_LINE_OF_BULK_TEST_TIMEOUT, 3, 600, LineExecutionTimeoutError(2, 3)),
(ONE_LINE_OF_BULK_TEST_TIMEOUT, 3, 5, LineExecutionTimeoutError(2, 3)),
# TODO: Will change to BatchExecutionTimeoutError after refining the implementation of batch timeout.
# (ONE_LINE_OF_BULK_TEST_TIMEOUT, 3, 3, LineExecutionTimeoutError(2, 3)),
],
)
def test_batch_timeout(self, flow_folder, line_timeout_sec, batch_timeout_sec, expected_error):
mem_run_storage = MemoryRunStorage()
batch_engine = BatchEngine(
get_yaml_file(flow_folder),
get_flow_folder(flow_folder),
connections={},
storage=mem_run_storage,
)
batch_engine._line_timeout_sec = line_timeout_sec
batch_engine._batch_timeout_sec = batch_timeout_sec
input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name="samples.json")}
output_dir = Path(mkdtemp())
inputs_mapping = {"idx": "${data.idx}"}
batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)
assert isinstance(batch_results, BatchResult)
# assert the line status in batch result
assert batch_results.status == Status.Completed
assert batch_results.total_lines == 3
assert batch_results.completed_lines == 2
assert batch_results.failed_lines == 1
assert batch_results.node_status == {
"my_python_tool_with_failed_line.completed": 2,
"my_python_tool_with_failed_line.canceled": 1,
"my_python_tool.completed": 3,
}
# assert the error summary in batch result
assert batch_results.error_summary.failed_user_error_lines == 1
assert batch_results.error_summary.failed_system_error_lines == 0
assert isinstance(batch_results.error_summary.error_list[0], LineError)
assert batch_results.error_summary.error_list[0].line_number == 2
actual_error_dict = batch_results.error_summary.error_list[0].error
expected_error_dict = ExceptionPresenter.create(expected_error).to_dict()
assert actual_error_dict["code"] == expected_error_dict["code"]
assert actual_error_dict["message"] == expected_error_dict["message"]
assert actual_error_dict["referenceCode"] == expected_error_dict["referenceCode"]
assert actual_error_dict["innerError"]["code"] == expected_error_dict["innerError"]["code"]
# assert mem_run_storage persists run infos correctly
assert len(mem_run_storage._flow_runs) == 3, "Flow runs are persisted in memory storage."
# TODO: Currently, the node status is incomplete.
# We will assert the correct result after refining the implementation of batch timeout.
assert len(mem_run_storage._node_runs) == 6, "Node runs are persisted in memory storage."
# File: promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_telemetry.py
import json
import uuid
from collections import namedtuple
from importlib.metadata import version
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import patch
import pytest
from promptflow._core.operation_context import OperationContext
from promptflow.batch._batch_engine import OUTPUT_FILE_NAME, BatchEngine
from promptflow.contracts.run_mode import RunMode
from promptflow.executor import FlowExecutor
from ..utils import get_flow_folder, get_flow_inputs_file, get_yaml_file, load_jsonl
IS_LEGACY_OPENAI = version("openai").startswith("0.")
Completion = namedtuple("Completion", ["choices"])
Choice = namedtuple("Choice", ["delta"])
Delta = namedtuple("Delta", ["content"])
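# Fake streaming chat completion: echoes the request headers back as the message content so the test can inspect the injected telemetry headers.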
def stream_response(kwargs):
if IS_LEGACY_OPENAI:
delta = Delta(content=json.dumps(kwargs.get("headers", {})))
yield Completion(choices=[{"delta": delta}])
else:
delta = Delta(content=json.dumps(kwargs.get("extra_headers", {})))
yield Completion(choices=[Choice(delta=delta)])
def mock_stream_chat(*args, **kwargs):
return stream_response(kwargs)
@pytest.mark.skip(reason="Skip on Mac, Windows, and Linux; patch does not work in the spawn process")
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestExecutorTelemetry:
def test_executor_openai_telemetry(self, dev_connections):
"""This test validates that the telemetry info header is correctly injected into the OpenAI API
by mocking the chat API method. The mock method returns a generator that yields a
namedtuple containing a JSON string of the headers passed to the method.
"""
if IS_LEGACY_OPENAI:
api = "openai.ChatCompletion.create"
else:
api = "openai.resources.chat.Completions.create"
with patch(api, new=mock_stream_chat):
operation_context = OperationContext.get_instance()
operation_context.clear()
flow_folder = "openai_chat_api_flow"
# Set user-defined properties `scenario` in context
operation_context.scenario = "test"
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
# flow run case
inputs = {"question": "What's your name?", "chat_history": [], "stream": True}
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
headers = json.loads(flow_result.output.get("answer", ""))
assert "promptflow/" in headers.get("x-ms-useragent")
assert headers.get("ms-azure-ai-promptflow-scenario") == "test"
assert headers.get("ms-azure-ai-promptflow-run-mode") == RunMode.Test.name
# batch run case
run_id = str(uuid.uuid4())
batch_engine = BatchEngine(
get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections=dev_connections
)
input_dirs = {"data": get_flow_inputs_file(flow_folder)}
inputs_mapping = {"question": "${data.question}", "chat_history": "${data.chat_history}"}
output_dir = Path(mkdtemp())
batch_engine.run(input_dirs, inputs_mapping, output_dir, run_id=run_id)
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
for line in outputs:
headers = json.loads(line.get("answer", ""))
assert "promptflow/" in headers.get("x-ms-useragent")
assert headers.get("ms-azure-ai-promptflow-scenario") == "test"
assert headers.get("ms-azure-ai-promptflow-run-mode") == RunMode.Batch.name
# single_node case
run_info = FlowExecutor.load_and_exec_node(
get_yaml_file("openai_chat_api_flow"),
"chat",
flow_inputs=inputs,
connections=dev_connections,
raise_ex=True,
)
assert run_info.output is not None
headers = json.loads(run_info.output)
assert "promptflow/" in headers.get("x-ms-useragent")
assert headers.get("ms-azure-ai-promptflow-scenario") == "test"
assert headers.get("ms-azure-ai-promptflow-run-mode") == RunMode.SingleNode.name
# File: promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_async.py
import os
import pytest
from promptflow.executor import FlowExecutor
from ..utils import get_flow_folder, get_yaml_file
@pytest.mark.e2etest
class TestAsync:
@pytest.mark.parametrize(
"folder_name, concurrency_levels, expected_concurrency",
[
("async_tools", [1, 2, 3], [1, 2, 2]),
("async_tools_with_sync_tools", [1, 2, 3], [1, 2, 2]),
],
)
def test_executor_node_concurrency(self, folder_name, concurrency_levels, expected_concurrency):
os.chdir(get_flow_folder(folder_name))
executor = FlowExecutor.create(get_yaml_file(folder_name), {})
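# Walk the start/end timestamps of the node api_calls to compute the peak number of nodes executing at the same time.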
def calculate_max_concurrency(flow_result):
timeline = []
api_calls = flow_result.run_info.api_calls[0]["children"]
for api_call in api_calls:
timeline.append(("start", api_call["start_time"]))
timeline.append(("end", api_call["end_time"]))
timeline.sort(key=lambda x: x[1])
current_concurrency = 0
max_concurrency = 0
for event, _ in timeline:
if event == "start":
current_concurrency += 1
max_concurrency = max(max_concurrency, current_concurrency)
elif event == "end":
current_concurrency -= 1
return max_concurrency
for i in range(len(concurrency_levels)):
concurrency = concurrency_levels[i]
flow_result = executor.exec_line({"input_str": "Hello"}, node_concurrency=concurrency)
max_concurrency = calculate_max_concurrency(flow_result)
assert max_concurrency == expected_concurrency[i]
assert max_concurrency <= concurrency
# File: promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_image.py
import os
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._utils.multimedia_utils import MIME_PATTERN, _create_image_from_file, _is_url, is_multimedia_dict
from promptflow.batch._batch_engine import OUTPUT_FILE_NAME, BatchEngine
from promptflow.batch._result import BatchResult
from promptflow.contracts.multimedia import Image
from promptflow.contracts.run_info import FlowRunInfo, RunInfo, Status
from promptflow.executor import FlowExecutor
from promptflow.storage._run_storage import DefaultRunStorage
from ..utils import get_flow_folder, get_yaml_file, is_image_file, is_jsonl_file, load_jsonl
SIMPLE_IMAGE_FLOW = "python_tool_with_simple_image"
SAMPLE_IMAGE_FLOW_WITH_DEFAULT = "python_tool_with_simple_image_with_default"
SIMPLE_IMAGE_WITH_INVALID_DEFAULT_VALUE_FLOW = "python_tool_with_invalid_default_value"
COMPOSITE_IMAGE_FLOW = "python_tool_with_composite_image"
CHAT_FLOW_WITH_IMAGE = "chat_flow_with_image"
EVAL_FLOW_WITH_SIMPLE_IMAGE = "eval_flow_with_simple_image"
EVAL_FLOW_WITH_COMPOSITE_IMAGE = "eval_flow_with_composite_image"
NESTED_API_CALLS_FLOW = "python_tool_with_image_nested_api_calls"
IMAGE_URL = (
"https://raw.githubusercontent.com/microsoft/promptflow/main/src/promptflow/tests/test_configs/datas/logo.jpg"
)
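# Parametrized inputs covering the supported image representations: multimedia dicts (path/base64/url) as well as raw path, base64, and url values.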
def get_test_cases_for_simple_input(flow_folder):
working_dir = get_flow_folder(flow_folder)
image = _create_image_from_file(working_dir / "logo.jpg")
inputs = [
{"data:image/jpg;path": str(working_dir / "logo.jpg")},
{"data:image/jpg;base64": image.to_base64()},
{"data:image/jpg;url": IMAGE_URL},
str(working_dir / "logo.jpg"),
image.to_base64(),
IMAGE_URL,
]
return [(flow_folder, {"image": input}) for input in inputs]
def get_test_cases_for_composite_input(flow_folder):
working_dir = get_flow_folder(flow_folder)
image_1 = _create_image_from_file(working_dir / "logo.jpg")
image_2 = _create_image_from_file(working_dir / "logo_2.png")
inputs = [
[
{"data:image/jpg;path": str(working_dir / "logo.jpg")},
{"data:image/png;path": str(working_dir / "logo_2.png")},
],
[{"data:image/jpg;base64": image_1.to_base64()}, {"data:image/png;base64": image_2.to_base64()}],
[{"data:image/jpg;url": IMAGE_URL}, {"data:image/png;url": IMAGE_URL}],
]
return [
(flow_folder, {"image_list": input, "image_dict": {"image_1": input[0], "image_2": input[1]}})
for input in inputs
]
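# Single-node cases: run individual nodes with image inputs and, where needed, dependency node outputs that also contain images.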
def get_test_cases_for_node_run():
image = {"data:image/jpg;path": str(get_flow_folder(SIMPLE_IMAGE_FLOW) / "logo.jpg")}
simple_image_input = {"image": image}
image_list = [{"data:image/jpg;path": "logo.jpg"}, {"data:image/png;path": "logo_2.png"}]
image_dict = {
"image_dict": {
"image_1": {"data:image/jpg;path": "logo.jpg"},
"image_2": {"data:image/png;path": "logo_2.png"},
}
}
composite_image_input = {"image_list": image_list, "image_dict": image_dict}
return [
(SIMPLE_IMAGE_FLOW, "python_node", simple_image_input, None),
(SIMPLE_IMAGE_FLOW, "python_node_2", simple_image_input, {"python_node": image}),
(COMPOSITE_IMAGE_FLOW, "python_node", composite_image_input, None),
(COMPOSITE_IMAGE_FLOW, "python_node_2", composite_image_input, None),
(
COMPOSITE_IMAGE_FLOW,
"python_node_3",
composite_image_input,
{"python_node": image_list, "python_node_2": image_dict},
),
]
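# Recursively assert that serialized run infos reference images by url or by a path under parent_path, never as in-memory Image objects.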
def contain_image_reference(value, parent_path="temp"):
if isinstance(value, (FlowRunInfo, RunInfo)):
assert contain_image_reference(value.api_calls, parent_path)
assert contain_image_reference(value.inputs, parent_path)
assert contain_image_reference(value.output, parent_path)
return True
assert not isinstance(value, Image)
if isinstance(value, list):
return any(contain_image_reference(item, parent_path) for item in value)
if isinstance(value, dict):
if is_multimedia_dict(value):
v = list(value.values())[0]
assert isinstance(v, str)
assert _is_url(v) or str(Path(v).parent) == parent_path
return True
return any(contain_image_reference(v, parent_path) for v in value.values())
return False
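# Recursively check whether the value still holds in-memory Image objects (i.e., outputs have not been serialized to references).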
def contain_image_object(value):
if isinstance(value, list):
return any(contain_image_object(item) for item in value)
elif isinstance(value, dict):
assert not is_multimedia_dict(value)
return any(contain_image_object(v) for v in value.values())
else:
return isinstance(value, Image)
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestExecutorWithImage:
@pytest.mark.parametrize(
"flow_folder, inputs",
get_test_cases_for_simple_input(SIMPLE_IMAGE_FLOW)
+ get_test_cases_for_composite_input(COMPOSITE_IMAGE_FLOW)
+ [(CHAT_FLOW_WITH_IMAGE, {}), (NESTED_API_CALLS_FLOW, {})],
)
def test_executor_exec_line_with_image(self, flow_folder, inputs, dev_connections):
working_dir = get_flow_folder(flow_folder)
os.chdir(working_dir)
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp"))
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections, storage=storage)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert contain_image_object(flow_result.output)
# Assert output also contains plain text.
assert any(isinstance(v, str) for v in flow_result.output)
assert flow_result.run_info.status == Status.Completed
assert contain_image_reference(flow_result.run_info)
for _, node_run_info in flow_result.node_run_infos.items():
assert node_run_info.status == Status.Completed
assert contain_image_reference(node_run_info)
@pytest.mark.parametrize(
"flow_folder, node_name, flow_inputs, dependency_nodes_outputs", get_test_cases_for_node_run()
)
def test_executor_exec_node_with_image(
self, flow_folder, node_name, flow_inputs, dependency_nodes_outputs, dev_connections
):
working_dir = get_flow_folder(flow_folder)
os.chdir(working_dir)
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp"))
run_info = FlowExecutor.load_and_exec_node(
get_yaml_file(flow_folder),
node_name,
flow_inputs=flow_inputs,
dependency_nodes_outputs=dependency_nodes_outputs,
connections=dev_connections,
storage=storage,
raise_ex=True,
)
assert run_info.status == Status.Completed
assert contain_image_reference(run_info)
# Assert image could be persisted to the specified path.
@pytest.mark.parametrize(
"output_sub_dir, assign_storage, expected_path",
[
("test_path", True, "test_storage"),
("test_path", False, "test_path"),
(None, True, "test_storage"),
(None, False, "."),
],
)
def test_executor_exec_node_with_image_storage_and_path(self, output_sub_dir, assign_storage, expected_path):
flow_folder = SIMPLE_IMAGE_FLOW
node_name = "python_node"
image = {"data:image/jpg;path": str(get_flow_folder(SIMPLE_IMAGE_FLOW) / "logo.jpg")}
flow_inputs = {"image": image}
working_dir = get_flow_folder(flow_folder)
os.chdir(working_dir)
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./test_storage"))
run_info = FlowExecutor.load_and_exec_node(
get_yaml_file(flow_folder),
node_name,
flow_inputs=flow_inputs,
dependency_nodes_outputs=None,
connections=None,
storage=storage if assign_storage else None,
output_sub_dir=output_sub_dir,
raise_ex=True,
)
assert run_info.status == Status.Completed
assert contain_image_reference(run_info, parent_path=expected_path)
@pytest.mark.parametrize(
"flow_folder, node_name, flow_inputs, dependency_nodes_outputs",
[
(
SIMPLE_IMAGE_WITH_INVALID_DEFAULT_VALUE_FLOW,
"python_node_2",
{},
{
"python_node": {
"data:image/jpg;path": str(
get_flow_folder(SIMPLE_IMAGE_WITH_INVALID_DEFAULT_VALUE_FLOW) / "logo.jpg"
)
}
},
)
],
)
def test_executor_exec_node_with_invalid_default_value(
self, flow_folder, node_name, flow_inputs, dependency_nodes_outputs, dev_connections
):
working_dir = get_flow_folder(flow_folder)
os.chdir(working_dir)
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp"))
run_info = FlowExecutor.load_and_exec_node(
get_yaml_file(flow_folder),
node_name,
flow_inputs=flow_inputs,
dependency_nodes_outputs=dependency_nodes_outputs,
connections=dev_connections,
storage=storage,
raise_ex=True,
)
assert run_info.status == Status.Completed
assert contain_image_reference(run_info)
@pytest.mark.parametrize(
"flow_folder, input_dirs, inputs_mapping, output_key, expected_outputs_number, has_aggregation_node",
[
(
SIMPLE_IMAGE_FLOW,
{"data": "."},
{"image": "${data.image}"},
"output",
4,
False,
),
(
SAMPLE_IMAGE_FLOW_WITH_DEFAULT,
{"data": "."},
{"image_2": "${data.image_2}"},
"output",
4,
False,
),
(
COMPOSITE_IMAGE_FLOW,
{"data": "inputs.jsonl"},
{"image_list": "${data.image_list}", "image_dict": "${data.image_dict}"},
"output",
2,
False,
),
(
CHAT_FLOW_WITH_IMAGE,
{"data": "inputs.jsonl"},
{"question": "${data.question}", "chat_history": "${data.chat_history}"},
"answer",
2,
False,
),
(
EVAL_FLOW_WITH_SIMPLE_IMAGE,
{"data": "inputs.jsonl"},
{"image": "${data.image}"},
"output",
2,
True,
),
(
EVAL_FLOW_WITH_COMPOSITE_IMAGE,
{"data": "inputs.jsonl"},
{"image_list": "${data.image_list}", "image_dict": "${data.image_dict}"},
"output",
2,
True,
),
],
)
def test_batch_engine_with_image(
self, flow_folder, input_dirs, inputs_mapping, output_key, expected_outputs_number, has_aggregation_node
):
flow_file = get_yaml_file(flow_folder)
working_dir = get_flow_folder(flow_folder)
output_dir = Path(mkdtemp())
batch_result = BatchEngine(flow_file, working_dir).run(
input_dirs, inputs_mapping, output_dir, max_lines_count=4
)
assert isinstance(batch_result, BatchResult)
assert batch_result.completed_lines == expected_outputs_number
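        # The output directory should contain only the jsonl result file plus any images persisted by the run.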
assert all(is_jsonl_file(output_file) or is_image_file(output_file) for output_file in output_dir.iterdir())
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
assert len(outputs) == expected_outputs_number
for i, output in enumerate(outputs):
assert isinstance(output, dict)
assert "line_number" in output, f"line_number is not in {i}th output {output}"
assert output["line_number"] == i, f"line_number is not correct in {i}th output {output}"
result = output[output_key][0] if isinstance(output[output_key], list) else output[output_key]
assert all(MIME_PATTERN.search(key) for key in result), f"image is not in {i}th output {output}"
@pytest.mark.parametrize(
"flow_folder, inputs",
get_test_cases_for_simple_input(EVAL_FLOW_WITH_SIMPLE_IMAGE)
+ get_test_cases_for_composite_input(EVAL_FLOW_WITH_COMPOSITE_IMAGE),
)
def test_executor_exec_aggregation_with_image(self, flow_folder, inputs, dev_connections):
working_dir = get_flow_folder(flow_folder)
os.chdir(working_dir)
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp"))
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections, storage=storage)
flow_result = executor.exec_line(inputs, index=0)
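        # Aggregation nodes consume batched inputs, so wrap each single-line value into a one-element list.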
flow_inputs = {k: [v] for k, v in inputs.items()}
aggregation_inputs = {k: [v] for k, v in flow_result.aggregation_inputs.items()}
aggregation_results = executor.exec_aggregation(flow_inputs, aggregation_inputs=aggregation_inputs)
for _, node_run_info in aggregation_results.node_run_infos.items():
assert node_run_info.status == Status.Completed
assert contain_image_reference(node_run_info)
def test_batch_run_then_eval_with_image(self):
        # submit a flow in batch mode first
batch_flow_folder = get_flow_folder(COMPOSITE_IMAGE_FLOW)
batch_flow_file = get_yaml_file(batch_flow_folder)
batch_working_dir = get_flow_folder(batch_flow_folder)
batch_output_dir = Path(mkdtemp())
batch_input_dirs = {"data": "inputs.jsonl"}
batch_inputs_mapping = {"image_list": "${data.image_list}", "image_dict": "${data.image_dict}"}
batch_result = BatchEngine(batch_flow_file, batch_working_dir).run(
batch_input_dirs, batch_inputs_mapping, batch_output_dir
)
assert batch_result.completed_lines == batch_result.total_lines
# use the output of batch run as input of eval flow
eval_flow_folder = get_flow_folder(EVAL_FLOW_WITH_COMPOSITE_IMAGE)
eval_flow_file = get_yaml_file(eval_flow_folder)
eval_working_dir = get_flow_folder(eval_flow_folder)
eval_output_dir = Path(mkdtemp())
eval_input_dirs = {
"data": batch_flow_folder / "inputs.jsonl",
"run.outputs": batch_output_dir / OUTPUT_FILE_NAME,
}
eval_inputs_mapping = {"image_list": "${run.outputs.output}", "image_dict": "${data.image_dict}"}
eval_result = BatchEngine(eval_flow_file, eval_working_dir).run(
eval_input_dirs, eval_inputs_mapping, eval_output_dir
)
assert eval_result.completed_lines == eval_result.total_lines
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/e2etests/test_eager_flow.py | import os
from dataclasses import is_dataclass
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow.batch._batch_engine import OUTPUT_FILE_NAME, BatchEngine
from promptflow.batch._result import BatchResult, LineResult
from promptflow.contracts.run_info import Status
from promptflow.executor._script_executor import ScriptExecutor
from promptflow.executor.flow_executor import FlowExecutor
from ..utils import (
EAGER_FLOW_ROOT,
get_bulk_inputs_from_jsonl,
get_entry_file,
get_flow_folder,
get_flow_inputs_file,
get_yaml_file,
load_jsonl,
)
SAMPLE_FLOW = "web_classification_no_variants"
SAMPLE_EVAL_FLOW = "classification_accuracy_evaluation"
SAMPLE_FLOW_WITH_PARTIAL_FAILURE = "python_tool_partial_failure"
def validate_batch_result(batch_result: BatchResult, flow_folder, output_dir, ensure_output):
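    # Shared assertions for eager-flow batch runs: every input line completes and appears, in order, in the output jsonl.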
assert isinstance(batch_result, BatchResult)
nlines = len(get_bulk_inputs_from_jsonl(flow_folder, root=EAGER_FLOW_ROOT))
assert batch_result.total_lines == nlines
assert batch_result.completed_lines == nlines
assert batch_result.start_time < batch_result.end_time
assert batch_result.system_metrics.duration > 0
outputs = load_jsonl(output_dir / OUTPUT_FILE_NAME)
assert len(outputs) == nlines
for i, output in enumerate(outputs):
assert isinstance(output, dict)
assert "line_number" in output, f"line_number is not in {i}th output {output}"
assert output["line_number"] == i, f"line_number is not correct in {i}th output {output}"
assert ensure_output(output)
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestEagerFlow:
@pytest.mark.parametrize(
"flow_folder, entry, inputs, ensure_output",
[
(
"dummy_flow_with_trace",
"my_flow",
{"text": "text", "models": ["model"]},
lambda x: x == "dummy_output"
),
(
"flow_with_dataclass_output",
"my_flow",
{"text": "text", "models": ["model"]},
lambda x: is_dataclass(x) and x.text == "text" and x.models == ["model"]
),
]
)
def test_flow_run(self, flow_folder, entry, inputs, ensure_output):
# Test submitting eager flow to script executor
flow_file = get_entry_file(flow_folder, root=EAGER_FLOW_ROOT)
executor = ScriptExecutor(flow_file=flow_file, entry=entry)
line_result = executor.exec_line(inputs=inputs, index=0)
assert isinstance(line_result, LineResult)
assert ensure_output(line_result.output)
# Test submitting eager flow to flow executor
working_dir = get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT)
os.chdir(working_dir)
flow_file = get_yaml_file(flow_folder, root=EAGER_FLOW_ROOT)
executor = FlowExecutor.create(flow_file=flow_file, connections={})
line_result = executor.exec_line(inputs=inputs, index=0)
assert isinstance(line_result, LineResult)
assert ensure_output(line_result.output)
@pytest.mark.parametrize(
"flow_folder, inputs, ensure_output",
[
(
"dummy_flow_with_trace",
{"text": "text", "models": ["model"]},
lambda x: x == "dummy_output"
),
(
"flow_with_dataclass_output",
{"text": "text", "models": ["model"]},
lambda x: is_dataclass(x) and x.text == "text" and x.models == ["model"]
),
]
)
def test_flow_run_with_flow_yaml(self, flow_folder, inputs, ensure_output):
working_dir = get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT)
os.chdir(working_dir)
flow_file = get_yaml_file(flow_folder, root=EAGER_FLOW_ROOT)
executor = FlowExecutor.create(flow_file=flow_file, connections={})
line_result = executor.exec_line(inputs=inputs, index=0)
assert isinstance(line_result, LineResult)
assert ensure_output(line_result.output)
def test_exec_line_with_invalid_case(self):
flow_file = get_entry_file("dummy_flow_with_exception", root=EAGER_FLOW_ROOT)
executor = ScriptExecutor(flow_file=flow_file, entry="my_flow")
line_result = executor.exec_line(inputs={"text": "text"}, index=0)
assert isinstance(line_result, LineResult)
assert line_result.output is None
assert line_result.run_info.status == Status.Failed
assert "dummy exception" in line_result.run_info.error["message"]
@pytest.mark.parametrize(
"flow_folder, inputs_mapping, entry, ensure_output",
[
(
"dummy_flow_with_trace",
{"text": "${data.text}", "models": "${data.models}"},
"my_flow",
lambda x: "output" in x and x["output"] == "dummy_output",
),
(
"flow_with_dataclass_output",
{"text": "${data.text}", "models": "${data.models}"},
"my_flow",
lambda x: x["text"] == "text" and isinstance(x["models"], list),
),
(
"flow_with_dataclass_output",
{}, # if inputs_mapping is empty, then the inputs will be the default value
"my_flow",
lambda x: x["text"] == "default_text" and x["models"] == ["default_model"],
)
]
)
def test_batch_run(self, flow_folder, entry, inputs_mapping, ensure_output):
batch_engine = BatchEngine(
get_entry_file(flow_folder, root=EAGER_FLOW_ROOT),
get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT),
entry=entry,
)
input_dirs = {"data": get_flow_inputs_file(flow_folder, root=EAGER_FLOW_ROOT)}
output_dir = Path(mkdtemp())
batch_result = batch_engine.run(input_dirs, inputs_mapping, output_dir)
validate_batch_result(batch_result, flow_folder, output_dir, ensure_output)
@pytest.mark.parametrize(
"flow_folder, inputs_mapping, ensure_output",
[
(
"dummy_flow_with_trace",
{"text": "${data.text}", "models": "${data.models}"},
lambda x: "output" in x and x["output"] == "dummy_output",
),
(
"flow_with_dataclass_output",
{"text": "${data.text}", "models": "${data.models}"},
lambda x: x["text"] == "text" and isinstance(x["models"], list),
),
]
)
def test_batch_run_with_flow_yaml(self, flow_folder, inputs_mapping, ensure_output):
batch_engine = BatchEngine(
get_yaml_file(flow_folder, root=EAGER_FLOW_ROOT),
get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT),
)
input_dirs = {"data": get_flow_inputs_file(flow_folder, root=EAGER_FLOW_ROOT)}
output_dir = Path(mkdtemp())
batch_result = batch_engine.run(input_dirs, inputs_mapping, output_dir)
validate_batch_result(batch_result, flow_folder, output_dir, ensure_output)
def test_batch_run_with_invalid_case(self):
flow_folder = "dummy_flow_with_exception"
batch_engine = BatchEngine(
get_entry_file(flow_folder, root=EAGER_FLOW_ROOT),
get_flow_folder(flow_folder, root=EAGER_FLOW_ROOT),
entry="my_flow",
)
input_dirs = {"data": get_flow_inputs_file(flow_folder, root=EAGER_FLOW_ROOT)}
output_dir = Path(mkdtemp())
batch_result = batch_engine.run(input_dirs, {"text": "${data.text}"}, output_dir)
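        # Every line is expected to fail because the dummy flow raises an exception for each input.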
assert isinstance(batch_result, BatchResult)
nlines = len(get_bulk_inputs_from_jsonl(flow_folder, root=EAGER_FLOW_ROOT))
assert batch_result.total_lines == nlines
assert batch_result.failed_lines == nlines
assert batch_result.start_time < batch_result.end_time
assert batch_result.system_metrics.duration > 0
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool.py | from jinja2 import Template
from promptflow import ToolProvider, tool
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate
class TestCustomLLMTool(ToolProvider):
def __init__(self, connection: AzureOpenAIConnection):
super().__init__()
self.connection = connection
@tool
def call(self, connection_2: AzureOpenAIConnection, api: str, template: PromptTemplate, **kwargs):
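        # This test tool only renders the prompt template and validates its inputs; it does not call a real LLM.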
prompt = Template(template, trim_blocks=True, keep_trailing_newline=True).render(**kwargs)
assert isinstance(self.connection, AzureOpenAIConnection)
assert isinstance(connection_2, AzureOpenAIConnection)
assert api in ["completion", "chat"]
return prompt
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error.py | from promptflow import ToolProvider, tool
class TestLoadErrorTool(ToolProvider):
def __init__(self):
raise Exception("Tool load error.")
@tool
def tool(self, name: str):
return name
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection.py | from dataclasses import dataclass
from promptflow import tool
from promptflow._core.tools_manager import register_connections
from promptflow.contracts.types import Secret
@dataclass
class TestConnection:
name: str
secret: Secret
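# Register the custom connection type so the executor can resolve TestConnection-typed tool inputs.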
register_connections(TestConnection)
@tool
def tool_with_test_conn(conn: TestConnection):
assert isinstance(conn, TestConnection)
return conn.name + conn.secret
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/my_prompt.jinja2 | {# Please replace the template with your own prompt. #}
Write a simple program that displays the greeting message: "{{text}}" when executed.
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/package_tool_definition.json | {
"custom_llm_tool.TestCustomLLMTool.call": {
"class_name": "TestCustomLLMTool",
"function": "call",
"inputs": {
"connection": {"type": ["AzureOpenAIConnection"]},
"connection_2": {"type": ["AzureOpenAIConnection"]},
"api": {"type": ["string"]},
"template": {"type": ["PromptTemplate"]}
},
"module": "custom_llm_tool",
"name": "Test Custom LLM Tool",
"description": "Test Custom LLM Tool",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/flow.dag.yaml | inputs:
text:
type: string
outputs:
output:
type: string
reference: ${my_custom_llm_tool.output}
nodes:
- name: my_custom_llm_tool
type: custom_llm
source:
type: package_with_prompt
tool: custom_llm_tool.TestCustomLLMTool.call
path: ./my_prompt.jinja2
inputs:
connection: azure_open_ai_connection
connection_2: azure_open_ai_connection
api: completion
text: ${inputs.text}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/inputs.json | {}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/samples.json | [
{
"text": "Hello"
},
{
"text": "Hello World!"
}
]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/package_tool_definition.json | {
"custom_llm_tool.TestCustomLLMTool.call": {
"class_name": "TestCustomLLMTool",
"function": "call",
"inputs": {
"connection": {"type": ["AzureOpenAIConnection"]},
"api": {"type": ["string"]},
"template": {"type": ["PromptTemplate"]}
},
"module": "custom_llm_tool",
"name": "Test Custom LLM Tool",
"description": "Test Custom LLM Tool",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/flow.dag.yaml | inputs:
text:
type: string
outputs:
output:
type: string
reference: ${custom_llm_tool_with_duplicated_inputs.output}
nodes:
- name: custom_llm_tool_with_duplicated_inputs
type: custom_llm
source:
type: package_with_prompt
tool: custom_llm_tool.TestCustomLLMTool.call
path: ./prompt_with_duplicated_inputs.jinja2
inputs:
connection: azure_open_ai_connection
api: completion
text: ${inputs.text}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/prompt_with_duplicated_inputs.jinja2 | {{api}}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error/package_tool_definition.json | {
"tool_with_init_error": {
"class_name": "TestLoadErrorTool",
"function": "tool",
"inputs": {
"name": {"type": ["string"]}
},
"module": "tool_with_init_error",
"name": "Tool with init error",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error/flow.dag.yaml | inputs: {}
outputs: {}
nodes:
- name: tool_with_init_error
type: python
source:
type: package
tool: tool_with_init_error
inputs:
name: test_name
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection/package_tool_definition.json | {
"tool_with_connection": {
"function": "tool_with_test_conn",
"inputs": {
"conn": {"type": ["TestConnection"]}
},
"module": "tool_with_connection",
"name": "Test Tool with Connection",
"type": "python"
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools | promptflow_repo/promptflow/src/promptflow/tests/executor/package_tools/tool_with_connection/flow.dag.yaml | inputs: {}
outputs: {}
nodes:
- name: tool_with_conn
type: python
source:
type: package
tool: tool_with_connection
inputs:
conn: test_conn
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_management.py | import json
import pytest
from promptflow._sdk._constants import VIS_JS_BUNDLE_FILENAME
from promptflow.contracts._run_management import VisualizationRender
@pytest.mark.unittest
def test_visualization_render():
data = {"key": "value"}
viz = VisualizationRender(data)
assert viz.data == json.dumps(json.dumps(data))
assert viz.js_path == VIS_JS_BUNDLE_FILENAME
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_flow.py | from pathlib import Path
import pytest
from promptflow._sdk.entities._connection import AzureContentSafetyConnection
from promptflow.contracts._errors import FailedToImportModule
from promptflow.contracts.flow import (
Flow,
FlowInputAssignment,
FlowInputDefinition,
FlowOutputDefinition,
InputAssignment,
InputValueType,
Node,
NodeVariant,
NodeVariants,
ToolSource,
ToolSourceType,
)
from promptflow.contracts.tool import Tool, ToolType, ValueType
from ...utils import EAGER_FLOWS_ROOT, FLOW_ROOT, get_flow_folder, get_flow_package_tool_definition, get_yaml_file
PACKAGE_TOOL_BASE = Path(__file__).parent.parent.parent / "package_tools"
@pytest.mark.e2etest
class TestFlowContract:
@pytest.mark.parametrize(
"flow_folder, expected_connection_names",
[
("web_classification", {"azure_open_ai_connection"}),
("basic-with-connection", {"azure_open_ai_connection"}),
("flow_with_dict_input_with_variant", {"mock_custom_connection"}),
],
)
def test_flow_get_connection_names(self, flow_folder, expected_connection_names):
flow_yaml = get_yaml_file(flow_folder)
flow = Flow.from_yaml(flow_yaml)
assert flow.get_connection_names() == expected_connection_names
def test_flow_get_connection_input_names_for_node_with_variants(self):
# Connection input exists only in python node
flow_folder = "flow_with_dict_input_with_variant"
flow_yaml = get_yaml_file(flow_folder)
flow = Flow.from_yaml(flow_yaml)
assert flow.get_connection_input_names_for_node("print_val") == ["conn"]
def test_flow_get_connection_names_with_package_tool(self, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool"
flow_file = flow_folder / "flow.dag.yaml"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tool_definition)
flow = Flow.from_yaml(flow_file)
connection_names = flow.get_connection_names()
assert connection_names == {"azure_open_ai_connection"}
def test_flow_get_connection_input_names_for_node(self, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool"
flow_file = flow_folder / "flow.dag.yaml"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tool_definition)
flow = Flow.from_yaml(flow_file)
connection_names = flow.get_connection_input_names_for_node(flow.nodes[0].name)
assert connection_names == ["connection", "connection_2"]
assert flow.get_connection_input_names_for_node("not_exist") == []
@pytest.mark.parametrize(
"flow_folder_name, environment_variables_overrides, except_environment_variables",
[
pytest.param(
"flow_with_environment_variables",
{"env2": "runtime_env2", "env10": "aaaaa"},
{
"env1": "2",
"env2": "runtime_env2",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
"env10": "aaaaa",
},
id="LoadEnvVariablesWithOverrides",
),
pytest.param(
"flow_with_environment_variables",
None,
{
"env1": "2",
"env2": "spawn",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
},
id="LoadEnvVariablesWithoutOverrides",
),
pytest.param(
"simple_hello_world",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesWithoutYamlLevelEnvVariables",
),
],
)
def test_flow_get_environment_variables_with_overrides(
        self, flow_folder_name, environment_variables_overrides, expected_environment_variables
):
flow_folder = get_flow_folder(flow_folder_name)
flow_file = "flow.dag.yaml"
flow = Flow.from_yaml(flow_file=flow_file, working_dir=flow_folder)
merged_environment_variables = flow.get_environment_variables_with_overrides(
environment_variables_overrides=environment_variables_overrides,
)
        assert merged_environment_variables == expected_environment_variables
@pytest.mark.parametrize(
"flow_folder_name, folder_root, flow_file, environment_variables_overrides, except_environment_variables",
[
pytest.param(
"flow_with_environment_variables",
FLOW_ROOT,
"flow.dag.yaml",
{"env2": "runtime_env2", "env10": "aaaaa"},
{
"env1": "2",
"env2": "runtime_env2",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
"env10": "aaaaa",
},
id="LoadEnvVariablesWithOverrides",
),
pytest.param(
"flow_with_environment_variables",
FLOW_ROOT,
"flow.dag.yaml",
None,
{
"env1": "2",
"env2": "spawn",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
},
id="LoadEnvVariablesWithoutOverrides",
),
pytest.param(
"simple_hello_world",
FLOW_ROOT,
"flow.dag.yaml",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesWithoutYamlLevelEnvVariables",
),
pytest.param(
"simple_with_yaml",
EAGER_FLOWS_ROOT,
"entry.py",
None,
{},
id="LoadEnvVariablesForEagerFlow",
),
pytest.param(
"simple_with_yaml",
EAGER_FLOWS_ROOT,
"entry.py",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesForEagerFlowWithOverrides",
),
],
)
def test_load_env_variables(
        self, flow_folder_name, folder_root, flow_file, environment_variables_overrides, expected_environment_variables
):
flow_folder = get_flow_folder(flow_folder_name, folder_root)
merged_environment_variables = Flow.load_env_variables(
flow_file=flow_file,
working_dir=flow_folder,
environment_variables_overrides=environment_variables_overrides,
)
        assert merged_environment_variables == expected_environment_variables
@pytest.mark.unittest
class TestFlow:
@pytest.mark.parametrize(
"flow, expected_value",
[
(
Flow(id="flow_id", name="flow_name", nodes=[], inputs={}, outputs={}, tools=[]),
{
"id": "flow_id",
"name": "flow_name",
"nodes": [],
"inputs": {},
"outputs": {},
"tools": [],
"language": "python",
},
),
(
Flow(
id="flow_id",
name="flow_name",
nodes=[Node(name="node1", tool="tool1", inputs={})],
inputs={"input1": FlowInputDefinition(type=ValueType.STRING)},
outputs={"output1": FlowOutputDefinition(type=ValueType.STRING, reference=None)},
tools=[],
),
{
"id": "flow_id",
"name": "flow_name",
"nodes": [{"name": "node1", "tool": "tool1", "inputs": {}}],
"inputs": {"input1": {"type": ValueType.STRING.value}},
"outputs": {"output1": {"type": ValueType.STRING.value}},
"tools": [],
"language": "python",
},
),
],
)
def test_flow_serialize(self, flow, expected_value):
assert flow.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"id": "flow_id",
"name": "flow_name",
"nodes": [{"name": "node1", "tool": "tool1", "inputs": {}, "outputs": {}}],
"inputs": {"input1": {"type": ValueType.STRING.value}},
"outputs": {"output1": {"type": ValueType.STRING.value}},
"tools": [],
},
Flow(
id="flow_id",
name="flow_name",
nodes=[Node(name="node1", tool="tool1", inputs={})],
inputs={
"input1": FlowInputDefinition(
type=ValueType.STRING, description="", enum=[], is_chat_input=False, is_chat_history=None
)
},
outputs={
"output1": FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment(
value="", value_type=InputValueType.LITERAL, section="", property=""
),
description="",
evaluation_only=False,
is_chat_output=False,
)
},
tools=[],
node_variants={},
program_language="python",
environment_variables={},
),
),
],
)
def test_flow_deserialize(self, data, expected_value):
assert Flow.deserialize(data) == expected_value
def test_import_requisites(self):
tool1 = Tool(name="tool1", type=ToolType.PYTHON, inputs={}, module="yaml")
tool2 = Tool(name="tool2", type=ToolType.PYTHON, inputs={}, module="module")
node1 = Node(name="node1", tool="tool1", inputs={}, module="yaml")
node2 = Node(name="node2", tool="tool2", inputs={}, module="module")
with pytest.raises(FailedToImportModule) as e:
Flow._import_requisites([tool1], [node2])
assert str(e.value).startswith(
"Failed to import modules with error: Import node 'node2' provider module 'module' failed."
)
with pytest.raises(FailedToImportModule) as e:
Flow._import_requisites([tool2], [node1])
assert str(e.value).startswith(
"Failed to import modules with error: Import tool 'tool2' module 'module' failed."
)
def test_apply_default_node_variants(self):
node_variant = NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None}, use_variants=False),
description=None,
)
node_variants = {
"print_val": NodeVariants(
default_variant_id="variant1",
variants={"variant1": node_variant},
)
}
flow1 = Flow(
id="test_flow_id",
name="test_flow",
nodes=[Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=True)],
inputs={},
outputs={},
tools=[],
node_variants=node_variants,
)
# test when node.use_variants is True
flow1._apply_default_node_variants()
assert flow1.nodes[0].use_variants is False
assert flow1.nodes[0].inputs.keys() == {"input2"}
assert flow1.nodes[0].name == "print_val"
flow2 = Flow(
id="test_flow_id",
name="test_flow",
nodes=[Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=False)],
inputs={},
outputs={},
tools=[],
node_variants=node_variants,
)
# test when node.use_variants is False
tmp_nodes = flow2.nodes
flow2._apply_default_node_variants()
assert flow2.nodes == tmp_nodes
@pytest.mark.parametrize(
"node_variants",
[
(None),
(
{
"test": NodeVariants(
default_variant_id="variant1",
variants={
"variant1": NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None})
)
},
)
}
),
(
{
"print_val": NodeVariants(
default_variant_id="test",
variants={
"variant1": NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None})
)
},
)
}
),
],
)
def test_apply_default_node_variant(self, node_variants):
node = Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=True)
assert Flow._apply_default_node_variant(node, node_variants) == node
def test_apply_node_overrides(self):
llm_node = Node(name="llm_node", tool=None, inputs={}, connection="open_ai_connection")
test_node = Node(
name="test_node", tool=None, inputs={"test": InputAssignment("test_value1", InputValueType.LITERAL)}
)
flow = Flow(id="test_flow_id", name="test_flow", nodes=[llm_node, test_node], inputs={}, outputs={}, tools=[])
assert flow == flow._apply_node_overrides(None)
assert flow == flow._apply_node_overrides({})
node_overrides = {
"other_node.connection": "some_connection",
}
with pytest.raises(ValueError):
flow._apply_node_overrides(node_overrides)
node_overrides = {
"llm_node.connection": "custom_connection",
"test_node.test": "test_value2",
}
flow = flow._apply_node_overrides(node_overrides)
assert flow.nodes[0].connection == "custom_connection"
assert flow.nodes[1].inputs["test"].value == "test_value2"
def test_has_aggregation_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow1 = Flow(id="id", name="name", nodes=[llm_node], inputs={}, outputs={}, tools=[])
assert not flow1.has_aggregation_node()
flow2 = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow2.has_aggregation_node()
def test_get_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
flow = Flow(id="id", name="name", nodes=[llm_node], inputs={}, outputs={}, tools=[])
assert flow.get_node("llm_node") is llm_node
assert flow.get_node("other_node") is None
def test_get_tool(self):
tool = Tool(name="tool", type=ToolType.PYTHON, inputs={})
flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[tool])
assert flow.get_tool("tool") is tool
assert flow.get_tool("other_tool") is None
def test_is_reduce_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_reduce_node("llm_node")
assert flow.is_reduce_node("aggre_node")
def test_is_normal_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow.is_normal_node("llm_node")
assert not flow.is_normal_node("aggre_node")
def test_is_llm_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={}, type=ToolType.LLM)
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow.is_llm_node(llm_node)
assert not flow.is_llm_node(aggre_node)
def test_is_referenced_by_flow_output(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
output = {
"output": FlowOutputDefinition(
type=ValueType.STRING, reference=InputAssignment("llm_node", InputValueType.NODE_REFERENCE, "output")
)
}
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs=output, tools=[])
assert flow.is_referenced_by_flow_output(llm_node)
assert not flow.is_referenced_by_flow_output(aggre_node)
def test_is_node_referenced_by(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(
name="aggre_node",
tool=None,
inputs={"input": InputAssignment(value="llm_node", value_type=InputValueType.NODE_REFERENCE)},
aggregation=True,
)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_node_referenced_by(aggre_node, llm_node)
assert flow.is_node_referenced_by(llm_node, aggre_node)
def test_is_referenced_by_other_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(
name="aggre_node",
tool=None,
inputs={"input": InputAssignment(value="llm_node", value_type=InputValueType.NODE_REFERENCE)},
aggregation=True,
)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_referenced_by_other_node(aggre_node)
assert flow.is_referenced_by_other_node(llm_node)
def test_is_chat_flow(self):
chat_input = {"question": FlowInputDefinition(type=ValueType.STRING, is_chat_input=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs=chat_input, outputs={}, tools=[])
assert not standard_flow.is_chat_flow()
assert chat_flow.is_chat_flow()
def test_get_chat_input_name(self):
chat_input = {"question": FlowInputDefinition(type=ValueType.STRING, is_chat_input=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs=chat_input, outputs={}, tools=[])
assert standard_flow.get_chat_input_name() is None
assert chat_flow.get_chat_input_name() == "question"
def test_get_chat_output_name(self):
chat_output = {"answer": FlowOutputDefinition(type=ValueType.STRING, reference=None, is_chat_output=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs=chat_output, tools=[])
assert standard_flow.get_chat_output_name() is None
assert chat_flow.get_chat_output_name() == "answer"
def test_replace_with_variant(self):
node0 = Node(name="node0", tool=None, inputs={"input0": None}, use_variants=True)
node1 = Node(name="node1", tool="tool1", inputs={"input1": None}, use_variants=False)
node2 = Node(name="node2", tool="tool2", inputs={"input2": None}, use_variants=False)
node_variant = Node(name="node0", tool="tool3", inputs={"input3": None}, use_variants=False)
node_variants = {
"print_val": NodeVariants(
default_variant_id="variant1",
variants={"variant1": NodeVariant(node_variant, None)},
)
}
flow = Flow("test_flow_id", "test_flow", [node0, node1, node2], {}, {}, [], node_variants)
# flow = Flow.from_yaml(get_yaml_file("web_classification"))
tool_cnt = len(flow.tools)
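        # Replacing node0 with its variant should also append the two tools passed in below, growing the tool list by 2.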
flow._replace_with_variant(node_variant, [flow.nodes[1].tool, flow.nodes[2].tool])
assert "input3" in flow.nodes[0].inputs
assert flow.nodes[0].tool == "tool3"
assert len(flow.tools) == tool_cnt + 2
@pytest.mark.unittest
class TestInputAssignment:
@pytest.mark.parametrize(
"value, expected_value",
[
(InputAssignment("value", InputValueType.LITERAL), "value"),
(InputAssignment("value", InputValueType.FLOW_INPUT), "${flow.value}"),
(InputAssignment("value", InputValueType.NODE_REFERENCE, "section"), "${value.section}"),
(
InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property"),
"${value.section.property}",
),
(InputAssignment(AzureContentSafetyConnection, InputValueType.LITERAL, "section", "property"), "ABCMeta"),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"serialized_value, expected_value",
[
(
"${value.section.property}",
InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property"),
),
(
"${flow.section.property}",
FlowInputAssignment("section.property", prefix="flow.", value_type=InputValueType.FLOW_INPUT),
),
("${value}", InputAssignment("value", InputValueType.NODE_REFERENCE, "output")),
("$value", InputAssignment("$value", InputValueType.LITERAL)),
("value", InputAssignment("value", InputValueType.LITERAL)),
],
)
def test_deserialize(self, serialized_value, expected_value):
input_assignment = InputAssignment.deserialize(serialized_value)
assert input_assignment == expected_value
@pytest.mark.parametrize(
"serialized_reference, expected_value",
[
("input", InputAssignment("input", InputValueType.NODE_REFERENCE, "output")),
("flow.section", FlowInputAssignment("section", value_type=InputValueType.FLOW_INPUT, prefix="flow.")),
(
"flow.section.property",
FlowInputAssignment("section.property", value_type=InputValueType.FLOW_INPUT, prefix="flow."),
),
],
)
def test_deserialize_reference(self, serialized_reference, expected_value):
assert InputAssignment.deserialize_reference(serialized_reference) == expected_value
@pytest.mark.parametrize(
"serialized_node_reference, expected_value",
[
("value", InputAssignment("value", InputValueType.NODE_REFERENCE, "output")),
("value.section", InputAssignment("value", InputValueType.NODE_REFERENCE, "section")),
("value.section.property", InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property")),
],
)
def test_deserialize_node_reference(self, serialized_node_reference, expected_value):
assert InputAssignment.deserialize_node_reference(serialized_node_reference) == expected_value
@pytest.mark.unittest
class TestFlowInputAssignment:
@pytest.mark.parametrize(
"input_value, expected_value",
[
("flow.section.property", True),
("inputs.section.property", True),
("section.property", False),
("", False),
],
)
def test_is_flow_input(self, input_value, expected_value):
assert FlowInputAssignment.is_flow_input(input_value) == expected_value
def test_deserialize(self):
expected_input = FlowInputAssignment("section.property", prefix="inputs.", value_type=InputValueType.FLOW_INPUT)
assert FlowInputAssignment.deserialize("inputs.section.property") == expected_input
expected_flow = FlowInputAssignment("section.property", prefix="flow.", value_type=InputValueType.FLOW_INPUT)
assert FlowInputAssignment.deserialize("flow.section.property") == expected_flow
with pytest.raises(ValueError):
FlowInputAssignment.deserialize("value")
@pytest.mark.unittest
class TestToolSource:
@pytest.mark.parametrize(
"tool_source, expected_value",
[
({}, ToolSource(type=ToolSourceType.Code)),
({"type": ToolSourceType.Code.value}, ToolSource(type=ToolSourceType.Code)),
(
{"type": ToolSourceType.Package.value, "tool": "tool", "path": "path"},
ToolSource(type=ToolSourceType.Package, tool="tool", path="path"),
),
],
)
def test_deserialize(self, tool_source, expected_value):
assert ToolSource.deserialize(tool_source) == expected_value
@pytest.mark.unittest
class TestNode:
@pytest.mark.parametrize(
"node, expected_value",
[
(
Node(name="test_node", tool="test_tool", inputs={}),
{"name": "test_node", "tool": "test_tool", "inputs": {}},
),
(
Node(name="test_node", tool="test_tool", inputs={}, aggregation=True),
{"name": "test_node", "tool": "test_tool", "inputs": {}, "aggregation": True, "reduce": True},
),
],
)
def test_serialize(self, node, expected_value):
assert node.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{"name": "test_node", "tool": "test_tool", "inputs": {}},
Node(name="test_node", tool="test_tool", inputs={}),
),
(
{"name": "test_node", "tool": "test_tool", "inputs": {}, "aggregation": True},
Node(name="test_node", tool="test_tool", inputs={}, aggregation=True),
),
],
)
def test_deserialize(self, data, expected_value):
assert Node.deserialize(data) == expected_value
@pytest.mark.unittest
class TestFlowInputDefinition:
@pytest.mark.parametrize(
"value, expected_value",
[
(FlowInputDefinition(type=ValueType.BOOL), {"type": ValueType.BOOL.value}),
(
FlowInputDefinition(
type=ValueType.STRING,
default="default",
description="description",
enum=["enum1", "enum2"],
is_chat_input=True,
is_chat_history=True,
),
{
"type": ValueType.STRING.value,
"default": "default",
"description": "description",
"enum": ["enum1", "enum2"],
"is_chat_input": True,
"is_chat_history": True,
},
),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"type": ValueType.STRING,
"default": "default",
"description": "description",
"enum": ["enum1", "enum2"],
"is_chat_input": True,
"is_chat_history": True,
},
FlowInputDefinition(
type=ValueType.STRING,
default="default",
description="description",
enum=["enum1", "enum2"],
is_chat_input=True,
is_chat_history=True,
),
),
(
{
"type": ValueType.STRING,
},
FlowInputDefinition(
type=ValueType.STRING, description="", enum=[], is_chat_input=False, is_chat_history=None
),
),
],
)
def test_deserialize(self, data, expected_value):
assert FlowInputDefinition.deserialize(data) == expected_value
@pytest.mark.unittest
class TestFlowOutputDefinition:
@pytest.mark.parametrize(
"value, expected_value",
[
(FlowOutputDefinition(type=ValueType.BOOL, reference=None), {"type": ValueType.BOOL.value}),
(
FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment("value", InputValueType.NODE_REFERENCE),
description="description",
evaluation_only=True,
is_chat_output=True,
),
{
"type": ValueType.STRING.value,
"reference": "${value.}",
"description": "description",
"evaluation_only": True,
"is_chat_output": True,
},
),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"type": ValueType.STRING,
},
FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment("", InputValueType.LITERAL),
),
),
],
)
def test_deserialize(self, data, expected_value):
assert FlowOutputDefinition.deserialize(data) == expected_value
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_mode.py | import pytest
from promptflow.contracts.run_mode import RunMode
@pytest.mark.unittest
@pytest.mark.parametrize(
"run_mode, expected",
[
("Test", RunMode.Test),
("SingleNode", RunMode.SingleNode),
("Batch", RunMode.Batch),
("Default", RunMode.Test),
],
)
def test_parse(run_mode, expected):
assert RunMode.parse(run_mode) == expected
@pytest.mark.unittest
def test_parse_invalid():
with pytest.raises(ValueError):
RunMode.parse(123)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_types.py | import pytest
from promptflow.contracts.types import AssistantDefinition, Secret, PromptTemplate, FilePath
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker
@pytest.mark.unittest
def test_secret():
secret = Secret('my_secret')
secret.set_secret_name('secret_name')
assert secret.secret_name == 'secret_name'
@pytest.mark.unittest
def test_prompt_template():
prompt = PromptTemplate('my_prompt')
assert isinstance(prompt, str)
assert str(prompt) == 'my_prompt'
@pytest.mark.unittest
def test_file_path():
file_path = FilePath('my_file_path')
assert isinstance(file_path, str)
@pytest.mark.unittest
def test_assistant_definition():
data = {"model": "model", "instructions": "instructions", "tools": []}
assistant_definition = AssistantDefinition.deserialize(data)
assert isinstance(assistant_definition, AssistantDefinition)
assert assistant_definition.model == "model"
assert assistant_definition.instructions == "instructions"
assert assistant_definition.tools == []
assert assistant_definition.serialize() == data
assert isinstance(assistant_definition.init_tool_invoker(), AssistantToolInvoker)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_info.py | from datetime import datetime
import pytest
from promptflow.contracts.run_info import FlowRunInfo, RunInfo, Status
@pytest.mark.unittest
class TestStatus:
@pytest.mark.parametrize(
"status,expected",
[
(Status.Completed, True),
(Status.Failed, True),
(Status.Bypassed, True),
(Status.Canceled, True),
(Status.Running, False),
(Status.Preparing, False),
(Status.NotStarted, False),
(Status.CancelRequested, False),
(123, False),
],
)
def test_status_is_terminated(self, status, expected):
assert Status.is_terminated(status) == expected
@pytest.mark.unittest
class TestRunInfo:
def test_creation(self):
run_info = RunInfo(
node="node1",
flow_run_id="123",
run_id="123:456",
status=Status.Running,
inputs=[],
output={},
metrics={},
error={},
parent_run_id="789",
start_time=datetime.now(),
end_time=datetime.now(),
system_metrics={},
)
assert run_info.node == "node1"
assert run_info.flow_run_id == "123"
assert run_info.run_id == "123:456"
assert run_info.status == Status.Running
def test_deserialize(self):
run_info_dict = {
"node": "get_answer",
"flow_run_id": "",
"run_id": "dummy_run_id",
"status": "Completed",
"inputs": {"question": "string"},
"output": "Hello world: What's promptflow?",
"metrics": None,
"error": None,
"parent_run_id": "dummy_flow_run_id",
"start_time": "2023-11-24T06:03:20.2688262Z",
"end_time": "2023-11-24T06:03:20.268858Z",
"index": 0,
"api_calls": None,
"variant_id": "",
"cached_run_id": None,
"cached_flow_run_id": None,
"logs": None,
"system_metrics": {"duration": "00:00:00.0000318", "total_tokens": 0},
"result": "Hello world: What's promptflow?",
}
run_info = RunInfo.deserialize(run_info_dict)
assert run_info.index == 0
assert isinstance(run_info.start_time, datetime) and isinstance(run_info.end_time, datetime)
assert run_info.status == Status.Completed
assert run_info.run_id == "dummy_run_id"
assert run_info.api_calls is None
assert run_info.system_metrics == {"duration": "00:00:00.0000318", "total_tokens": 0}
assert run_info.output == "Hello world: What's promptflow?"
@pytest.mark.unittest
class TestFlowRunInfo:
def test_creation(self):
flow_run_info = FlowRunInfo(
run_id="123:456",
status=Status.Running,
error={},
inputs={},
output={},
metrics={},
request={},
parent_run_id="789",
root_run_id="123",
source_run_id="456",
flow_id="flow1",
start_time=datetime.now(),
end_time=datetime.now(),
system_metrics={},
upload_metrics=False,
)
assert flow_run_info.run_id == "123:456"
assert flow_run_info.status == Status.Running
assert flow_run_info.flow_id == "flow1"
def test_deserialize(self):
flow_run_info_dict = {
"run_id": "dummy_run_id",
"status": "Completed",
"error": None,
"inputs": {"question": "What's promptflow?"},
"output": {"answer": "Hello world: What's promptflow?"},
"metrics": None,
"request": None,
"parent_run_id": None,
"root_run_id": None,
"source_run_id": None,
"flow_id": "Flow",
"start_time": "2023-11-23T10:58:37.9436245Z",
"end_time": "2023-11-23T10:58:37.9590789Z",
"index": 0,
"api_calls": None,
"variant_id": "",
"name": "",
"description": "",
"tags": None,
"system_metrics": {"duration": "00:00:00.0154544", "total_tokens": 0},
"result": {"answer": "Hello world: What's promptflow?"},
"upload_metrics": False,
}
flow_run_info = FlowRunInfo.deserialize(flow_run_info_dict)
assert flow_run_info.index == 0
assert isinstance(flow_run_info.start_time, datetime) and isinstance(flow_run_info.end_time, datetime)
assert flow_run_info.status == Status.Completed
assert flow_run_info.run_id == "dummy_run_id"
assert flow_run_info.api_calls is None
assert flow_run_info.system_metrics == {"duration": "00:00:00.0154544", "total_tokens": 0}
assert flow_run_info.output == {"answer": "Hello world: What's promptflow?"}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_tool.py | from enum import Enum
from typing import Any, Callable, NewType, Optional, Tuple, TypeVar, Union
import pytest
from promptflow._core.tools_manager import connections
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow._sdk.entities._connection import AzureContentSafetyConnection
from promptflow.contracts.multimedia import Image
from promptflow.contracts.run_info import Status
from promptflow.contracts.tool import (
AssistantDefinition,
ConnectionType,
InputDefinition,
OutputDefinition,
Tool,
ToolType,
ValueType,
_deserialize_enum,
)
from promptflow.contracts.types import FilePath, PromptTemplate, Secret
class MyConnection(CustomStrongTypeConnection):
pass
my_connection = MyConnection(name="my_connection", secrets={"key": "value"})
def some_function():
pass
class TestStatus(Enum):
Running = 1
Preparing = 2
Completed = 3
@pytest.mark.unittest
@pytest.mark.parametrize(
"enum, value, expected",
[
(Status, "Running", Status.Running),
(Status, "running", Status.Running),
(Status, "FAILED", Status.Failed),
(Status, "UNKNOWN", "UNKNOWN"),
(TestStatus, "Running", "Running"),
],
)
def test_deserialize_enum(enum, value, expected):
assert _deserialize_enum(enum, value) == expected
@pytest.mark.unittest
class TestValueType:
@pytest.mark.parametrize(
"value, expected",
[
(1, ValueType.INT),
(1.0, ValueType.DOUBLE),
(True, ValueType.BOOL),
("string", ValueType.STRING),
([], ValueType.LIST),
({}, ValueType.OBJECT),
(Secret("secret"), ValueType.SECRET),
(PromptTemplate("prompt"), ValueType.PROMPT_TEMPLATE),
(FilePath("file_path"), ValueType.FILE_PATH),
(AssistantDefinition("model", "instructions", []), ValueType.ASSISTANT_DEFINITION),
],
)
def test_from_value(self, value, expected):
assert ValueType.from_value(value) == expected
@pytest.mark.parametrize(
"value, expected",
[
(int, ValueType.INT),
(float, ValueType.DOUBLE),
(bool, ValueType.BOOL),
(str, ValueType.STRING),
(list, ValueType.LIST),
(dict, ValueType.OBJECT),
(Secret, ValueType.SECRET),
(PromptTemplate, ValueType.PROMPT_TEMPLATE),
(FilePath, ValueType.FILE_PATH),
(Image, ValueType.IMAGE),
(AssistantDefinition, ValueType.ASSISTANT_DEFINITION),
],
)
def test_from_type(self, value, expected):
assert ValueType.from_type(value) == expected
@pytest.mark.parametrize(
"value, value_type, expected",
[
("1", ValueType.INT, 1),
("1.0", ValueType.DOUBLE, 1.0),
("true", ValueType.BOOL, True),
("false", ValueType.BOOL, False),
(True, ValueType.BOOL, True),
(123, ValueType.STRING, "123"),
('["a", "b", "c"]', ValueType.LIST, ["a", "b", "c"]),
('{"key": "value"}', ValueType.OBJECT, {"key": "value"}),
("[1, 2, 3]", ValueType.OBJECT, [1, 2, 3]),
("{", ValueType.OBJECT, "{"),
([1, 2, 3], ValueType.OBJECT, [1, 2, 3]),
],
)
def test_parse(self, value, value_type, expected):
assert value_type.parse(value) == expected
@pytest.mark.parametrize(
"value, value_type",
[
("1", ValueType.BOOL),
({}, ValueType.LIST),
],
)
def test_parse_error(self, value, value_type):
with pytest.raises(ValueError):
value_type.parse(value)
@pytest.mark.unittest
class TestConnectionType:
@pytest.mark.parametrize(
"type_name, expected",
[
("AzureContentSafetyConnection", connections.get("AzureContentSafetyConnection")),
("AzureOpenAIConnection", connections.get("AzureOpenAIConnection")),
("_Connection", connections.get("_Connection")),
("unknown", None),
(123, None),
],
)
def test_get_connection_class(self, type_name, expected):
assert ConnectionType.get_connection_class(type_name) == expected
@pytest.mark.parametrize(
"type_name, expected",
[
("AzureContentSafetyConnection", True),
("AzureOpenAIConnection", True),
("_Connection", True),
("unknown", False),
(123, False),
],
)
def test_is_connection_class_name(self, type_name, expected):
assert ConnectionType.is_connection_class_name(type_name) == expected
@pytest.mark.parametrize(
"value, expected",
[
(connections.get("AzureContentSafetyConnection"), True),
(AzureContentSafetyConnection("api_key", "endpoint"), True),
(Status, False),
            ("non_connection_instance", False),
],
)
def test_is_connection_value(self, value, expected):
assert ConnectionType.is_connection_value(value) == expected
@pytest.mark.parametrize(
"val, expected_res",
[
(my_connection, True),
(MyConnection, True),
(list, False),
# (list[str], False), # Python 3.9
# (list[int], False),
([1, 2, 3], False),
(float, False),
(int, False),
(5, False),
(str, False),
(some_function, False),
(Union[str, int], False),
# ((int | str), False), # Python 3.10
(tuple, False),
# (tuple[str, int], False), # Python 3.9
(Tuple[int, ...], False),
# (dict[str, Any], False), # Python 3.9
({"test1": [1, 2, 3], "test2": [4, 5, 6], "test3": [7, 8, 9]}, False),
(Any, False),
(None, False),
(Optional[str], False),
(TypeVar("T"), False),
(TypeVar, False),
(Callable, False),
(Callable[..., Any], False),
(NewType("MyType", int), False),
],
)
def test_is_custom_strong_type(self, val, expected_res):
assert ConnectionType.is_custom_strong_type(val) == expected_res
def test_serialize_conn(self):
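        # Serializing the class object itself yields its type name (the metaclass, "ABCMeta");
        # serializing an instance yields the connection class name.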
assert ConnectionType.serialize_conn(AzureContentSafetyConnection) == "ABCMeta"
connection_instance = AzureContentSafetyConnection("api_key", "endpoint")
assert ConnectionType.serialize_conn(connection_instance) == "AzureContentSafetyConnection"
with pytest.raises(ValueError):
ConnectionType.serialize_conn("non_connection_instance")
@pytest.mark.unittest
class TestInputDefinition:
def test_serialize(self):
# test when len(type) == 1
input_def = InputDefinition(
[ValueType.STRING],
default="Default",
description="Description",
enum=["A", "B", "C"],
custom_type=["customtype"],
)
serialized = input_def.serialize()
assert serialized == {
"type": "string",
"default": "Default",
"description": "Description",
"enum": ["A", "B", "C"],
"custom_type": ["customtype"],
}
# test when len(type) > 1
input_def = InputDefinition([ValueType.STRING, ValueType.INT])
serialized = input_def.serialize()
assert serialized == {"type": ["string", "int"]}
def test_deserialize(self):
serialized = {"type": "string", "default": "Default", "description": "Description", "enum": ["A", "B", "C"]}
deserialized = InputDefinition.deserialize(serialized)
assert deserialized.type == [ValueType.STRING]
assert deserialized.default == "Default"
assert deserialized.description == "Description"
assert deserialized.enum == ["A", "B", "C"]
serialized = {"type": ["string", "int"]}
deserialized = InputDefinition.deserialize(serialized)
assert deserialized.type == [ValueType.STRING, ValueType.INT]
assert deserialized.default == ""
assert deserialized.description == ""
assert deserialized.enum == []
@pytest.mark.unittest
class TestOutDefinition:
@pytest.mark.parametrize(
"value, expected",
[
(
OutputDefinition([ValueType.STRING], description="Description", is_property=True),
{"type": "string", "description": "Description", "is_property": True},
),
(OutputDefinition([ValueType.STRING, ValueType.INT]), {"type": ["string", "int"], "is_property": False}),
],
)
def test_serialize(self, value, expected):
assert value.serialize() == expected
@pytest.mark.parametrize(
"value, expected",
[
(
{"type": "string", "description": "Description", "is_property": True},
OutputDefinition([ValueType.STRING], description="Description", is_property=True),
),
({"type": ["string", "int"]}, OutputDefinition([ValueType.STRING, ValueType.INT])),
],
)
def test_deserialize(self, value, expected):
assert OutputDefinition.deserialize(value) == expected
@pytest.mark.unittest
class TestTool:
@pytest.mark.parametrize(
"tool_type, expected_keys",
[
(ToolType._ACTION, ["name", "description", "enable_kwargs"]),
(ToolType.LLM, ["name", "type", "inputs", "description", "enable_kwargs"]),
],
)
def test_serialize_tool(self, tool_type, expected_keys):
tool = Tool(name="test_tool", type=tool_type, inputs={}, outputs={}, description="description")
serialized_tool = tool.serialize()
assert set(serialized_tool.keys()) == set(expected_keys)
def test_deserialize_tool(self):
data = {
"name": "test_tool",
"type": "LLM",
"inputs": {"input1": {"type": "ValueType1"}},
}
tool = Tool.deserialize(data)
assert tool.name == data["name"]
assert tool.type == ToolType[data["type"]]
assert "input1" in tool.inputs
@pytest.mark.parametrize(
"tooltype, connection_type, expected",
[
(ToolType.LLM, None, True),
(ToolType._ACTION, ["AzureContentSafetyConnection"], True),
(ToolType._ACTION, None, False),
],
)
def test_require_connection(self, tooltype, connection_type, expected):
tool = Tool(name="Test Tool", type=tooltype, inputs={}, connection_type=connection_type)
assert tool._require_connection() == expected
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/contracts/test_multimedia.py | import pytest
from promptflow.contracts.multimedia import Image, PFBytes
@pytest.mark.unittest
class TestMultimediaContract:
@pytest.mark.parametrize(
"value, mime_type, source_url",
[
(b"test", "image/*", None),
(b"test", "image/jpg", None),
(b"test", "image/png", None),
(b"test", None, None),
(b"test", "image/*", "mock_url"),
]
)
def test_image_contract(self, value, mime_type, source_url):
image = Image(value, mime_type, source_url)
if mime_type is None:
mime_type = "image/*"
assert image._mime_type == mime_type
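        # "a94a8fe5" matches the first 8 hex digits of sha1(b"test"); "dGVzdA==" is base64(b"test").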
assert image._hash == "a94a8fe5"
assert image.to_base64() == "dGVzdA=="
assert image.to_base64(with_type=True) == f"data:{mime_type};base64,dGVzdA=="
assert image.to_base64(with_type=True, dict_type=True) == {f"data:{mime_type};base64": "dGVzdA=="}
assert bytes(image) == value
assert image.source_url == source_url
assert str(image) == "Image(a94a8fe5)"
assert repr(image) == "Image(a94a8fe5)"
assert image.serialize() == "Image(a94a8fe5)"
assert image.serialize(lambda x: x.to_base64()) == "dGVzdA=="
@pytest.mark.parametrize(
"value, mime_type, source_url",
[
(b"test", "image/*", None),
(b"test", "image/jpg", None),
(b"test", "image/png", None),
(b"test", "image/*", "mock_url"),
]
)
def test_pfbytes_contract(self, value, mime_type, source_url):
pfBytes = PFBytes(value, mime_type, source_url)
assert pfBytes._mime_type == mime_type
assert pfBytes._hash == "a94a8fe5"
assert pfBytes.to_base64() == "dGVzdA=="
assert pfBytes.to_base64(with_type=True) == f"data:{mime_type};base64,dGVzdA=="
assert pfBytes.to_base64(with_type=True, dict_type=True) == {f"data:{mime_type};base64": "dGVzdA=="}
assert bytes(pfBytes) == value
assert pfBytes.source_url == source_url
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_validator.py | import pytest
from promptflow.contracts.flow import Flow, FlowInputDefinition
from promptflow.contracts.tool import ValueType
from promptflow.executor._errors import InputParseError, InputTypeError, InvalidAggregationInput, InvalidFlowRequest
from promptflow.executor.flow_validator import FlowValidator
from ...utils import WRONG_FLOW_ROOT, get_flow_from_folder
@pytest.mark.unittest
class TestFlowValidator:
@pytest.mark.parametrize(
"flow_folder, expected_node_order",
[
("unordered_nodes", ["first_node", "second_node", "third_node"]),
("unordered_nodes_with_skip", ["first_node", "second_node", "third_node"]),
("unordered_nodes_with_activate", ["first_node", "second_node", "third_node"]),
],
)
def test_ensure_nodes_order(self, flow_folder, expected_node_order):
flow = get_flow_from_folder(flow_folder)
flow = FlowValidator._ensure_nodes_order(flow)
node_order = [node.name for node in flow.nodes]
assert node_order == expected_node_order
@pytest.mark.parametrize(
"flow_folder, error_message",
[
(
"nodes_cycle",
(
"Invalid node definitions found in the flow graph. Node circular dependency has been detected "
"among the nodes in your flow. Kindly review the reference relationships for the nodes "
"['first_node', 'second_node'] and resolve the circular reference issue in the flow."
),
),
(
"nodes_cycle_with_activate",
(
"Invalid node definitions found in the flow graph. Node circular dependency has been detected "
"among the nodes in your flow. Kindly review the reference relationships "
"for the nodes ['first_node', 'second_node'] and resolve the circular reference issue in the flow."
),
),
(
"wrong_node_reference",
(
"Invalid node definitions found in the flow graph. Node 'second_node' references a non-existent "
"node 'third_node' in your flow. Please review your flow to ensure that the node "
"name is accurately specified."
),
),
(
"non_aggregation_reference_aggregation",
(
"Invalid node definitions found in the flow graph. Non-aggregate node 'test_node' "
"cannot reference aggregate nodes {'calculate_accuracy'}. Please review and rectify "
"the node reference."
),
),
(
"aggregation_activate_reference_non_aggregation",
(
"Invalid node definitions found in the flow graph. Non-aggregation node 'grade' cannot be "
"referenced in the activate config of the aggregation node 'calculate_accuracy'. Please "
"review and rectify the node reference."
),
),
],
)
def test_ensure_nodes_order_with_exception(self, flow_folder, error_message):
flow = get_flow_from_folder(flow_folder, root=WRONG_FLOW_ROOT)
with pytest.raises(InvalidFlowRequest) as e:
FlowValidator._ensure_nodes_order(flow)
assert str(e.value) == error_message, "Expected: {}, Actual: {}".format(error_message, str(e.value))
@pytest.mark.parametrize(
"aggregated_flow_inputs, aggregation_inputs, error_message",
[
(
{},
{
"input1": "value1",
},
"The input for aggregation is incorrect. "
"The value for aggregated reference input 'input1' should be a list, "
"but received str. Please adjust the input value to match the expected format.",
),
(
{
"input1": "value1",
},
{},
"The input for aggregation is incorrect. "
"The value for aggregated flow input 'input1' should be a list, "
"but received str. Please adjust the input value to match the expected format.",
),
(
{"input1": ["value1_1", "value1_2"]},
{"input_2": ["value2_1"]},
"The input for aggregation is incorrect. The length of all aggregated inputs should be the same. "
"Current input lengths are: {'input1': 2, 'input_2': 1}. "
"Please adjust the input value in your input data.",
),
(
{
"input1": "value1",
},
{
"input1": "value1",
},
"The input for aggregation is incorrect. "
"The input 'input1' appears in both aggregated flow input and aggregated reference input. "
"Please remove one of them and try the operation again.",
),
],
)
def test_validate_aggregation_inputs_error(self, aggregated_flow_inputs, aggregation_inputs, error_message):
with pytest.raises(InvalidAggregationInput) as e:
FlowValidator._validate_aggregation_inputs(aggregated_flow_inputs, aggregation_inputs)
assert str(e.value) == error_message
@pytest.mark.parametrize(
"flow_folder",
["simple_flow_with_python_tool_and_aggregate"],
)
def test_ensure_outputs_valid_with_aggregation(self, flow_folder):
flow = get_flow_from_folder(flow_folder)
assert flow.outputs["content"] is not None
assert flow.outputs["aggregate_content"] is not None
flow.outputs = FlowValidator._ensure_outputs_valid(flow)
print(flow.outputs)
assert flow.outputs["content"] is not None
assert flow.outputs.get("aggregate_content") is None
@pytest.mark.parametrize(
"flow_folder, inputs, index, error_type, error_message",
[
(
"flow_with_list_input",
{"key": "['hello']"},
None,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'key' was "
"interpreted as JSON string since its type is 'list'. However, the value "
"'['hello']' is invalid for JSON parsing. Error details: (JSONDecodeError) "
"Expecting value: line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
(
"flow_with_list_input",
{"key": "['hello']"},
0,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'key' in line 0 of input data was "
"interpreted as JSON string since its type is 'list'. However, the value "
"'['hello']' is invalid for JSON parsing. Error details: (JSONDecodeError) "
"Expecting value: line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
],
)
def test_resolve_flow_inputs_type_json_error_for_list_type(
self, flow_folder, inputs, index, error_type, error_message
):
flow = get_flow_from_folder(flow_folder)
with pytest.raises(error_type) as exe_info:
FlowValidator.resolve_flow_inputs_type(flow, inputs, idx=index)
assert error_message == exe_info.value.message
@pytest.mark.parametrize(
"inputs, expected_result",
[({"test_input": ["1", "2"]}, {"test_input": [1, 2]})],
)
def test_resolve_aggregated_flow_inputs_type(self, inputs, expected_result):
flow = Flow(
id="fakeId",
name=None,
nodes=[],
inputs={"test_input": FlowInputDefinition(type=ValueType.INT)},
outputs=None,
tools=[],
)
result = FlowValidator.resolve_aggregated_flow_inputs_type(flow, inputs)
assert result == expected_result
@pytest.mark.parametrize(
"inputs, expected_message",
[
(
{"test_input": ["1", "str"]},
(
"The input for flow is incorrect. The value for flow input 'test_input' in line 1 of input data "
"does not match the expected type 'int'. "
"Please change flow input type or adjust the input value in your input data."
),
)
],
)
def test_resolve_aggregated_flow_inputs_type_error(self, inputs, expected_message):
flow = Flow(
id="fakeId",
name=None,
nodes=[],
inputs={"test_input": FlowInputDefinition(type=ValueType.INT)},
outputs=None,
tools=[],
)
with pytest.raises(InputTypeError) as ex:
FlowValidator.resolve_aggregated_flow_inputs_type(flow, inputs)
assert expected_message == str(ex.value)
@pytest.mark.parametrize(
"input, type, expected_result",
[
("1", ValueType.INT, 1),
('["1", "2"]', ValueType.LIST, ["1", "2"]),
],
)
def test_parse_input_value(self, input, type, expected_result):
input_key = "test_input"
result = FlowValidator._parse_input_value(input_key, input, type)
assert result == expected_result
@pytest.mark.parametrize(
"input, type, index, error_type, expected_message",
[
(
"str",
ValueType.INT,
None,
InputTypeError,
(
"The input for flow is incorrect. The value for flow input 'my_input' does not match the expected "
"type 'int'. Please change flow input type or adjust the input value in your input data."
),
),
(
"['1', '2']",
ValueType.LIST,
None,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'my_input' was interpreted as JSON "
"string since its type is 'list'. However, the value '['1', '2']' is invalid for JSON parsing. "
"Error details: (JSONDecodeError) Expecting value: line 1 column 2 (char 1). "
"Please make sure your inputs are properly formatted."
),
),
(
"str",
ValueType.INT,
10,
InputTypeError,
(
"The input for flow is incorrect. The value for flow input 'my_input' in line 10 of "
"input data does not match the expected type 'int'. "
"Please change flow input type or adjust the input value in your input data."
),
),
(
"['1', '2']",
ValueType.LIST,
10,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'my_input' in line 10 of input data "
"was interpreted as JSON string since its type is 'list'. However, the value '['1', '2']' is "
"invalid for JSON parsing. Error details: (JSONDecodeError) Expecting value: "
"line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
],
)
def test_parse_input_value_error(self, input, type, index, error_type, expected_message):
input_key = "my_input"
with pytest.raises(error_type) as ex:
FlowValidator._parse_input_value(input_key, input, type, index)
assert expected_message == str(ex.value)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_dag_manager.py | import pytest
from promptflow.contracts.flow import ActivateCondition, InputAssignment, Node
from promptflow.executor._dag_manager import DAGManager
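# Helper that builds a Node whose "test_input" comes from the given assignment (literal, flow input,
# or node reference) plus a literal "test_input2", with an optional activate condition.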
def create_test_node(name, input, activate=None):
input = InputAssignment.deserialize(input)
activate = ActivateCondition.deserialize(activate, name) if activate else None
return Node(
name=name,
tool="test_tool",
connection="azure_open_ai_connection",
inputs={"test_input": input, "test_input2": InputAssignment("hello world")},
provider="test_provider",
api="test_api",
activate=activate,
)
def pop_ready_node_names(dag_manager: DAGManager):
return {node.name for node in dag_manager.pop_ready_nodes()}
def pop_bypassed_node_names(dag_manager: DAGManager):
return {node.name for node in dag_manager.pop_bypassable_nodes()}
@pytest.mark.unittest
class TestDAGManager:
def test_pop_ready_nodes(self):
nodes = [
create_test_node("node1", input="value1"),
create_test_node("node2", input="${node1.output}"),
create_test_node("node3", input="${node1.output}"),
]
dag_manager = DAGManager(nodes, flow_inputs={})
assert pop_ready_node_names(dag_manager) == {"node1"}
dag_manager.complete_nodes({"node1": None})
assert pop_ready_node_names(dag_manager) == {"node2", "node3"}
dag_manager.complete_nodes({"node2": None, "node3": None})
def test_pop_bypassed_nodes(self):
nodes = [
create_test_node("node1", input="value1"),
create_test_node("node2", input="${inputs.text}", activate={"when": "${inputs.text}", "is": "world"}),
create_test_node("node3", input="${node1.output}"),
create_test_node("node4", input="${node2.output}"),
]
flow_inputs = {"text": "hello"}
dag_manager = DAGManager(nodes, flow_inputs)
expected_bypassed_nodes = {"node2", "node4"}
assert pop_bypassed_node_names(dag_manager) == expected_bypassed_nodes
assert dag_manager.bypassed_nodes.keys() == expected_bypassed_nodes
def test_complete_nodes(self):
nodes = [create_test_node("node1", input="value1")]
dag_manager = DAGManager(nodes, flow_inputs={})
dag_manager.complete_nodes({"node1": {"output1": "value1"}})
assert len(dag_manager.completed_nodes_outputs) == 1
assert dag_manager.completed_nodes_outputs["node1"] == {"output1": "value1"}
def test_completed(self):
nodes = [
create_test_node("node1", input="${inputs.text}", activate={"when": "${inputs.text}", "is": "hello"}),
create_test_node("node2", input="${node1.output}"),
]
flow_inputs = {"text": "hello"}
dag_manager = DAGManager(nodes, flow_inputs)
assert pop_ready_node_names(dag_manager) == {"node1"}
dag_manager.complete_nodes({"node1": {"output1": "value1"}})
assert pop_ready_node_names(dag_manager) == {"node2"}
dag_manager.complete_nodes({"node2": {"output1": "value1"}})
assert dag_manager.completed_nodes_outputs.keys() == {"node1", "node2"}
assert dag_manager.completed()
def test_get_node_valid_inputs(self):
nodes = [
create_test_node("node1", input="value1"),
create_test_node("node2", input="${node1.output}"),
]
def f(input):
return input
flow_inputs = {}
dag_manager = DAGManager(nodes, flow_inputs)
dag_manager.complete_nodes({"node1": {"output1": "value1"}})
valid_inputs = dag_manager.get_node_valid_inputs(nodes[1], f)
assert valid_inputs == {"test_input": {"output1": "value1"}, "test_input2": "hello world"}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_assistant_tool_invoker.py | import pytest
from pathlib import Path
from typing import Any, Callable
from promptflow import tool
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker
from promptflow.executor._errors import UnsupportedAssistantToolType
@pytest.mark.unittest
class TestAssistantToolInvoker:
@pytest.fixture
def tool_definitions(self):
return [
{"type": "code_interpreter"},
{"type": "retrieval"},
{
"type": "function",
"tool_type": "python",
"source": {"type": "code", "path": "test_assistant_tool_invoker.py"},
}
]
@pytest.mark.parametrize(
"predefined_inputs", [({}), ({"input_int": 1})]
)
def test_load_tools(self, predefined_inputs):
input_int = 1
input_str = "test"
tool_definitions = [
{"type": "code_interpreter"},
{"type": "retrieval"},
{
"type": "function",
"tool_type": "python",
"source": {"type": "code", "path": "test_assistant_tool_invoker.py"},
"predefined_inputs": predefined_inputs
}
]
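        # The "function" tool above points at this very test file, so it should resolve to the
        # module-level `sample_tool` defined at the bottom of this module.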
# Test load tools
invoker = AssistantToolInvoker.init(tool_definitions, working_dir=Path(__file__).parent)
for tool_name, assistant_tool in invoker._assistant_tools.items():
assert tool_name in ("code_interpreter", "retrieval", "sample_tool")
assert assistant_tool.name == tool_name
assert isinstance(assistant_tool.openai_definition, dict)
if tool_name in ("code_interpreter", "retrieval"):
assert assistant_tool.func is None
else:
assert isinstance(assistant_tool.func, Callable)
# Test to_openai_tools
descriptions = invoker.to_openai_tools()
assert len(descriptions) == 3
properties = {
"input_int": {"description": "This is a sample input int.", "type": "number"},
"input_str": {"description": "This is a sample input str.", "type": "string"}
}
required = ["input_int", "input_str"]
self._remove_predefined_inputs(properties, predefined_inputs.keys())
self._remove_predefined_inputs(required, predefined_inputs.keys())
for description in descriptions:
if description["type"] in ("code_interpreter", "retrieval"):
assert description == {"type": description["type"]}
else:
assert description == {
"type": "function",
"function": {
"name": "sample_tool",
"description": "This is a sample tool.",
"parameters": {
"type": "object",
"properties": properties,
"required": required
}
}
}
# Test invoke tool
kwargs = {"input_int": input_int, "input_str": input_str}
self._remove_predefined_inputs(kwargs, predefined_inputs.keys())
result = invoker.invoke_tool(func_name="sample_tool", kwargs=kwargs)
assert result == (input_int, input_str)
def test_load_tools_with_invalid_case(self):
tool_definitions = [{"type": "invalid_type"}]
with pytest.raises(UnsupportedAssistantToolType) as exc_info:
AssistantToolInvoker.init(tool_definitions)
assert "Unsupported assistant tool type" in exc_info.value.message
    def _remove_predefined_inputs(self, value: Any, predefined_inputs: list):
for input in predefined_inputs:
if input in value:
if isinstance(value, dict):
value.pop(input)
elif isinstance(value, list):
value.remove(input)
@tool
def sample_tool(input_int: int, input_str: str):
"""This is a sample tool.
:param input_int: This is a sample input int.
:type input_int: int
:param input_str: This is a sample input str.
:type input_str: str
"""
return input_int, input_str
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_tool_resolver.py | import re
import sys
from pathlib import Path
from typing import List
from unittest.mock import mock_open
import pytest
from jinja2 import TemplateSyntaxError
from promptflow._core._errors import InvalidSource
from promptflow._core.tools_manager import ToolLoader
from promptflow._internal import tool
from promptflow._sdk.entities import CustomConnection, CustomStrongTypeConnection
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.flow import InputAssignment, InputValueType, Node, ToolSource, ToolSourceType
from promptflow.contracts.tool import AssistantDefinition, InputDefinition, Secret, Tool, ToolType, ValueType
from promptflow.contracts.types import PromptTemplate
from promptflow.exceptions import UserErrorException
from promptflow.executor._errors import (
ConnectionNotFound,
InvalidConnectionType,
NodeInputValidationError,
ResolveToolError,
ValueTypeUnresolved,
)
from promptflow.executor._tool_resolver import ResolvedTool, ToolResolver
from ...utils import DATA_ROOT, FLOW_ROOT
TEST_ROOT = Path(__file__).parent.parent.parent
REQUESTS_PATH = TEST_ROOT / "test_configs/executor_api_requests"
WRONG_REQUESTS_PATH = TEST_ROOT / "test_configs/executor_wrong_requests"
class MyFirstCSTConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
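# Stand-in for a package tool: renders a prompt template by replacing each "{{key}}" placeholder
# with the corresponding keyword argument.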
@tool(streaming_option_parameter="stream_enabled")
def mock_package_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
@pytest.mark.unittest
class TestToolResolver:
@pytest.fixture
def resolver(self):
return ToolResolver(working_dir=None, connections={})
def test_resolve_tool_by_node_with_diff_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
mocker.patch.object(
resolver,
"_resolve_package_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_script_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_prompt_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_llm_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_integrate_prompt_in_package_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Package)
resolver.resolve_tool_by_node(node)
resolver._resolve_package_node.assert_called_once()
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Code)
resolver.resolve_tool_by_node(node)
resolver._resolve_script_node.assert_called_once()
node.type = ToolType.PROMPT
resolver.resolve_tool_by_node(node)
resolver._resolve_prompt_node.assert_called_once()
node.type = ToolType.LLM
resolver.resolve_tool_by_node(node)
resolver._resolve_llm_node.assert_called_once()
resolver._resolve_package_node.reset_mock()
node.type = ToolType.CUSTOM_LLM
node.source = mocker.Mock(type=ToolSourceType.PackageWithPrompt)
resolver.resolve_tool_by_node(node)
resolver._resolve_package_node.assert_called_once()
resolver._integrate_prompt_in_package_node.assert_called_once()
def test_resolve_tool_by_node_with_invalid_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool type" in exec_info.value.message
def test_resolve_tool_by_node_with_invalid_source_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool source type" in exec_info.value.message
node.type = ToolType.CUSTOM_LLM
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool source type" in exec_info.value.message
def test_resolve_tool_by_node_with_no_source(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.source = None
with pytest.raises(ResolveToolError) as ex:
resolver.resolve_tool_by_node(node)
assert isinstance(ex.value.inner_exception, UserErrorException)
def test_resolve_tool_by_node_with_no_source_path(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PROMPT
node.source = mocker.Mock(type=ToolSourceType.Package, path=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, InvalidSource)
assert "Node source path" in exec_info.value.message
def test_resolve_tool_by_node_with_duplicated_inputs(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PROMPT
mocker.patch.object(resolver, "_load_source_content", return_value="{{template}}")
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NodeInputValidationError)
assert "These inputs are duplicated" in exec_info.value.message
def test_resolve_tool_by_node_with_invalid_template(self, resolver, mocker):
node = mocker.Mock(tool=None, inputs={})
node.name = "node"
node.type = ToolType.PROMPT
mocker.patch.object(resolver, "_load_source_content", return_value="{{current context}}")
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, TemplateSyntaxError)
expected_message = (
"Tool load failed in 'node': Jinja parsing failed at line 1: "
"(TemplateSyntaxError) expected token 'end of print statement', got 'context'"
)
assert expected_message in exec_info.value.message
def test_convert_node_literal_input_types_with_invalid_case(self):
# Case 1: conn_name not in connections, should raise conn_name not found error
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
)
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
# Case 2: conn_name in connections, but type not matched
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._convert_node_literal_input_types(node, tool)
message = "'AzureOpenAIConnection' is not supported, valid types ['CustomConnection']"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 3: Literal value, type mismatch
tool = Tool(name="mock", type="python", inputs={"int_input": InputDefinition(type=[ValueType.INT])})
node = Node(
name="mock",
tool=tool,
inputs={"int_input": InputAssignment(value="invalid", value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
message = "value 'invalid' is not type int"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 4: Unresolved value, like newly added type not in old version ValueType enum
tool = Tool(name="mock", type="python", inputs={"int_input": InputDefinition(type=["A_good_type"])})
node = Node(
name="mock",
tool=tool,
inputs={"int_input": InputAssignment(value="invalid", value_type=InputValueType.LITERAL)},
)
with pytest.raises(ValueTypeUnresolved):
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
# Case 5: Literal value, invalid image in list
tool = Tool(name="mock", type="python", inputs={"list_input": InputDefinition(type=[ValueType.LIST])})
invalid_image = {"data:image/jpg;base64": "invalid_image"}
node = Node(
name="mock",
tool=tool,
inputs={"list_input": InputAssignment(value=[invalid_image], value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
message = "Invalid base64 image"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 6: Literal value, invalid assistant definition path
tool = Tool(
name="mock",
type="python",
inputs={"assistant_definition": InputDefinition(type=[ValueType.ASSISTANT_DEFINITION])},
)
node = Node(
name="mock",
tool=tool,
inputs={"assistant_definition": InputAssignment(value="invalid_path", value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=Path(__file__).parent, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
        assert (
            "Failed to load assistant definition" in exe_info.value.message
            and "is not a valid path" in exe_info.value.message
        ), "Actual: {}".format(exe_info.value.message)
def test_resolve_llm_connection_to_inputs(self):
# Case 1: node.connection is not specified
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
# Case 2: node.connection is not found from connection manager
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name1",
)
connections = {}
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
# Case 3: Tool definition with bad input type list
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["int"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(InvalidConnectionType) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert "Connection type can not be resolved for tool" in exe_info.value.message
# Case 4: Tool type not match the connection manager return
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["OpenAIConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(InvalidConnectionType) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert "Invalid connection" in exe_info.value.message
# Case 5: Normal case
tool = Tool(
name="mock",
type="python",
inputs={"conn": InputDefinition(type=["OpenAIConnection", "AzureOpenAIConnection"])},
)
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
key, conn = tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert key == "conn"
assert isinstance(conn, AzureOpenAIConnection)
def test_resolve_llm_node(self, mocker):
def mock_llm_api_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.LLM, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_llm_node", return_value=tool)
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_package_tool",
return_value=(mock_llm_api_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
mocker.patch.object(tool_resolver, "_load_source_content", return_value="{{text}}")
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
"image": InputAssignment(value=str(DATA_ROOT / "logo.jpg"), value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_llm_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
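        # The image input is expected to be rendered into the prompt as a markdown image
        # reference carrying an 8-character hash, hence the pattern below.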
pattern = re.compile(r"^Hello World!!\[image\]\(Image\([a-z0-9]{8}\)\)$")
prompt = resolved_tool.callable(**kwargs)
assert re.match(pattern, prompt)
def test_resolve_script_node(self, mocker):
def mock_python_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.PYTHON, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_script_node", return_value=(None, tool))
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_tool_from_module",
return_value=(mock_python_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"prompt": InputAssignment(value="{{text}}", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_script_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
def test_resolve_script_node_with_assistant_definition(self, mocker):
def mock_python_func(input: AssistantDefinition):
if input.model == "model" and input.instructions == "instructions" and input.tools == []:
return True
return False
tool_loader = ToolLoader(working_dir=None)
tool = Tool(
name="mock", type=ToolType.PYTHON, inputs={"input": InputDefinition(type=[ValueType.ASSISTANT_DEFINITION])}
)
mocker.patch.object(tool_loader, "load_tool_for_script_node", return_value=(None, tool))
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_tool_from_module",
return_value=(mock_python_func, {}),
)
tool_resolver = ToolResolver(working_dir=Path(__file__).parent, connections={})
tool_resolver._tool_loader = tool_loader
mocker.patch("builtins.open", mock_open())
mocker.patch(
"ruamel.yaml.YAML.load", return_value={"model": "model", "instructions": "instructions", "tools": []}
)
node = Node(
name="mock",
tool=None,
inputs={"input": InputAssignment(value="test_tool_resolver.py", value_type=InputValueType.LITERAL)},
)
resolved_tool = tool_resolver._resolve_script_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 1
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs)
def test_resolve_package_node(self, mocker):
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.PYTHON, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_package_node", return_value=tool)
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_package_tool",
return_value=(mock_package_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"prompt": InputAssignment(value="{{text}}", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_package_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
def test_integrate_prompt_in_package_node(self, mocker):
tool_resolver = ToolResolver(working_dir=None, connections={})
mocker.patch.object(
tool_resolver,
"_load_source_content",
return_value="{{text}}",
)
tool = Tool(name="mock", type=ToolType.CUSTOM_LLM, inputs={"prompt": InputDefinition(type=["PromptTemplate"])})
node = Node(
name="mock",
tool=None,
inputs={"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL)},
connection="conn_name",
provider="mock",
)
resolved_tool = ResolvedTool(node=node, callable=mock_package_func, definition=tool, init_args=None)
assert resolved_tool.callable._streaming_option_parameter == "stream_enabled"
resolved_tool = tool_resolver._integrate_prompt_in_package_node(resolved_tool)
assert resolved_tool.callable._streaming_option_parameter == "stream_enabled"
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
@pytest.mark.parametrize(
"conn_types, expected_type",
[
(["MyFirstCSTConnection"], MyFirstCSTConnection),
(["CustomConnection", "MyFirstCSTConnection"], CustomConnection),
(["CustomConnection", "MyFirstCSTConnection", "MySecondCSTConnection"], CustomConnection),
(["MyFirstCSTConnection", "MySecondCSTConnection"], MyFirstCSTConnection),
],
)
def test_convert_to_custom_strong_type_connection_value(self, conn_types: List[str], expected_type, mocker):
connections = {"conn_name": {"type": "CustomConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Code)
tool = Tool(name="tool", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
m = sys.modules[__name__]
v = InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)
actual = tool_resolver._convert_to_custom_strong_type_connection_value(
"conn_name", v, node, tool, conn_types, m
)
assert isinstance(actual, expected_type)
assert actual.api_base == "mock"
def test_load_source(self):
# Create a mock Node object with a valid source path
node = Node(name="mock", tool=None, inputs={}, source=ToolSource())
node.source.path = "./script_with_special_character/script_with_special_character.py"
resolver = ToolResolver(FLOW_ROOT)
result = resolver._load_source_content(node)
assert "https://www.bing.com/\ue000\ue001/" in result
@pytest.mark.parametrize(
"source",
[
None,
ToolSource(path=None), # Then will try to read one directory.
ToolSource(path=""), # Then will try to read one directory.
ToolSource(path="NotExistPath.py"),
],
)
def test_load_source_error(self, source):
# Create a mock Node object with a valid source path
node = Node(name="mock", tool=None, inputs={}, source=source)
resolver = ToolResolver(FLOW_ROOT)
with pytest.raises(InvalidSource) as _:
resolver._load_source_content(node)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_input_assignment_parser.py | from typing import Any
import pytest
from promptflow._core._errors import NotSupported
from promptflow.contracts.flow import InputAssignment
from promptflow.executor._errors import (
InputNotFound,
InputNotFoundFromAncestorNodeOutput,
InvalidReferenceProperty,
UnsupportedReference,
)
from promptflow.executor._input_assignment_parser import parse_node_property, parse_value
FLOW_INPUTS = {"text": "hello promptflow"}
NODE_OUTPUTS = {"node1": "hello promptflow"}
class WrongInputAssignment:
value: Any
value_type: str = "wrong_type"
section: str = ""
property: str = ""
class DummyObject:
value: str = "dummy"
@pytest.mark.unittest
class TestInputAssignmentParser:
@pytest.mark.parametrize(
"input, expected_value",
[
("hello promptflow", "hello promptflow"),
("${inputs.text}", "hello promptflow"),
("${node1.output}", "hello promptflow"),
],
)
def test_parse_value(self, input, expected_value):
input_assignment = InputAssignment.deserialize(input)
actual_value = parse_value(input_assignment, NODE_OUTPUTS, FLOW_INPUTS)
assert actual_value == expected_value
@pytest.mark.parametrize(
"input, expected_error_class, expected_error_message",
[
(
"${inputs.word}",
InputNotFound,
(
"The input 'word' is not found from flow inputs 'text'. "
"Please check the input name and try again."
),
),
(
"${node2.output}",
InputNotFoundFromAncestorNodeOutput,
(
"The input 'node2' is not found from ancestor node outputs ['node1']. "
"Please check the node name and try again."
),
),
(
"${node1.word}",
UnsupportedReference,
(
"The section 'word' of reference is currently unsupported. "
"Please specify the output part of the node 'node1'."
),
),
(
WrongInputAssignment(),
NotSupported,
(
"The type 'wrong_type' is currently unsupported. "
"Please choose from available types: ['Literal', 'FlowInput', 'NodeReference'] and try again."
),
),
],
)
def test_parse_value_with_exception(self, input, expected_error_class, expected_error_message):
input_assignment = InputAssignment.deserialize(input) if isinstance(input, str) else input
with pytest.raises(expected_error_class) as e:
parse_value(input_assignment, NODE_OUTPUTS, FLOW_INPUTS)
assert e.value.message == f"Flow execution failed. {expected_error_message}"
@pytest.mark.parametrize(
"node_val, property, expected_value",
[
(
{"output": "hello promptflow"},
"output",
"hello promptflow",
),
(
{"output": "hello promptflow"},
"['output']",
"hello promptflow",
),
(
{"output": "hello promptflow"},
'["output"]',
"hello promptflow",
),
(
{"output": {"text": "hello promptflow"}},
'["output"]["text"]',
"hello promptflow",
),
(
["output1", "output2"],
"[1]",
"output2",
),
(
DummyObject(),
"value",
"dummy",
),
],
)
def test_parse_node_property(self, node_val, property, expected_value):
actual_value = parse_node_property("node1", node_val, property)
assert actual_value == expected_value
@pytest.mark.parametrize(
"node_val, property, expected_error_message",
[
(
{"output_str": ["output1", "output2"]},
"output_str[2]",
(
"Invalid property 'output_str[2]' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
(
{"word": "hello promptflow"},
"text",
(
"Invalid property 'text' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
(
DummyObject(),
"value_type",
(
"Invalid property 'value_type' when accessing the node 'node1'. "
"Please check the property and try again."
),
),
],
)
def test_parse_node_property_with_exception(self, node_val, property, expected_error_message):
with pytest.raises(InvalidReferenceProperty) as e:
parse_node_property("node1", node_val, property)
assert e.value.message == f"Flow execution failed. {expected_error_message}"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_executor.py | from unittest.mock import Mock
import pytest
from promptflow import tool
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.tool import ValueType
from promptflow.executor.flow_executor import (
FlowExecutor,
_ensure_node_result_is_serializable,
_inject_stream_options,
enable_streaming_for_llm_tool,
)
from promptflow.tools.aoai import chat, completion
from promptflow.tools.embedding import embedding
@pytest.mark.unittest
class TestFlowExecutor:
@pytest.mark.parametrize(
"flow_inputs, aggregated_flow_inputs, aggregation_inputs, expected_inputs",
[
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{},
{},
{"input_from_default": ["default_value"]},
),
(
{
"input_no_default": FlowInputDefinition(type=ValueType.STRING),
},
{},
{},
{}, # No default value for input.
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{"input_from_default": "input_value", "another_key": "input_value"},
{},
{"input_from_default": "input_value", "another_key": "input_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{"another_key": ["input_value", "input_value"]},
{},
{
"input_from_default": ["default_value", "default_value"],
"another_key": ["input_value", "input_value"],
},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.BOOL, default=False),
},
{"another_key": ["input_value", "input_value"]},
{},
{
"input_from_default": [False, False],
"another_key": ["input_value", "input_value"],
},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{},
{"another_key_in_aggregation_inputs": ["input_value", "input_value"]},
{
"input_from_default": ["default_value", "default_value"],
},
),
],
)
def test_apply_default_value_for_aggregation_input(
self, flow_inputs, aggregated_flow_inputs, aggregation_inputs, expected_inputs
):
result = FlowExecutor._apply_default_value_for_aggregation_input(
flow_inputs, aggregated_flow_inputs, aggregation_inputs
)
assert result == expected_inputs
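# Module-level helpers used below to exercise the stream-option injection wrappers.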
def func_with_stream_parameter(a: int, b: int, stream=False):
return a + b, stream
def func_without_stream_parameter(a: int, b: int):
return a + b
class TestEnableStreamForLLMTool:
@pytest.mark.parametrize(
"tool, should_be_wrapped",
[
(completion, True),
(chat, True),
(embedding, False),
],
)
def test_enable_stream_for_llm_tool(self, tool, should_be_wrapped):
func = enable_streaming_for_llm_tool(tool)
is_wrapped = func != tool
assert is_wrapped == should_be_wrapped
def test_func_with_stream_parameter_should_be_wrapped(self):
func = enable_streaming_for_llm_tool(func_with_stream_parameter)
assert func != func_with_stream_parameter
result = func(a=1, b=2)
assert result == (3, True)
result = func_with_stream_parameter(a=1, b=2)
assert result == (3, False)
def test_func_without_stream_parameter_should_not_be_wrapped(self):
func = enable_streaming_for_llm_tool(func_without_stream_parameter)
assert func == func_without_stream_parameter
result = func(a=1, b=2)
assert result == 3
def test_inject_stream_options_no_stream_param(self):
# Test that the function does not wrap the decorated function if it has no stream parameter
func = _inject_stream_options(lambda: True)(func_without_stream_parameter)
assert func == func_without_stream_parameter
result = func(a=1, b=2)
assert result == 3
def test_inject_stream_options_with_stream_param(self):
# Test that the function wraps the decorated function and injects the stream option
func = _inject_stream_options(lambda: True)(func_with_stream_parameter)
assert func != func_with_stream_parameter
result = func(a=1, b=2)
assert result == (3, True)
result = func_with_stream_parameter(a=1, b=2)
assert result == (3, False)
def test_inject_stream_options_with_mocked_should_stream(self):
# Test that the function uses the should_stream callable to determine the stream option
should_stream = Mock(return_value=True)
func = _inject_stream_options(should_stream)(func_with_stream_parameter)
result = func(a=1, b=2)
assert result == (3, True)
should_stream.return_value = False
result = func(a=1, b=2)
assert result == (3, False)
@tool
def streaming_tool():
for i in range(10):
yield i
@tool
def non_streaming_tool():
return 1
class TestEnsureNodeResultIsSerializable:
def test_streaming_tool_should_be_consumed_and_merged(self):
func = _ensure_node_result_is_serializable(streaming_tool)
assert func() == "0123456789"
def test_non_streaming_tool_should_not_be_affected(self):
func = _ensure_node_result_is_serializable(non_streaming_tool)
assert func() == 1
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_exceptions.py | import pytest
from promptflow.exceptions import PromptflowException
@pytest.mark.unittest
class TestExceptions:
def test_exception_message(self):
ex = PromptflowException(
message_format="Test exception message with parameters: {param}, {param1}.",
param="test_param",
)
assert ex.message == "Test exception message with parameters: test_param, <param1>."
assert None not in ex.message_parameters
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_nodes_scheduler.py | from concurrent.futures import Future
from typing import Callable
from unittest.mock import MagicMock
import pytest
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow.contracts.flow import Node
from promptflow.executor._dag_manager import DAGManager
from promptflow.executor._flow_nodes_scheduler import (
DEFAULT_CONCURRENCY_BULK,
DEFAULT_CONCURRENCY_FLOW,
FlowNodesScheduler,
NoNodeExecutedError,
)
@pytest.mark.unittest
class TestFlowNodesScheduler:
def setup_method(self):
# Define mock objects and methods
self.tools_manager = MagicMock()
self.context = MagicMock(spec=FlowExecutionContext)
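        # invoke_tool(node, func, kwargs) is mocked to simply call the tool function with the kwargs.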
self.context.invoke_tool.side_effect = lambda _, func, kwargs: func(**kwargs)
self.scheduler = FlowNodesScheduler(self.tools_manager, {}, [], DEFAULT_CONCURRENCY_BULK, self.context)
    def test_maximum_concurrency(self):
scheduler = FlowNodesScheduler(self.tools_manager, {}, [], 1000, self.context)
assert scheduler._node_concurrency == DEFAULT_CONCURRENCY_FLOW
def test_collect_outputs(self):
future1 = Future()
future1.set_result("output1")
future2 = Future()
future2.set_result("output2")
node1 = MagicMock(spec=Node)
node1.name = "node1"
node2 = MagicMock(spec=Node)
node2.name = "node2"
self.scheduler._future_to_node = {future1: node1, future2: node2}
completed_nodes_outputs = self.scheduler._collect_outputs([future1, future2])
assert completed_nodes_outputs == {"node1": future1.result(), "node2": future2.result()}
def test_bypass_nodes(self):
executor = MagicMock()
dag_manager = MagicMock(spec=DAGManager)
node1 = MagicMock(spec=Node)
node1.name = "node1"
# The return value will be a list with one item for the first time.
# Will be a list without item for the second time.
dag_manager.pop_bypassable_nodes.side_effect = ([node1], [])
self.scheduler._dag_manager = dag_manager
self.scheduler._execute_nodes(executor)
self.scheduler._context.bypass_node.assert_called_once_with(node1)
def test_submit_nodes(self):
executor = MagicMock()
dag_manager = MagicMock(spec=DAGManager)
node1 = MagicMock(spec=Node)
node1.name = "node1"
dag_manager.pop_bypassable_nodes.return_value = []
# The return value will be a list with one item for the first time.
# Will be a list without item for the second time.
dag_manager.pop_ready_nodes.return_value = [node1]
self.scheduler._dag_manager = dag_manager
self.scheduler._execute_nodes(executor)
self.scheduler._context.bypass_node.assert_not_called()
assert node1 in self.scheduler._future_to_node.values()
def test_future_cancelled_for_exception(self):
dag_manager = MagicMock(spec=DAGManager)
self.scheduler._dag_manager = dag_manager
dag_manager.completed.return_value = False
dag_manager.pop_bypassable_nodes.return_value = []
dag_manager.pop_ready_nodes.return_value = []
failed_future = Future()
failed_future.set_exception(Exception("test"))
from concurrent.futures._base import CANCELLED, FINISHED
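        # Ensure the failed future is in the FINISHED state; the scheduler should then cancel the
        # other, still-pending future (asserted below).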
failed_future._state = FINISHED
cancelled_future = Future()
node1 = MagicMock(spec=Node)
node1.name = "node1"
node2 = MagicMock(spec=Node)
node2.name = "node2"
self.scheduler._future_to_node = {failed_future: node1, cancelled_future: node2}
try:
self.scheduler.execute()
except Exception:
pass
# Assert another future is cancelled.
assert CANCELLED in cancelled_future._state
def test_success_result(self):
dag_manager = MagicMock(spec=DAGManager)
finished_future = Future()
finished_future.set_result("output1")
finished_node = MagicMock(spec=Node)
finished_node.name = "node1"
self.scheduler._dag_manager = dag_manager
self.scheduler._future_to_node = {finished_future: finished_node}
# No more nodes need to run.
dag_manager.pop_bypassable_nodes.return_value = []
dag_manager.pop_ready_nodes.return_value = []
dag_manager.completed.side_effect = (False, True)
bypassed_node_result = {"bypassed_node": "output2"}
dag_manager.bypassed_nodes = bypassed_node_result
completed_node_result = {"completed_node": "output1"}
dag_manager.completed_nodes_outputs = completed_node_result
result = self.scheduler.execute()
dag_manager.complete_nodes.assert_called_once_with({"node1": "output1"})
assert result == (completed_node_result, bypassed_node_result)
def test_no_nodes_to_run(self):
dag_manager = MagicMock(spec=DAGManager)
dag_manager.pop_bypassable_nodes.return_value = []
dag_manager.pop_ready_nodes.return_value = []
dag_manager.completed.return_value = False
self.scheduler._dag_manager = dag_manager
with pytest.raises(NoNodeExecutedError) as _:
self.scheduler.execute()
def test_execute_single_node(self):
node_to_run = MagicMock(spec=Node)
node_to_run.name = "node1"
mock_callable = MagicMock(spec=Callable)
mock_callable.return_value = "output1"
self.scheduler._tools_manager.get_tool.return_value = mock_callable
dag_manager = MagicMock(spec=DAGManager)
dag_manager.get_node_valid_inputs.return_value = {"input": 1}
result = self.scheduler._exec_single_node_in_thread((node_to_run, dag_manager))
mock_callable.assert_called_once_with(**{"input": 1})
assert result == "output1"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/executor/test_errors.py | import pytest
from promptflow._core.tool_meta_generator import PythonLoadError
from promptflow.exceptions import ErrorTarget
from promptflow.executor._errors import ResolveToolError
def code_with_bug():
1 / 0
def raise_resolve_tool_error(func, target=None, module=None):
try:
func()
except Exception as e:
if target:
raise ResolveToolError(node_name="MyTool", target=target, module=module) from e
raise ResolveToolError(node_name="MyTool") from e
def raise_python_load_error():
try:
code_with_bug()
except Exception as e:
raise PythonLoadError(message="Test PythonLoadError.") from e
def test_resolve_tool_error():
with pytest.raises(ResolveToolError) as e:
raise_resolve_tool_error(raise_python_load_error, ErrorTarget.TOOL, "__pf_main__")
exception = e.value
inner_exception = exception.inner_exception
assert isinstance(inner_exception, PythonLoadError)
assert exception.message == "Tool load failed in 'MyTool': (PythonLoadError) Test PythonLoadError."
assert exception.additional_info == inner_exception.additional_info
assert exception.error_codes == ["UserError", "ToolValidationError", "PythonParsingError", "PythonLoadError"]
assert exception.reference_code == "Tool/__pf_main__"
def test_resolve_tool_error_with_none_inner():
with pytest.raises(ResolveToolError) as e:
raise ResolveToolError(node_name="MyTool")
exception = e.value
assert exception.inner_exception is None
assert exception.message == "Tool load failed in 'MyTool'."
assert exception.additional_info is None
assert exception.error_codes == ["SystemError", "ResolveToolError"]
assert exception.reference_code == "Executor"
def test_resolve_tool_error_with_no_PromptflowException_inner():
with pytest.raises(ResolveToolError) as e:
raise_resolve_tool_error(code_with_bug)
exception = e.value
assert isinstance(exception.inner_exception, ZeroDivisionError)
assert exception.message == "Tool load failed in 'MyTool': (ZeroDivisionError) division by zero"
assert exception.additional_info is None
assert exception.error_codes == ["SystemError", "ZeroDivisionError"]
assert exception.reference_code == "Executor"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_base_executor_proxy.py | import json
from pathlib import Path
from tempfile import mkdtemp
from typing import Optional
from unittest.mock import AsyncMock, patch
import httpx
import pytest
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow.batch._base_executor_proxy import APIBasedExecutorProxy
from promptflow.batch._errors import ExecutorServiceUnhealthy
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, ValidationException
from promptflow.executor._errors import ConnectionNotFound
from promptflow.storage._run_storage import AbstractRunStorage
from ...mock_execution_server import _get_aggr_result_dict, _get_line_result_dict
@pytest.mark.unittest
class TestAPIBasedExecutorProxy:
@pytest.mark.asyncio
@pytest.mark.parametrize(
"has_error",
[False, True],
)
async def test_exec_line_async(self, has_error):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
run_id = "test_run_id"
index = 1
inputs = {"question": "test"}
with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock:
line_result_dict = _get_line_result_dict(run_id, index, inputs, has_error=has_error)
status_code = 400 if has_error else 200
mock.return_value = httpx.Response(status_code, json=line_result_dict)
line_result = await mock_executor_proxy.exec_line_async(inputs, index, run_id)
            assert line_result.output == ({} if has_error else {"answer": "Hello world!"})
assert line_result.run_info.run_id == run_id
assert line_result.run_info.index == index
            assert line_result.run_info.status == (Status.Failed if has_error else Status.Completed)
assert line_result.run_info.inputs == inputs
assert (line_result.run_info.error is not None) == has_error
@pytest.mark.asyncio
async def test_exec_aggregation_async(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
run_id = "test_run_id"
batch_inputs = {"question": ["test", "error"]}
aggregation_inputs = {"${get_answer.output}": ["Incorrect", "Correct"]}
with patch("httpx.AsyncClient.post", new_callable=AsyncMock) as mock:
aggr_result_dict = _get_aggr_result_dict(run_id, aggregation_inputs)
mock.return_value = httpx.Response(200, json=aggr_result_dict)
aggr_result = await mock_executor_proxy.exec_aggregation_async(batch_inputs, aggregation_inputs, run_id)
assert aggr_result.metrics == {"accuracy": 0.5}
assert len(aggr_result.node_run_infos) == 1
assert aggr_result.node_run_infos["aggregation"].flow_run_id == run_id
assert aggr_result.node_run_infos["aggregation"].inputs == aggregation_inputs
assert aggr_result.node_run_infos["aggregation"].status == Status.Completed
@pytest.mark.asyncio
async def test_ensure_executor_startup_when_no_error(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
with patch.object(APIBasedExecutorProxy, "_check_startup_error_from_file") as mock_check_startup_error:
await mock_executor_proxy.ensure_executor_startup("")
mock_check_startup_error.assert_not_called()
mock.assert_called_once()
@pytest.mark.asyncio
async def test_ensure_executor_startup_when_not_healthy(self):
# empty error file
error_file = Path(mkdtemp()) / "error.json"
error_file.touch()
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
mock.side_effect = ExecutorServiceUnhealthy("executor unhealthy")
with pytest.raises(ExecutorServiceUnhealthy) as ex:
await mock_executor_proxy.ensure_executor_startup(error_file)
assert ex.value.message == "executor unhealthy"
mock.assert_called_once()
@pytest.mark.asyncio
async def test_ensure_executor_startup_when_existing_validation_error(self):
# prepare the error file
error_file = Path(mkdtemp()) / "error.json"
error_message = "Connection 'aoai_conn' not found"
error_dict = ExceptionPresenter.create(ConnectionNotFound(message=error_message)).to_dict()
with open(error_file, "w") as file:
json.dump(error_dict, file, indent=4)
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "ensure_executor_health", new_callable=AsyncMock) as mock:
mock.side_effect = ExecutorServiceUnhealthy("executor unhealthy")
with pytest.raises(ValidationException) as ex:
await mock_executor_proxy.ensure_executor_startup(error_file)
assert ex.value.message == error_message
assert ex.value.target == ErrorTarget.BATCH
@pytest.mark.asyncio
async def test_ensure_executor_health_when_healthy(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "_check_health", return_value=True) as mock:
await mock_executor_proxy.ensure_executor_health()
mock.assert_called_once()
@pytest.mark.asyncio
async def test_ensure_executor_health_when_unhealthy(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "_check_health", return_value=False) as mock:
with pytest.raises(ExecutorServiceUnhealthy):
await mock_executor_proxy.ensure_executor_health()
assert mock.call_count == 20
@pytest.mark.asyncio
async def test_ensure_executor_health_when_not_active(self):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch.object(APIBasedExecutorProxy, "_check_health", return_value=False) as mock:
with patch.object(APIBasedExecutorProxy, "_is_executor_active", return_value=False):
with pytest.raises(ExecutorServiceUnhealthy):
await mock_executor_proxy.ensure_executor_health()
mock.assert_not_called()
@pytest.mark.asyncio
@pytest.mark.parametrize(
"mock_value, expected_result",
[
(httpx.Response(200), True),
(httpx.Response(500), False),
(Exception("error"), False),
],
)
async def test_check_health(self, mock_value, expected_result):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
with patch("httpx.AsyncClient.get", new_callable=AsyncMock) as mock:
mock.return_value = mock_value
assert await mock_executor_proxy._check_health() is expected_result
@pytest.mark.asyncio
@pytest.mark.parametrize(
"response, expected_result",
[
(
httpx.Response(200, json={"result": "test"}),
{"result": "test"},
),
(
httpx.Response(500, json={"error": "test error"}),
"test error",
),
(
httpx.Response(400, json={"detail": "test"}),
{
"message": 'Unexpected error when executing a line, status code: 400, error: {"detail": "test"}',
"messageFormat": (
"Unexpected error when executing a line, " "status code: {status_code}, error: {error}"
),
"messageParameters": {
"status_code": "400",
"error": '{"detail": "test"}',
},
"referenceCode": "Unknown",
"code": "SystemError",
"innerError": {
"code": "UnexpectedError",
"innerError": None,
},
},
),
(
httpx.Response(502, text="test"),
{
"message": "Unexpected error when executing a line, status code: 502, error: test",
"messageFormat": (
"Unexpected error when executing a line, " "status code: {status_code}, error: {error}"
),
"messageParameters": {
"status_code": "502",
"error": "test",
},
"referenceCode": "Unknown",
"code": "SystemError",
"innerError": {
"code": "UnexpectedError",
"innerError": None,
},
},
),
],
)
async def test_process_http_response(self, response, expected_result):
mock_executor_proxy = await MockAPIBasedExecutorProxy.create("")
assert mock_executor_proxy._process_http_response(response) == expected_result
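# Minimal concrete proxy used by the tests above. The endpoint below is a
# placeholder; no request should ever reach it because the tests patch httpx
# or the health-check helpers directly.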
class MockAPIBasedExecutorProxy(APIBasedExecutorProxy):
@property
def api_endpoint(self) -> str:
return "http://localhost:8080"
@classmethod
async def create(
cls,
flow_file: Path,
working_dir: Optional[Path] = None,
*,
connections: Optional[dict] = None,
storage: Optional[AbstractRunStorage] = None,
**kwargs,
) -> "MockAPIBasedExecutorProxy":
return MockAPIBasedExecutorProxy()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_batch_inputs_processor.py | import json
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._core._errors import UnexpectedError
from promptflow._utils.utils import dump_list_to_jsonl
from promptflow.batch._batch_inputs_processor import BatchInputsProcessor, apply_inputs_mapping
from promptflow.batch._errors import EmptyInputsData, InputMappingError
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.tool import ValueType
from ...utils import DATA_ROOT
@pytest.mark.unittest
class TestBatchInputsProcessor:
def test_process_batch_inputs(self):
data = [
{"question": "What's promptflow?"},
{"question": "Do you like promptflow?"},
]
data_file = Path(mkdtemp()) / "data.jsonl"
dump_list_to_jsonl(data_file, data)
input_dirs = {"data": data_file}
inputs_mapping = {"question": "${data.question}"}
batch_inputs = BatchInputsProcessor("", {}).process_batch_inputs(input_dirs, inputs_mapping)
assert batch_inputs == [
{"line_number": 0, "question": "What's promptflow?"},
{"line_number": 1, "question": "Do you like promptflow?"},
]
def test_process_batch_inputs_error(self):
data_file = Path(mkdtemp()) / "data.jsonl"
data_file.touch()
input_dirs = {"data": data_file}
inputs_mapping = {"question": "${data.question}"}
with pytest.raises(EmptyInputsData) as e:
BatchInputsProcessor("", {}).process_batch_inputs(input_dirs, inputs_mapping)
expected_error_message = (
"Couldn't find any inputs data at the given input paths. "
"Please review the provided path and consider resubmitting."
)
assert expected_error_message in e.value.message
def test_resolve_data_from_input_path(self):
inputs_dir = Path(mkdtemp())
# data.jsonl
data = [
{"question": "What's promptflow?"},
{"question": "Do you like promptflow?"},
]
data_file = inputs_dir / "data.jsonl"
dump_list_to_jsonl(data_file, data)
# inputs.json
inputs_file = inputs_dir / "inputs.json"
with open(inputs_file, "w") as file:
file.write(json.dumps(data))
result = BatchInputsProcessor("", {})._resolve_data_from_input_path(inputs_dir)
assert result == data + data
# if has max_lines_count
result = BatchInputsProcessor("", {}, max_lines_count=1)._resolve_data_from_input_path(inputs_dir)
assert result == [
{"question": "What's promptflow?"},
]
@pytest.mark.parametrize(
"data_path",
[
"10k.jsonl",
"10k",
],
)
def test_resolve_data_from_input_path_with_large_data(self, data_path):
data_path = DATA_ROOT / "load_data_cases" / data_path
result = BatchInputsProcessor("", {})._resolve_data_from_input_path(Path(data_path))
assert isinstance(result, list)
assert len(result) == 10000
# specify max_rows_count
max_rows_count = 5
head_results = BatchInputsProcessor(
working_dir="",
flow_inputs={},
max_lines_count=max_rows_count,
)._resolve_data_from_input_path(Path(data_path))
assert isinstance(head_results, list)
assert len(head_results) == max_rows_count
assert result[:max_rows_count] == head_results
@pytest.mark.parametrize(
"inputs, inputs_mapping, expected",
[
(
{"data.test": {"question": "longer input key has lower priority."}, "line_number": 0},
{
"question": "${data.test.question}", # Question from the data
"value": 1,
},
{"question": "longer input key has lower priority.", "value": 1, "line_number": 0},
),
(
{
# Missing line_number is also valid data.
"data.test": {"question": "longer input key has lower priority."},
"data": {"test.question": "Shorter input key has higher priority."},
},
{
"question": "${data.test.question}", # Question from the data
"deployment_name": "text-davinci-003", # literal value
},
{
"question": "Shorter input key has higher priority.",
"deployment_name": "text-davinci-003",
},
),
],
)
def test_apply_inputs_mapping(self, inputs, inputs_mapping, expected):
result = apply_inputs_mapping(inputs, inputs_mapping)
assert expected == result, "Expected: {}, Actual: {}".format(expected, result)
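        # For reference, apply_inputs_mapping resolves "${source.key}"-style
        # placeholders against the merged line inputs, e.g. (illustrative values):
        #   apply_inputs_mapping({"data": {"question": "q"}}, {"question": "${data.question}"})
        #   -> {"question": "q"}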
@pytest.mark.parametrize(
"inputs, inputs_mapping, error_code, error_message",
[
(
{
"baseline": {"answer": 123, "question": "dummy"},
},
{
"question": "${baseline.output}",
"answer": "${data.output}",
},
InputMappingError,
"Couldn't find these mapping relations: ${baseline.output}, ${data.output}. "
"Please make sure your input mapping keys and values match your YAML input section and input data.",
),
],
)
def test_apply_inputs_mapping_error(self, inputs, inputs_mapping, error_code, error_message):
with pytest.raises(error_code) as e:
apply_inputs_mapping(inputs, inputs_mapping)
assert error_message in str(e.value), "Expected: {}, Actual: {}".format(error_message, str(e.value))
@pytest.mark.parametrize(
"inputs, expected",
[
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"output": [{"answer": "output_ans1"}, {"answer": "output_ans2"}],
},
[
# Get 2 lines data.
{
"data": {"question": "q1", "answer": "ans1"},
"output": {"answer": "output_ans1"},
"line_number": 0,
},
{
"data": {"question": "q2", "answer": "ans2"},
"output": {"answer": "output_ans2"},
"line_number": 1,
},
],
),
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"output": [{"answer": "output_ans2", "line_number": 1}],
},
[
# Only one line valid data.
{
"data": {"question": "q2", "answer": "ans2"},
"output": {"answer": "output_ans2", "line_number": 1},
"line_number": 1,
},
],
),
],
)
def test_merge_input_dicts_by_line(self, inputs, expected):
result = BatchInputsProcessor("", {})._merge_input_dicts_by_line(inputs)
json.dumps(result)
assert expected == result, "Expected: {}, Actual: {}".format(expected, result)
@pytest.mark.parametrize(
"inputs, error_code, error_message",
[
(
{
"baseline": [],
},
InputMappingError,
"The input for batch run is incorrect. Input from key 'baseline' is an empty list, which means we "
"cannot generate a single line input for the flow run. Please rectify the input and try again.",
),
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"baseline": [{"answer": "baseline_ans2"}],
},
InputMappingError,
"The input for batch run is incorrect. Line numbers are not aligned. Some lists have dictionaries "
"missing the 'line_number' key, and the lengths of these lists are different. List lengths are: "
"{'data': 2, 'baseline': 1}. Please make sure these lists have the same length "
"or add 'line_number' key to each dictionary.",
),
],
)
def test_merge_input_dicts_by_line_error(self, inputs, error_code, error_message):
with pytest.raises(error_code) as e:
BatchInputsProcessor("", {})._merge_input_dicts_by_line(inputs)
assert error_message == str(e.value), "Expected: {}, Actual: {}".format(error_message, str(e.value))
@pytest.mark.parametrize("inputs_mapping", [{"question": "${data.question}"}, {}])
def test_complete_inputs_mapping_by_default_value(self, inputs_mapping):
inputs = {
"question": None,
"groundtruth": None,
"input_with_default_value": FlowInputDefinition(type=ValueType.BOOL, default=False),
}
updated_inputs_mapping = BatchInputsProcessor("", inputs)._complete_inputs_mapping_by_default_value(
inputs_mapping
)
assert "input_with_default_value" not in updated_inputs_mapping
assert updated_inputs_mapping == {"question": "${data.question}", "groundtruth": "${data.groundtruth}"}
@pytest.mark.parametrize(
"inputs, inputs_mapping, expected",
[
(
# Use default mapping generated from flow inputs.
{
"data": [{"question": "q1", "groundtruth": "ans1"}, {"question": "q2", "groundtruth": "ans2"}],
},
{},
[
{
"question": "q1",
"groundtruth": "ans1",
"line_number": 0,
},
{
"question": "q2",
"groundtruth": "ans2",
"line_number": 1,
},
],
),
(
# Partially use default mapping generated from flow inputs.
{
"data": [{"question": "q1", "groundtruth": "ans1"}, {"question": "q2", "groundtruth": "ans2"}],
},
{
"question": "${data.question}",
},
[
{
"question": "q1",
"groundtruth": "ans1",
"line_number": 0,
},
{
"question": "q2",
"groundtruth": "ans2",
"line_number": 1,
},
],
),
(
{
"data": [
{"question": "q1", "answer": "ans1", "line_number": 5},
{"question": "q2", "answer": "ans2", "line_number": 6},
],
"baseline": [
{"answer": "baseline_ans1", "line_number": 5},
{"answer": "baseline_ans2", "line_number": 7},
],
},
{
"question": "${data.question}", # Question from the data
"groundtruth": "${data.answer}", # Answer from the data
"baseline": "${baseline.answer}", # Answer from the baseline
"deployment_name": "text-davinci-003", # literal value
"line_number": "${data.question}", # line_number mapping should be ignored
},
[
{
"question": "q1",
"groundtruth": "ans1",
"baseline": "baseline_ans1",
"deployment_name": "text-davinci-003",
"line_number": 5,
},
],
),
],
)
def test_validate_and_apply_inputs_mapping(self, inputs, inputs_mapping, expected):
flow_inputs = {"question": None, "groundtruth": None}
result = BatchInputsProcessor("", flow_inputs)._validate_and_apply_inputs_mapping(inputs, inputs_mapping)
assert expected == result, "Expected: {}, Actual: {}".format(expected, result)
def test_validate_and_apply_inputs_mapping_empty_input(self):
inputs = {
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
"baseline": [{"answer": "baseline_ans1"}, {"answer": "baseline_ans2"}],
}
result = BatchInputsProcessor("", {})._validate_and_apply_inputs_mapping(inputs, {})
assert result == [
{"line_number": 0},
{"line_number": 1},
], "Empty flow inputs and inputs_mapping should return list with empty dicts."
@pytest.mark.parametrize(
"inputs_mapping, error_code",
[
(
{"question": "${question}"},
InputMappingError,
),
],
)
def test_validate_and_apply_inputs_mapping_error(self, inputs_mapping, error_code):
flow_inputs = {"question": None}
with pytest.raises(error_code) as _:
BatchInputsProcessor("", flow_inputs)._validate_and_apply_inputs_mapping(
inputs={}, inputs_mapping=inputs_mapping
)
@pytest.mark.parametrize(
"inputs, inputs_mapping, error_code, error_message",
[
(
{
"data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
},
None,
UnexpectedError,
"The input for batch run is incorrect. Please make sure to set up a proper input mapping "
"before proceeding. If you need additional help, feel free to contact support for further assistance.",
),
],
)
def test_inputs_mapping_for_all_lines_error(self, inputs, inputs_mapping, error_code, error_message):
with pytest.raises(error_code) as e:
BatchInputsProcessor("", {})._apply_inputs_mapping_for_all_lines(inputs, inputs_mapping)
assert error_message == str(e.value), "Expected: {}, Actual: {}".format(error_message, str(e.value))
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_batch_engine.py | from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import Mock, patch
import pytest
from promptflow._core._errors import UnexpectedError
from promptflow.batch import APIBasedExecutorProxy, BatchEngine, CSharpExecutorProxy, PythonExecutorProxy
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget
from promptflow.executor._errors import ConnectionNotFound
from promptflow.executor._result import AggregationResult
from ...utils import MemoryRunStorage, get_yaml_file, load_jsonl
from .test_result import get_line_results, get_node_run_infos
@pytest.mark.unittest
class TestBatchEngine:
@pytest.mark.parametrize(
"side_effect, ex_type, ex_target, ex_codes, ex_msg",
[
(
Exception("test error"),
UnexpectedError,
ErrorTarget.BATCH,
["SystemError", "UnexpectedError"],
"Unexpected error occurred while executing the batch run. Error: (Exception) test error.",
),
(
ConnectionNotFound(message="Connection 'aoai_conn' not found"),
ConnectionNotFound,
ErrorTarget.EXECUTOR,
["UserError", "ValidationError", "InvalidRequest", "ConnectionNotFound"],
"Connection 'aoai_conn' not found",
),
],
)
def test_batch_engine_run_error(self, side_effect, ex_type, ex_target, ex_codes, ex_msg):
batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
with patch("promptflow.batch._batch_engine.BatchEngine._exec_in_task") as mock_func:
mock_func.side_effect = side_effect
with patch(
"promptflow.batch._batch_inputs_processor.BatchInputsProcessor.process_batch_inputs", new=Mock()
):
with pytest.raises(ex_type) as e:
batch_engine.run({}, {}, Path("."))
assert e.value.target == ex_target
assert e.value.error_codes == ex_codes
assert e.value.message == ex_msg
def test_register_executor(self):
# assert original values
assert BatchEngine.executor_proxy_classes["python"] == PythonExecutorProxy
assert BatchEngine.executor_proxy_classes["csharp"] == CSharpExecutorProxy
class MockJSExecutorProxy(APIBasedExecutorProxy):
pass
# register new proxy
BatchEngine.register_executor("js", MockJSExecutorProxy)
assert BatchEngine.executor_proxy_classes["js"] == MockJSExecutorProxy
assert len(BatchEngine.executor_proxy_classes) == 3
def test_cancel(self):
batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
assert batch_engine._is_canceled is False
batch_engine.cancel()
assert batch_engine._is_canceled is True
def test_persist_run_info(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
line_results = get_line_results(line_dict)
        mem_run_storage = MemoryRunStorage()
        batch_engine = BatchEngine(get_yaml_file("print_input_flow"), "", storage=mem_run_storage)
        batch_engine._persist_run_info(line_results)
        assert len(mem_run_storage._flow_runs) == 3
        assert len(mem_run_storage._node_runs) == 9
def test_persist_outputs(self):
outputs = [
{"line_number": 0, "output": "Hello World!"},
{"line_number": 1, "output": "Hello Microsoft!"},
{"line_number": 2, "output": "Hello Promptflow!"},
]
output_dir = Path(mkdtemp())
batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
batch_engine._persist_outputs(outputs, output_dir)
actual_outputs = load_jsonl(output_dir / "output.jsonl")
assert actual_outputs == outputs
def test_update_aggr_result(self):
output = {"output": "Hello World!"}
metrics = {"accuracy": 0.9}
node_run_infos = get_node_run_infos({"aggr_1": Status.Completed, "aggr_2": Status.Completed})
        aggr_result = AggregationResult(output={}, metrics={}, node_run_infos={})
        aggr_exec_result = AggregationResult(output=output, metrics=metrics, node_run_infos=node_run_infos)
        batch_engine = BatchEngine(get_yaml_file("print_input_flow"))
        batch_engine._update_aggr_result(aggr_result, aggr_exec_result)
        assert aggr_result.output == output
        assert aggr_result.metrics == metrics
        assert aggr_result.node_run_infos == node_run_infos
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_csharp_executor_proxy.py | import json
import socket
import subprocess
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import MagicMock, patch
import pytest
from promptflow._core._errors import MetaFileNotFound, MetaFileReadError
from promptflow._sdk._constants import FLOW_TOOLS_JSON, PROMPT_FLOW_DIR_NAME
from promptflow.batch import CSharpExecutorProxy
from promptflow.executor._result import AggregationResult
from ...utils import get_flow_folder, get_yaml_file
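# Builds a CSharpExecutorProxy for the tests below. ensure_executor_startup is
# patched out so the tests never wait for a real executor service to report
# healthy.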
async def get_executor_proxy():
flow_file = get_yaml_file("csharp_flow")
working_dir = get_flow_folder("csharp_flow")
with patch.object(CSharpExecutorProxy, "ensure_executor_startup", return_value=None):
return await CSharpExecutorProxy.create(flow_file, working_dir)
@pytest.mark.unittest
class TestCSharpExecutorProxy:
@pytest.mark.asyncio
async def test_create(self):
with patch("subprocess.Popen") as mock_popen:
mock_popen.return_value = MagicMock()
executor_proxy = await get_executor_proxy()
mock_popen.assert_called_once()
assert executor_proxy is not None
assert executor_proxy._process is not None
assert executor_proxy._port is not None
assert executor_proxy.api_endpoint == f"http://localhost:{executor_proxy._port}"
@pytest.mark.asyncio
async def test_destroy_with_already_terminated(self):
mock_process = MagicMock()
mock_process.poll.return_value = 0
executor_proxy = await get_executor_proxy()
executor_proxy._process = mock_process
await executor_proxy.destroy()
mock_process.poll.assert_called_once()
mock_process.terminate.assert_not_called()
@pytest.mark.asyncio
async def test_destroy_with_terminates_gracefully(self):
mock_process = MagicMock()
mock_process.poll.return_value = None
executor_proxy = await get_executor_proxy()
executor_proxy._process = mock_process
await executor_proxy.destroy()
mock_process.poll.assert_called_once()
mock_process.terminate.assert_called_once()
mock_process.wait.assert_called_once_with(timeout=5)
mock_process.kill.assert_not_called()
@pytest.mark.asyncio
async def test_destroy_with_force_kill(self):
mock_process = MagicMock()
mock_process.poll.return_value = None
mock_process.wait.side_effect = subprocess.TimeoutExpired(cmd="cmd", timeout=5)
executor_proxy = await get_executor_proxy()
executor_proxy._process = mock_process
await executor_proxy.destroy()
mock_process.poll.assert_called_once()
mock_process.terminate.assert_called_once()
mock_process.wait.assert_called_once_with(timeout=5)
mock_process.kill.assert_called_once()
@pytest.mark.asyncio
async def test_exec_aggregation_async(self):
executor_proxy = await get_executor_proxy()
aggr_result = await executor_proxy.exec_aggregation_async("", "", "")
assert isinstance(aggr_result, AggregationResult)
@pytest.mark.asyncio
@pytest.mark.parametrize(
"exit_code, expected_result",
[
(None, True),
(0, False),
(1, False),
],
)
async def test_is_executor_active(self, exit_code, expected_result):
executor_proxy = await get_executor_proxy()
executor_proxy._process = MagicMock()
executor_proxy._process.poll.return_value = exit_code
assert executor_proxy._is_executor_active() == expected_result
def test_get_tool_metadata_succeed(self):
working_dir = Path(mkdtemp())
expected_tool_meta = {"name": "csharp_flow", "version": "0.1.0"}
tool_meta_file = working_dir / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
tool_meta_file.parent.mkdir(parents=True, exist_ok=True)
with open(tool_meta_file, "w") as file:
json.dump(expected_tool_meta, file, indent=4)
tool_meta = CSharpExecutorProxy.get_tool_metadata("", working_dir)
assert tool_meta == expected_tool_meta
def test_get_tool_metadata_failed_with_file_not_found(self):
working_dir = Path(mkdtemp())
with pytest.raises(MetaFileNotFound):
CSharpExecutorProxy.get_tool_metadata("", working_dir)
def test_get_tool_metadata_failed_with_content_not_json(self):
working_dir = Path(mkdtemp())
tool_meta_file = working_dir / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
tool_meta_file.parent.mkdir(parents=True, exist_ok=True)
tool_meta_file.touch()
with pytest.raises(MetaFileReadError):
CSharpExecutorProxy.get_tool_metadata("", working_dir)
def test_find_available_port(self):
port = CSharpExecutorProxy.find_available_port()
assert isinstance(port, str)
assert int(port) > 0, "Port number should be greater than 0"
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.bind(("localhost", int(port)))
except OSError:
pytest.fail("Port is not actually available")
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/batch/test_result.py | from datetime import datetime
import pytest
from promptflow.batch._result import BatchResult, ErrorSummary, LineError, SystemMetrics
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
from promptflow.contracts.run_info import Status
from promptflow.executor._result import AggregationResult, LineResult
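# Helpers that fabricate node/flow run infos and line results so BatchResult
# aggregation can be exercised without running a real flow.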
def get_node_run_infos(node_dict: dict, index=None, api_calls=None, system_metrics=None):
return {
k: NodeRunInfo(
node=k,
flow_run_id="flow_run_id",
run_id=f"{index}_run_id_{k}",
status=v,
inputs=[],
output={},
metrics={},
error={"code": "UserError", "message": "test message"} if v == Status.Failed else None,
parent_run_id="",
start_time=None,
end_time=None,
index=index,
api_calls=api_calls,
system_metrics=system_metrics,
)
for k, v in node_dict.items()
}
def get_flow_run_info(status_dict: dict, index: int):
status = Status.Failed if any(status == Status.Failed for status in status_dict.values()) else Status.Completed
error = {"code": "UserError", "message": "test message"} if status == Status.Failed else None
return FlowRunInfo(
run_id=f"{index}_run_id",
status=status,
error=error,
inputs={},
output={},
metrics={},
request=None,
parent_run_id="",
root_run_id="",
source_run_id="",
flow_id="",
start_time=datetime.utcnow(),
end_time=datetime.utcnow(),
index=index,
)
def get_line_results(line_dict: dict, api_calls=None, system_metrics=None):
return [
LineResult(
output={},
aggregation_inputs={},
run_info=get_flow_run_info(status_dict=v, index=k),
node_run_infos=get_node_run_infos(node_dict=v, index=k, api_calls=api_calls, system_metrics=system_metrics),
)
for k, v in line_dict.items()
]
def get_aggregation_result(aggr_dict: dict, api_calls=None, system_metrics=None):
return AggregationResult(
output={},
metrics={},
node_run_infos=get_node_run_infos(node_dict=aggr_dict, api_calls=api_calls, system_metrics=system_metrics),
)
def get_batch_result(line_dict, aggr_dict, line_api_calls=None, aggr_api_calls=None):
line_results = get_line_results(line_dict=line_dict, api_calls=line_api_calls)
aggr_result = get_aggregation_result(aggr_dict=aggr_dict, api_calls=aggr_api_calls)
return BatchResult.create(datetime.utcnow(), datetime.utcnow(), line_results=line_results, aggr_result=aggr_result)
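# Builds an api_call dict in the shape the system-metrics aggregation below
# expects (type, name, inputs, output and optional children).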
def get_api_call(type, name, inputs={}, output={}, children=None):
return {"type": type, "name": name, "inputs": inputs, "output": output, "children": children}
@pytest.mark.unittest
class TestBatchResult:
def test_node_status(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
aggr_dict = {"aggr_0": Status.Completed, "aggr_1": Status.Failed, "aggr_2": Status.Bypassed}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict=aggr_dict)
assert batch_result.total_lines == 3
assert batch_result.completed_lines == 2
assert batch_result.failed_lines == 1
assert batch_result.node_status == {
"node_0.completed": 3,
"node_1.completed": 2,
"node_1.failed": 1,
"node_2.completed": 2,
"node_2.bypassed": 1,
"aggr_0.completed": 1,
"aggr_1.failed": 1,
"aggr_2.bypassed": 1,
}
def test_system_metrics(self):
from openai.types.completion import Completion, CompletionChoice
line_dict = {0: {"node_0": Status.Completed}}
aggr_dict = {"aggr_0": Status.Completed}
api_call_1 = get_api_call(
"LLM",
"openai.resources.completions.Completions.create",
inputs={"prompt": "Please tell me a joke.", "model": "text-davinci-003"},
output={"choices": [{"text": "text"}]},
)
api_call_2 = get_api_call(
"LLM",
"openai.resources.completions.Completions.create",
inputs={
"prompt": ["Please tell me a joke.", "Please tell me a joke about fruit."],
"model": "text-davinci-003",
},
output=[
Completion(
choices=[CompletionChoice(text="text", finish_reason="stop", index=0, logprobs=None)],
id="id",
created=0,
model="model",
object="text_completion",
),
Completion(
choices=[CompletionChoice(text="text", finish_reason="stop", index=0, logprobs=None)],
id="id",
created=0,
model="model",
object="text_completion",
),
],
)
line_api_calls = get_api_call("Chain", "Chain", children=[api_call_1, api_call_2])
aggr_api_call = get_api_call(
"LLM",
"openai.resources.chat.completions.Completions.create",
inputs={
"messages": [{"system": "You are a helpful assistant.", "user": "Please tell me a joke."}],
"model": "gpt-35-turbo",
},
output={"choices": [{"message": {"content": "content"}}]},
)
batch_result = get_batch_result(
line_dict=line_dict, aggr_dict=aggr_dict, line_api_calls=[line_api_calls], aggr_api_calls=[aggr_api_call]
)
assert batch_result.system_metrics.total_tokens == 42
assert batch_result.system_metrics.prompt_tokens == 38
assert batch_result.system_metrics.completion_tokens == 4
system_metrics_dict = {
"total_tokens": 42,
"prompt_tokens": 38,
"completion_tokens": 4,
}
assert system_metrics_dict.items() <= batch_result.system_metrics.to_dict().items()
@pytest.mark.parametrize(
"api_call",
[
get_api_call("LLM", "Completion", inputs="invalid"),
get_api_call("LLM", "Completion", output="invalid"),
get_api_call("LLM", "Invalid"),
get_api_call("LLM", "Completion"),
get_api_call("LLM", "Completion", inputs={"api_type": "azure"}),
get_api_call("LLM", "ChatCompletion", inputs={"api_type": "azure", "engine": "invalid"}),
],
)
def test_invalid_api_calls(self, api_call):
line_dict = {0: {"node_0": Status.Completed}}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict={}, line_api_calls=[api_call])
assert batch_result.system_metrics.total_tokens == 0
assert batch_result.system_metrics.completion_tokens == 0
assert batch_result.system_metrics.prompt_tokens == 0
def test_error_summary(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
aggr_dict = {
"aggr_0": Status.Completed,
"aggr_1": Status.Failed,
"aggr_2": Status.Bypassed,
"aggr_4": Status.Failed,
}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict=aggr_dict)
assert batch_result.total_lines == 3
assert batch_result.failed_lines == 1
assert batch_result.error_summary.failed_system_error_lines == 0
assert batch_result.error_summary.failed_user_error_lines == 1
assert batch_result.error_summary.error_list == [
LineError(line_number=1, error={"code": "UserError", "message": "test message"}),
]
assert batch_result.error_summary.error_list[0].to_dict() == {
"line_number": 1,
"error": {
"code": "UserError",
"message": "test message",
},
}
assert batch_result.error_summary.aggr_error_dict == {
"aggr_1": {"code": "UserError", "message": "test message"},
"aggr_4": {"code": "UserError", "message": "test message"},
}
@pytest.mark.unittest
class TestErrorSummary:
def test_create(self):
line_dict = {
0: {"node_0": Status.Failed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
}
line_results = get_line_results(line_dict)
line_results[0].run_info.error = {"code": "SystemError", "message": "test system error message"}
aggr_dict = {"aggr_0": Status.Completed, "aggr_1": Status.Failed}
aggr_result = get_aggregation_result(aggr_dict)
error_summary = ErrorSummary.create(line_results, aggr_result)
assert error_summary.failed_user_error_lines == 1
assert error_summary.failed_system_error_lines == 1
assert error_summary.error_list == [
LineError(line_number=0, error={"code": "SystemError", "message": "test system error message"}),
LineError(line_number=1, error={"code": "UserError", "message": "test message"}),
]
assert error_summary.aggr_error_dict == {"aggr_1": {"code": "UserError", "message": "test message"}}
@pytest.mark.unittest
class TestSystemMetrics:
    def test_create(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Completed},
}
line_system_metrics = {
"total_tokens": 5,
"prompt_tokens": 3,
"completion_tokens": 2,
}
line_results = get_line_results(line_dict, system_metrics=line_system_metrics)
aggr_dict = {"aggr_0": Status.Completed}
# invalid system metrics
aggr_system_metrics = {
"total_tokens": 10,
"prompt_tokens": 6,
}
aggr_result = get_aggregation_result(aggr_dict, system_metrics=aggr_system_metrics)
system_metrics = SystemMetrics.create(datetime.utcnow(), datetime.utcnow(), line_results, aggr_result)
assert system_metrics.total_tokens == 20
assert system_metrics.prompt_tokens == 12
assert system_metrics.completion_tokens == 8
system_metrics_dict = {
"total_tokens": 20,
"prompt_tokens": 12,
"completion_tokens": 8,
}
assert system_metrics_dict.items() <= system_metrics.to_dict().items()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/integrations/test_langchain.py | import pytest
from langchain.schema import AgentAction, AgentFinish
from promptflow.integrations.langchain import LangChainEventType, PromptFlowCallbackHandler
@pytest.mark.unittest
class TestLangchain:
def get_handler(self):
        class MockTracer:
def __init__(self):
self._trace_stack = []
def _push(self, trace):
self._trace_stack.append(trace)
def _pop(self, output=None, error=None):
self._trace_stack.pop()
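        # Replace the handler's tracer with the in-memory stub above so the
        # tests can inspect how traces are pushed and popped.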
handler = PromptFlowCallbackHandler()
handler._tracer = MockTracer()
return handler
def test_langchain_traces(self):
handler = self.get_handler()
handler.on_agent_action(action=AgentAction("test_agent_name", "test", "test"))
handler.on_tool_start(serialized={"name": "test_tool_name"}, input_str="test")
handler.on_chain_start(serialized={"id": ["test_chain_name"]}, inputs={"test": "test"})
handler.on_llm_start(serialized={"test": "test"}, prompts=["test"])
assert handler._events_stack == [
LangChainEventType.AGENT,
LangChainEventType.TOOL,
LangChainEventType.CHAIN,
LangChainEventType.LLM
]
assert len(handler._tracer._trace_stack) == 4
assert handler._tracer._trace_stack[0].name == "test_agent_name"
assert handler._tracer._trace_stack[1].name == "test_tool_name"
assert handler._tracer._trace_stack[2].name == "test_chain_name"
assert handler._tracer._trace_stack[3].name == "LLM" # The default name
handler.on_llm_error(error=None)
handler.on_chain_error(error=None)
handler.on_tool_error(error=None)
handler.on_agent_finish(finish=AgentFinish({"test": "test"}, "test"))
assert len(handler._events_stack) == 0
assert len(handler._tracer._trace_stack) == 0
def test_langchain_traces_with_unpaired_events(self):
handler = self.get_handler()
handler.on_tool_start(serialized={"test": "test"}, input_str="test")
# Missing on_chain_start
# Missing on_llm_start
assert len(handler._tracer._trace_stack) == 1
handler.on_llm_end(response=None)
handler.on_chain_end(outputs={"test": "test"})
assert len(handler._tracer._trace_stack) == 1
handler.on_tool_end(output="test")
assert len(handler._events_stack) == 0
assert len(handler._tracer._trace_stack) == 0
handler = self.get_handler()
handler.on_tool_start(serialized={"test": "test"}, input_str="test")
handler.on_chain_start(serialized={"test": "test"}, inputs={"test": "test"})
handler.on_llm_start(serialized={"test": "test"}, prompts=["test"])
assert len(handler._tracer._trace_stack) == 3
# Missing on_chain_end
# Missing on_llm_end
handler.on_tool_end(output="test")
assert len(handler._events_stack) == 0
assert len(handler._tracer._trace_stack) == 0
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/storage/test_queue_run_storage.py | import pytest
from multiprocessing import Queue
from promptflow.executor._line_execution_process_pool import QueueRunStorage
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
@pytest.mark.unittest
class TestQueueRunStorage:
def test_persist_node_run(self):
queue = Queue()
run_storage = QueueRunStorage(queue)
node_run_info = NodeRunInfo(
node="node1",
flow_run_id="flow_run_id",
run_id="run_id",
status="status",
inputs="inputs",
output="output",
metrics="metrics",
error="error",
parent_run_id="parent_run_id",
start_time="start_time",
end_time="end_time",
index="index",
api_calls="api_calls",
variant_id="variant_id",
cached_run_id="cached_run_id",
cached_flow_run_id="cached_flow_run_id",
logs="logs",
system_metrics="system_metrics",
result="result",
)
run_storage.persist_node_run(node_run_info)
assert queue.get() == node_run_info
def test_persist_flow_run(self):
queue = Queue()
run_storage = QueueRunStorage(queue)
flow_run_info = FlowRunInfo(
run_id="run_id",
status="status",
inputs="inputs",
output="output",
metrics="metrics",
request="request",
root_run_id="root_run_id",
source_run_id="source_run_id",
flow_id="flow_id",
error="error",
parent_run_id="parent_run_id",
start_time="start_time",
end_time="end_time",
index="index",
api_calls="api_calls",
variant_id="variant_id",
system_metrics="system_metrics",
result="result",
)
run_storage.persist_flow_run(flow_run_info)
assert queue.get() == flow_run_info
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/storage/test_run_records.py | import json
from datetime import datetime
import pytest
from promptflow._utils.dataclass_serializer import serialize
from promptflow.contracts.run_info import FlowRunInfo, RunInfo, Status
from promptflow.storage.run_records import LineRunRecord, NodeRunRecord
@pytest.mark.unittest
def test_line_record():
start_time = datetime(2023, 7, 12)
end_time = datetime(2023, 7, 13)
flow_run_info = FlowRunInfo(
run_id=None,
status=Status.Completed,
error=None,
inputs=None,
output=None,
metrics=None,
request=None,
parent_run_id=None,
root_run_id=None,
source_run_id=None,
flow_id=None,
start_time=start_time,
end_time=end_time,
index=0,
variant_id=None,
)
line_record = LineRunRecord.from_run_info(flow_run_info)
assert line_record.line_number == 0
assert line_record.start_time == start_time.isoformat()
assert line_record.end_time == end_time.isoformat()
assert line_record.status == Status.Completed.value
assert line_record.run_info == serialize(flow_run_info)
@pytest.mark.unittest
def test_line_serialize():
start_time = datetime(2023, 7, 12)
end_time = datetime(2023, 7, 13)
flow_run_info = FlowRunInfo(
run_id=None,
status=Status.Completed,
error=None,
inputs=None,
output=None,
metrics=None,
request=None,
parent_run_id=None,
root_run_id=None,
source_run_id=None,
flow_id=None,
start_time=start_time,
end_time=end_time,
index=0,
variant_id=None,
)
line_record = LineRunRecord.from_run_info(flow_run_info)
result = line_record.serialize()
expected_result = json.dumps(line_record.__dict__)
assert result == expected_result
@pytest.mark.unittest
def test_node_record():
start_time = datetime(2023, 7, 12)
end_time = datetime(2023, 7, 13)
node_run_info = RunInfo(
node=None,
run_id=None,
flow_run_id=None,
status=Status.Completed,
inputs=None,
output=None,
metrics=None,
error=None,
parent_run_id=None,
start_time=start_time,
end_time=end_time,
index=0,
)
node_record = NodeRunRecord.from_run_info(node_run_info)
assert node_record.line_number == 0
assert node_record.start_time == start_time.isoformat()
assert node_record.end_time == end_time.isoformat()
assert node_record.status == Status.Completed.value
assert node_record.run_info == serialize(node_run_info)
@pytest.mark.unittest
def test_node_serialize():
start_time = datetime(2023, 7, 12)
end_time = datetime(2023, 7, 13)
node_run_info = RunInfo(
node=None,
run_id=None,
flow_run_id=None,
status=Status.Completed,
inputs=None,
output=None,
metrics=None,
error=None,
parent_run_id=None,
start_time=start_time,
end_time=end_time,
index=0,
)
node_record = NodeRunRecord.from_run_info(node_run_info)
result = node_record.serialize()
expected_result = json.dumps(node_record.__dict__)
assert result == expected_result
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_operation_context.py | import threading
import pytest
from promptflow._core.operation_context import OperationContext
from promptflow._version import VERSION
from promptflow.contracts.run_mode import RunMode
def set_run_mode(context: OperationContext, run_mode: RunMode):
"""This method simulates the runtime.execute_request()
It is aimed to set the run_mode into operation context.
"""
context.run_mode = run_mode.name if run_mode is not None else ""
@pytest.mark.unittest
class TestOperationContext:
def test_get_user_agent(self):
operation_context = OperationContext()
assert operation_context.get_user_agent() == f"promptflow/{VERSION}"
operation_context.user_agent = "test_agent/0.0.2"
assert operation_context.get_user_agent() == f"test_agent/0.0.2 promptflow/{VERSION}"
@pytest.mark.parametrize(
"run_mode, expected",
[
(RunMode.Test, "Test"),
(RunMode.SingleNode, "SingleNode"),
(RunMode.Batch, "Batch"),
],
)
def test_run_mode(self, run_mode, expected):
context = OperationContext()
set_run_mode(context, run_mode)
assert context.run_mode == expected
def test_context_dict(self):
context = OperationContext()
context.run_mode = "Flow"
context.user_agent = "test_agent/0.0.2"
context.none_value = None
context_dict = context.get_context_dict()
assert context_dict["run_mode"] == "Flow"
assert context_dict["user_agent"] == "test_agent/0.0.2"
assert context_dict["none_value"] is None
def test_setattr(self):
context = OperationContext()
context.run_mode = "Flow"
assert context["run_mode"] == "Flow"
def test_setattr_non_primitive(self):
# Test set non-primitive type
context = OperationContext()
with pytest.raises(TypeError):
context.foo = [1, 2, 3]
def test_getattr(self):
context = OperationContext()
context["run_mode"] = "Flow"
assert context.run_mode == "Flow"
def test_getattr_missing(self):
context = OperationContext()
with pytest.raises(AttributeError):
context.foo
def test_delattr(self):
# test that delattr works as expected
context = OperationContext()
context.foo = "bar"
del context.foo
assert "foo" not in context
# test that delattr raises AttributeError for non-existent name
with pytest.raises(AttributeError):
del context.baz
def test_append_user_agent(self):
context = OperationContext()
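        # Capture any user agent already present in the context so the
        # assertions below hold regardless of state left by other tests.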
        user_agent = (' ' + context.user_agent) if 'user_agent' in context else ''
context.append_user_agent("test_agent/0.0.2")
assert context.user_agent == "test_agent/0.0.2" + user_agent
context.append_user_agent("test_agent/0.0.3")
assert context.user_agent == "test_agent/0.0.2 test_agent/0.0.3" + user_agent
def test_get_instance(self):
context1 = OperationContext.get_instance()
context2 = OperationContext.get_instance()
assert context1 is context2
def test_set_batch_input_source_from_inputs_mapping_run(self):
input_mapping = {"input1": "${run.outputs.output1}", "input2": "${run.outputs.output2}"}
context = OperationContext()
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Run"
def test_set_batch_input_source_from_inputs_mapping_data(self):
input_mapping = {"url": "${data.url}"}
context = OperationContext()
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Data"
def test_set_batch_input_source_from_inputs_mapping_none(self):
input_mapping = None
context = OperationContext()
assert not hasattr(context, "batch_input_source")
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Data"
def test_set_batch_input_source_from_inputs_mapping_empty(self):
input_mapping = {}
context = OperationContext()
assert not hasattr(context, "batch_input_source")
context.set_batch_input_source_from_inputs_mapping(input_mapping)
assert context.batch_input_source == "Data"
def test_different_thread_have_different_instance(self):
# create a list to store the OperationContext instances from each thread
instances = []
# define a function that gets the OperationContext instance and appends it to the list
def get_instance():
instance = OperationContext.get_instance()
instances.append(instance)
# create two threads and run the function in each thread
thread1 = threading.Thread(target=get_instance)
thread2 = threading.Thread(target=get_instance)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
# assert that the list has two elements and they are different objects
assert len(instances) == 2
assert instances[0] is not instances[1]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_run_tracker.py | import pytest
from promptflow._core._errors import RunRecordNotFound
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._core.run_tracker import RunTracker
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.run_info import Status
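# A type with no JSON-serializable representation, used to check how the run
# tracker reports outputs it cannot serialize.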
class UnserializableClass:
def __init__(self, data: str):
self.data = data
@pytest.mark.unittest
class TestRunTracker:
def test_run_tracker(self):
# TODO: Refactor this test case, it's very confusing now.
# Initialize run tracker with dummy run storage
run_tracker = RunTracker.init_dummy()
# Start flow run
run_tracker.start_flow_run("test_flow_id", "test_root_run_id", "test_flow_run_id")
assert len(run_tracker._flow_runs) == 1
assert run_tracker._current_run_id == "test_flow_run_id"
flow_input = {"flow_input": "input_0"}
run_tracker.set_inputs("test_flow_run_id", flow_input)
# Start node runs
run_info = run_tracker.start_node_run("node_0", "test_root_run_id", "test_flow_run_id", "run_id_0", index=0)
run_info.index = 0
run_info = run_tracker.start_node_run("node_0", "test_root_run_id", "test_flow_run_id", "run_id_1", index=1)
run_info.index = 1
run_tracker.start_node_run("node_aggr", "test_root_run_id", "test_flow_run_id", "run_id_aggr", index=None)
assert len(run_tracker._node_runs) == 3
assert run_tracker._current_run_id == "run_id_aggr"
# Test collect_all_run_infos_as_dicts
run_tracker.allow_generator_types = True
run_tracker.set_inputs(
"run_id_0",
{"input": "input_0", "connection": AzureOpenAIConnection("api_key", "api_base")}
)
run_tracker.set_inputs(
"run_id_1",
{"input": "input_1", "generator": GeneratorProxy(item for item in range(10))}
)
run_infos = run_tracker.collect_all_run_infos_as_dicts()
assert len(run_infos["flow_runs"]) == 1
assert len(run_infos["node_runs"]) == 3
assert run_infos["node_runs"][0]["inputs"] == {"input": "input_0", "connection": "AzureOpenAIConnection"}
assert run_infos["node_runs"][1]["inputs"] == {"input": "input_1", "generator": []}
# Test end run with normal result
result = {"result": "result"}
run_info_0 = run_tracker.end_run(run_id="run_id_0", result=result)
assert run_info_0.status == Status.Completed
assert run_info_0.output == result
# Test end run with unserializable result
result = {"unserialized_value": UnserializableClass("test")}
run_info_1 = run_tracker.end_run(run_id="run_id_1", result=result)
assert run_info_1.status == Status.Completed
assert run_info_1.output == str(result)
# Test end run with invalid run id
with pytest.raises(RunRecordNotFound):
run_tracker.end_run(run_id="invalid_run_id")
# Test end run with exception
ex = Exception("Failed")
run_info_aggr = run_tracker.end_run(run_id="run_id_aggr", ex=ex)
assert run_info_aggr.status == Status.Failed
assert run_info_aggr.error["message"] == "Failed"
# Test end flow run with unserializable result
result = {"unserialized_value": UnserializableClass("test")}
run_info_flow = run_tracker.end_run(run_id="test_flow_run_id", result=result)
assert run_info_flow.status == Status.Failed
assert "The output 'unserialized_value' for flow is incorrect." in run_info_flow.error["message"]
# Test _update_flow_run_info_with_node_runs
        run_info_0.api_calls, run_info_0.system_metrics = [{"name": "chat"}], {"total_tokens": 10}
run_info_1.api_calls, run_info_1.system_metrics = [{"name": "completion"}], {"total_tokens": 20}
run_info_aggr.api_calls, run_info_aggr.system_metrics = [
{"name": "caht"}, {"name": "completion"}], {"total_tokens": 30}
run_tracker._update_flow_run_info_with_node_runs(run_info_flow)
assert len(run_info_flow.api_calls) == 1, "There should be only one top level api call for flow run."
assert run_info_flow.system_metrics["total_tokens"] == 60
assert run_info_flow.api_calls[0]["name"] == "flow"
assert run_info_flow.api_calls[0]["node_name"] == "flow"
assert run_info_flow.api_calls[0]["type"] == "Flow"
assert run_info_flow.api_calls[0]["system_metrics"]["total_tokens"] == 60
assert isinstance(run_info_flow.api_calls[0]["start_time"], float)
assert isinstance(run_info_flow.api_calls[0]["end_time"], float)
assert len(run_info_flow.api_calls[0]["children"]) == 4, "There should be 4 children under root."
# Test get_status_summary
status_summary = run_tracker.get_status_summary("test_root_run_id")
assert status_summary == {
"__pf__.lines.completed": 0,
"__pf__.lines.failed": 1,
"__pf__.nodes.node_0.completed": 2,
"__pf__.nodes.node_aggr.completed": 0,
}
def test_run_tracker_flow_run_without_node_run(self):
"""When line timeout, there will be flow run info without node run info."""
# Initialize run tracker with dummy run storage
run_tracker = RunTracker.init_dummy()
# Start flow run
run_tracker.start_flow_run("test_flow_id", "test_root_run_id", "test_flow_run_id_0", index=0)
run_tracker.end_run("test_flow_run_id_0", ex=Exception("Timeout"))
run_tracker.start_flow_run("test_flow_id", "test_root_run_id", "test_flow_run_id_1", index=1)
run_tracker.end_run("test_flow_run_id_1", result={"result": "result"})
assert len(run_tracker._flow_runs) == 2
# Start node runs
run_tracker.start_node_run("node_0", "test_root_run_id", "test_flow_run_id_2", "test_node_run_id_1", index=0)
run_tracker.end_run("test_node_run_id_1", result={"result": "result"})
assert len(run_tracker._node_runs) == 1
status_summary = run_tracker.get_status_summary("test_root_run_id")
assert status_summary == {
"__pf__.lines.completed": 1,
"__pf__.lines.failed": 1,
"__pf__.nodes.node_0.completed": 1,
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_tracer.py | import inspect
import pytest
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._core.tracer import Tracer, _create_trace_from_function_call, _traced, trace
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.trace import Trace, TraceType
def generator():
for i in range(3):
yield i
@pytest.mark.unittest
class TestTracer:
def test_end_tracing(self):
# Activate the tracer in the current context
tracer = Tracer("test_run_id")
tracer._activate_in_context()
# Assert that there is an active tracer instance
assert Tracer.active_instance() is tracer
# End tracing and get the traces as a JSON string
traces = Tracer.end_tracing()
# Assert that the traces is a list
assert isinstance(traces, list)
# Assert that there is no active tracer instance after ending tracing
assert Tracer.active_instance() is None
# Test the raise_ex argument of the end_tracing method
with pytest.raises(Exception):
# Try to end tracing again with raise_ex=True
Tracer.end_tracing(raise_ex=True)
# Try to end tracing again with raise_ex=False
traces = Tracer.end_tracing(raise_ex=False)
# Assert that the traces are empty
assert not traces
def test_start_tracing(self):
# Assert that there is no active tracer instance before starting tracing
assert Tracer.active_instance() is None
# Start tracing with a mock run_id
Tracer.start_tracing("test_run_id")
# Assert that there is an active tracer instance after starting tracing
assert Tracer.active_instance() is not None
# Assert that the active tracer instance has the correct run_id
assert Tracer.active_instance()._run_id == "test_run_id"
Tracer.end_tracing()
def test_push_pop(self, caplog):
# test the push method with a single trace
Tracer.start_tracing("test_run_id")
tracer = Tracer.active_instance()
trace1 = Trace("test1", inputs=[1, 2, 3], type=TraceType.TOOL)
trace2 = Trace("test2", inputs=[4, 5, 6], type=TraceType.TOOL)
Tracer.push(trace1)
assert tracer._traces == [trace1]
assert tracer._id_to_trace == {trace1.id: trace1}
# test the push method with a nested trace
Tracer.push(trace2)
assert tracer._traces == [trace1] # check if the tracer still has only the first trace in its _traces list
# check if the tracer has both traces in its trace dict
assert tracer._id_to_trace == {trace1.id: trace1, trace2.id: trace2}
assert trace1.children == [trace2] # check if the first trace has the second trace as its child
# test the pop method with generator output
tool_output = generator()
error1 = ValueError("something went wrong")
assert tracer._get_current_trace() is trace2
output = Tracer.pop(output=tool_output, error=error1)
# check output iterator
for i in range(3):
assert next(output) == i
assert isinstance(trace2.output, GeneratorProxy)
assert trace2.error == {
"message": str(error1),
"type": type(error1).__qualname__,
}
assert tracer._get_current_trace() is trace1
# test the pop method with no arguments
output = Tracer.pop()
assert tracer._get_current_trace() is None
assert trace1.output is None
assert output is None
Tracer.end_tracing()
# test the push method with no active tracer
Tracer.push(trace1)
# assert that the warning message is logged
assert "Try to push trace but no active tracer in current context." in caplog.text
def test_unserializable_obj_to_serializable(self):
# assert that the function returns a str object for unserializable objects
assert Tracer.to_serializable(generator) == str(generator)
@pytest.mark.parametrize("obj", [({"name": "Alice", "age": 25}), ([1, 2, 3]), (GeneratorProxy(generator())), (42)])
def test_to_serializable(self, obj):
assert Tracer.to_serializable(obj) == obj
def func_with_no_parameters():
pass
def func_with_args_and_kwargs(arg1, arg2=None, *, kwarg1=None, kwarg2=None):
_ = (arg1, arg2, kwarg1, kwarg2)
async def func_with_args_and_kwargs_async(arg1, arg2=None, *, kwarg1=None, kwarg2=None):
_ = (arg1, arg2, kwarg1, kwarg2)
def func_with_connection_parameter(a: int, conn: AzureOpenAIConnection):
_ = (a, conn)
class MyClass:
def my_method(self, a: int):
_ = a
@pytest.mark.unittest
class TestCreateTraceFromFunctionCall:
"""This class tests the `_create_trace_from_function_call` function."""
def test_basic_fields_are_filled_and_others_are_not(self):
trace = _create_trace_from_function_call(func_with_no_parameters)
# These fields should be filled in this method call.
assert trace.name == "func_with_no_parameters"
assert trace.type == TraceType.FUNCTION
assert trace.inputs == {}
# start_time should be a timestamp, which is a float value currently.
assert isinstance(trace.start_time, float)
# These should be left empty in this method call.
# They will be filled by the tracer later.
assert trace.output is None
assert trace.end_time is None
assert trace.children == []
assert trace.error is None
def test_basic_fields_are_filled_for_async_functions(self):
trace = _create_trace_from_function_call(
func_with_args_and_kwargs_async, args=[1, 2], kwargs={"kwarg1": 3, "kwarg2": 4}
)
assert trace.name == "func_with_args_and_kwargs_async"
assert trace.type == TraceType.FUNCTION
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}
def test_trace_name_should_contain_class_name_for_class_methods(self):
obj = MyClass()
trace = _create_trace_from_function_call(obj.my_method, args=[obj, 1])
assert trace.name == "MyClass.my_method"
def test_trace_type_can_be_set_correctly(self):
trace = _create_trace_from_function_call(func_with_no_parameters, trace_type=TraceType.TOOL)
assert trace.type == TraceType.TOOL
def test_args_and_kwargs_are_filled_correctly(self):
trace = _create_trace_from_function_call(
func_with_args_and_kwargs, args=[1, 2], kwargs={"kwarg1": 3, "kwarg2": 4}
)
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}
def test_args_called_with_name_should_be_filled_correctly(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1], kwargs={"arg2": 2, "kwarg2": 4})
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg2": 4}
def test_kwargs_called_without_name_should_be_filled_correctly(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1, 2, 3], kwargs={"kwarg2": 4})
assert trace.inputs == {"arg1": 1, "arg2": 2, "kwarg1": 3, "kwarg2": 4}
def test_empty_args_should_be_excluded_from_inputs(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, args=[1])
assert trace.inputs == {"arg1": 1}
def test_empty_kwargs_should_be_excluded_from_inputs(self):
trace = _create_trace_from_function_call(func_with_args_and_kwargs, kwargs={"kwarg1": 1})
assert trace.inputs == {"kwarg1": 1}
trace = _create_trace_from_function_call(func_with_args_and_kwargs, kwargs={"kwarg2": 2})
assert trace.inputs == {"kwarg2": 2}
def test_args_and_kwargs_should_be_filled_in_called_order(self):
trace = _create_trace_from_function_call(
func_with_args_and_kwargs, args=[1, 2], kwargs={"kwarg2": 4, "kwarg1": 3}
)
assert list(trace.inputs.keys()) == ["arg1", "arg2", "kwarg2", "kwarg1"]
def test_connections_should_be_serialized(self):
conn = AzureOpenAIConnection("test_name", "test_secret")
trace = _create_trace_from_function_call(func_with_connection_parameter, args=[1, conn])
assert trace.inputs == {"a": 1, "conn": "AzureOpenAIConnection"}
def test_self_arg_should_be_excluded_from_inputs(self):
obj = MyClass()
trace = _create_trace_from_function_call(obj.my_method, args=[1])
assert trace.inputs == {"a": 1}
def sync_func(a: int):
return a
async def async_func(a: int):
return a
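# The two helpers below intentionally raise ZeroDivisionError so the tests can verify
# how errors are recorded on the generated traces.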
def sync_error_func(a: int):
a / 0
async def async_error_func(a: int):
a / 0
@pytest.mark.unittest
class TestTraced:
"""This class tests the `_traced` function."""
def test_traced_sync_func_should_be_a_sync_func(self):
assert inspect.iscoroutinefunction(_traced(sync_func)) is False
def test_traced_async_func_should_be_an_async_func(self):
assert inspect.iscoroutinefunction(_traced(async_func)) is True
@pytest.mark.parametrize("func", [sync_func, async_func])
def test_original_function_and_wrapped_function_should_have_same_name(self, func):
traced_func = _traced(func)
assert traced_func.__name__ == func.__name__
@pytest.mark.parametrize("func", [sync_func, async_func])
def test_original_function_and_wrapped_function_attributes_are_set(self, func):
traced_func = _traced(func)
assert getattr(traced_func, "__original_function") == func
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_func, async_func])
async def test_trace_is_not_generated_when_tracer_is_not_active(self, func):
# Do not call Tracer.start_tracing() here
traced_func = _traced(func)
if inspect.iscoroutinefunction(traced_func):
result = await traced_func(1)
else:
result = traced_func(1)
# Check the result is expected
assert result == 1
# Check that no trace is generated

traces = Tracer.end_tracing()
assert len(traces) == 0
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_func, async_func])
async def test_trace_is_generated_when_tracer_is_active(self, func):
Tracer.start_tracing("test_run_id")
traced_func = _traced(func)
if inspect.iscoroutinefunction(traced_func):
result = await traced_func(1)
else:
result = traced_func(1)
# Check the result is expected
assert result == 1
traces = Tracer.end_tracing()
# Check the generated trace is expected
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.FUNCTION
assert trace["inputs"] == {"a": 1}
assert trace["output"] == 1
assert trace["error"] is None
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_error_func, async_error_func])
async def test_trace_is_generated_when_errors_occurred(self, func):
Tracer.start_tracing("test_run_id")
traced_func = _traced(func)
with pytest.raises(ZeroDivisionError):
if inspect.iscoroutinefunction(traced_func):
await traced_func(1)
else:
traced_func(1)
traces = Tracer.end_tracing()
# Check the generated trace is expected
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.FUNCTION
assert trace["inputs"] == {"a": 1}
assert trace["output"] is None
assert trace["error"] == {"message": "division by zero", "type": "ZeroDivisionError"}
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
@pytest.mark.asyncio
@pytest.mark.parametrize("func", [sync_func, async_func])
async def test_trace_type_can_be_set_correctly(self, func):
Tracer.start_tracing("test_run_id")
traced_func = _traced(func, trace_type=TraceType.TOOL)
if inspect.iscoroutinefunction(traced_func):
result = await traced_func(1)
else:
result = traced_func(1)
assert result == 1
traces = Tracer.end_tracing()
# Check the generated trace is expected
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.TOOL
@trace
def my_tool(a: int):
return a
@trace
async def my_tool_async(a: int):
return a
@pytest.mark.unittest
class TestTrace:
"""This class tests `trace` function."""
@pytest.mark.asyncio
@pytest.mark.parametrize(
"func",
[
my_tool,
my_tool_async,
],
)
async def test_traces_are_created_correctly(self, func):
Tracer.start_tracing("test_run_id")
if inspect.iscoroutinefunction(func):
result = await func(1)
else:
result = func(1)
assert result == 1
traces = Tracer.end_tracing()
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.FUNCTION
assert trace["inputs"] == {"a": 1}
assert trace["output"] == 1
assert trace["error"] is None
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_api_injector.py | import logging
from collections import namedtuple
from importlib.metadata import version
from types import GeneratorType
from unittest.mock import MagicMock, patch
import openai
import pytest
from promptflow._core.openai_injector import (
PROMPTFLOW_PREFIX,
USER_AGENT_HEADER,
_generate_api_and_injector,
_openai_api_list,
get_aoai_telemetry_headers,
inject_async,
inject_openai_api,
inject_operation_headers,
inject_sync,
recover_openai_api,
)
from promptflow._core.operation_context import OperationContext
from promptflow._core.tracer import Tracer
from promptflow._version import VERSION
from promptflow.connections import AzureOpenAIConnection
from promptflow.exceptions import UserErrorException
from promptflow.tools.aoai import AzureOpenAI
from promptflow.tools.embedding import embedding
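# openai<1.0 ("legacy") exposes module-level APIs such as openai.Completion and uses a
# "headers" kwarg, while openai>=1.0 moved the APIs under openai.resources and uses
# "extra_headers"; the tests below branch on this flag accordingly.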
IS_LEGACY_OPENAI = version("openai").startswith("0.")
# Mock classes and functions for test
class MockAPI:
def create(self):
pass
@pytest.mark.unittest
def test_inject_operation_headers_sync():
@inject_operation_headers
def f(**kwargs):
return kwargs
if IS_LEGACY_OPENAI:
headers = "headers"
kwargs_1 = {"headers": {"a": 1, "b": 2}}
kwargs_2 = {"headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
else:
headers = "extra_headers"
kwargs_1 = {"extra_headers": {"a": 1, "b": 2}}
kwargs_2 = {"extra_headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
injected_headers = get_aoai_telemetry_headers()
assert f(a=1, b=2) == {"a": 1, "b": 2, headers: injected_headers}
merged_headers = {**injected_headers, "a": 1, "b": 2}
assert f(**kwargs_1) == {headers: merged_headers}
aoai_tools_headers = injected_headers.copy()
aoai_tools_headers.update({"ms-azure-ai-promptflow-called-from": "aoai-tool"})
assert f(**kwargs_2) == {headers: aoai_tools_headers}
@pytest.mark.unittest
@pytest.mark.asyncio
async def test_inject_operation_headers_async():
@inject_operation_headers
async def f(**kwargs):
return kwargs
if IS_LEGACY_OPENAI:
headers = "headers"
kwargs_1 = {"headers": {"a": 1, "b": 2}}
kwargs_2 = {"headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
else:
headers = "extra_headers"
kwargs_1 = {"extra_headers": {"a": 1, "b": 2}}
kwargs_2 = {"extra_headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}}
injected_headers = get_aoai_telemetry_headers()
assert await f(a=1, b=2) == {"a": 1, "b": 2, headers: injected_headers}
merged_headers = {**injected_headers, "a": 1, "b": 2}
assert await f(**kwargs_1) == {headers: merged_headers}
aoai_tools_headers = injected_headers.copy()
aoai_tools_headers.update({"ms-azure-ai-promptflow-called-from": "aoai-tool"})
assert await f(**kwargs_2) == {headers: aoai_tools_headers}
@pytest.mark.unittest
def test_aoai_generator_proxy_sync():
def mock_aoai(**kwargs):
# check if args has a stream parameter
if "stream" in kwargs and kwargs["stream"]:
# stream parameter is true, yield a string
def generator():
yield "This is a yielded string"
return generator()
else:
# stream parameter is false or not given, return a string
return "This is a returned string"
if IS_LEGACY_OPENAI:
apis = ["openai.Completion.create", "openai.ChatCompletion.create", "openai.Embedding.create"]
else:
apis = [
"openai.resources.Completions.create",
"openai.resources.chat.Completions.create",
"openai.resources.Embeddings.create",
]
with patch(apis[0], new=mock_aoai), patch(apis[1], new=mock_aoai), patch(apis[2], new=mock_aoai):
Tracer.start_tracing("mock_run_id")
inject_openai_api()
if IS_LEGACY_OPENAI:
return_string = openai.Completion.create(stream=False)
return_generator = openai.Completion.create(stream=True)
else:
return_string = openai.resources.Completions.create(stream=False)
return_generator = openai.resources.Completions.create(stream=True)
assert return_string == "This is a returned string"
assert isinstance(return_generator, GeneratorType)
for _ in return_generator:
pass
traces = Tracer.end_tracing()
assert len(traces) == 2
for trace in traces:
assert trace["type"] == "LLM"
if trace["inputs"]["stream"]:
assert trace["output"] == ["This is a yielded string"]
else:
assert trace["output"] == "This is a returned string"
@pytest.mark.unittest
@pytest.mark.asyncio
async def test_aoai_generator_proxy_async():
async def mock_aoai(**kwargs):
# check if args has a stream parameter
if "stream" in kwargs and kwargs["stream"]:
# stream parameter is true, yield a string
def generator():
yield "This is a yielded string"
return generator()
else:
# stream parameter is false or not given, return a string
return "This is a returned string"
if IS_LEGACY_OPENAI:
apis = ["openai.Completion.acreate", "openai.ChatCompletion.acreate", "openai.Embedding.acreate"]
else:
apis = [
"openai.resources.AsyncCompletions.create",
"openai.resources.chat.AsyncCompletions.create",
"openai.resources.AsyncEmbeddings.create",
]
with patch(apis[0], new=mock_aoai), patch(apis[1], new=mock_aoai), patch(apis[2], new=mock_aoai):
Tracer.start_tracing("mock_run_id")
inject_openai_api()
if IS_LEGACY_OPENAI:
return_string = await openai.Completion.acreate(stream=False)
return_generator = await openai.Completion.acreate(stream=True)
else:
return_string = await openai.resources.AsyncCompletions.create(stream=False)
return_generator = await openai.resources.AsyncCompletions.create(stream=True)
assert return_string == "This is a returned string"
assert isinstance(return_generator, GeneratorType)
for _ in return_generator:
pass
traces = Tracer.end_tracing()
assert len(traces) == 2
for trace in traces:
assert trace["type"] == "LLM"
if trace["inputs"]["stream"]:
assert trace["output"] == ["This is a yielded string"]
else:
assert trace["output"] == "This is a returned string"
@pytest.mark.unittest
def test_aoai_call_inject():
if IS_LEGACY_OPENAI:
headers = "headers"
apis = ["openai.Completion.create", "openai.ChatCompletion.create", "openai.Embedding.create"]
else:
headers = "extra_headers"
apis = [
"openai.resources.Completions.create",
"openai.resources.chat.Completions.create",
"openai.resources.Embeddings.create",
]
def mock_aoai(**kwargs):
return kwargs.get(headers)
with patch(apis[0], new=mock_aoai), patch(apis[1], new=mock_aoai), patch(apis[2], new=mock_aoai):
inject_openai_api()
injected_headers = get_aoai_telemetry_headers()
if IS_LEGACY_OPENAI:
return_headers_1 = openai.Completion.create(headers=None)
return_headers_2 = openai.ChatCompletion.create(headers="abc")
return_headers_3 = openai.Embedding.create(headers=1)
else:
return_headers_1 = openai.resources.Completions.create(extra_headers=None)
return_headers_2 = openai.resources.chat.Completions.create(extra_headers="abc")
return_headers_3 = openai.resources.Embeddings.create(extra_headers=1)
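# dict items() views support subset comparison (e.g. {"a": 1}.items() <= {"a": 1, "b": 2}.items()
# is True), so the assertions below verify that every injected telemetry header reached the
# mocked API call unchanged.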
assert return_headers_1 is not None
assert injected_headers.items() <= return_headers_1.items()
assert return_headers_2 is not None
assert injected_headers.items() <= return_headers_2.items()
assert return_headers_3 is not None
assert injected_headers.items() <= return_headers_3.items()
@pytest.mark.unittest
def test_aoai_tool_header():
def mock_complete(*args, **kwargs):
Response = namedtuple("Response", ["choices"])
Choice = namedtuple("Choice", ["text"])
choice = Choice(text=kwargs.get("extra_headers", {}))
response = Response(choices=[choice])
return response
def mock_chat(*args, **kwargs):
Completion = namedtuple("Completion", ["choices"])
Choice = namedtuple("Choice", ["message"])
Message = namedtuple("Message", ["content"])
message = Message(content=kwargs.get("extra_headers", {}))
choice = Choice(message=message)
completion = Completion(choices=[choice])
return completion
def mock_embedding(*args, **kwargs):
Response = namedtuple("Response", ["data"])
Embedding = namedtuple("Embedding", ["embedding"])
response = Response(data=[Embedding(embedding=kwargs.get("extra_headers", {}))])
return response
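# The mocks above echo the received extra_headers back through the normal response shape
# (completion text, chat message content, embedding vector), so the assertions can inspect
# which headers each AOAI tool attached.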
with patch("openai.resources.Completions.create", new=mock_complete), patch(
"openai.resources.chat.Completions.create", new=mock_chat
), patch("openai.resources.Embeddings.create", new=mock_embedding):
inject_openai_api()
aoai_tool_header = {"ms-azure-ai-promptflow-called-from": "aoai-tool"}
return_headers = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).completion(
prompt="test", deployment_name="test"
)
assert aoai_tool_header.items() <= return_headers.items()
return_headers = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:\ntest", deployment_name="test"
)
assert aoai_tool_header.items() <= return_headers.items()
return_headers = embedding(
AzureOpenAIConnection(api_key="test", api_base="test"), input="test", deployment_name="test"
)
assert aoai_tool_header.items() <= return_headers.items()
@pytest.mark.unittest
def test_aoai_chat_tool_prompt():
def mock_chat(*args, **kwargs):
Completion = namedtuple("Completion", ["choices"])
Choice = namedtuple("Choice", ["message"])
Message = namedtuple("Message", ["content"])
message = Message(content=kwargs.get("messages", {}))
choice = Choice(message=message)
completion = Completion(choices=[choice])
return completion
with patch("openai.resources.chat.Completions.create", new=mock_chat):
inject_openai_api()
return_messages = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:\ntest", deployment_name="test"
)
assert return_messages == [{"role": "user", "content": "test"}]
return_messages = AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:\r\n", deployment_name="test"
)
assert return_messages == [{"role": "user", "content": ""}]
with pytest.raises(UserErrorException, match="The Chat API requires a specific format for prompt"):
AzureOpenAI(AzureOpenAIConnection(api_key="test", api_base="test")).chat(
prompt="user:", deployment_name="test"
)
# Tests for _openai_api_list, the generator that yields groups of (module, class, method) API paths
# together with the sync or async injector to apply, depending on the installed openai version.
@pytest.mark.parametrize(
"is_legacy, expected_apis_with_injectors",
[
(
True,
[
(
(
("openai", "Completion", "create"),
("openai", "ChatCompletion", "create"),
("openai", "Embedding", "create"),
),
inject_sync,
),
(
(
("openai", "Completion", "acreate"),
("openai", "ChatCompletion", "acreate"),
("openai", "Embedding", "acreate"),
),
inject_async,
),
],
),
(
False,
[
(
(
("openai.resources.chat", "Completions", "create"),
("openai.resources", "Completions", "create"),
("openai.resources", "Embeddings", "create"),
),
inject_sync,
),
(
(
("openai.resources.chat", "AsyncCompletions", "create"),
("openai.resources", "AsyncCompletions", "create"),
("openai.resources", "AsyncEmbeddings", "create"),
),
inject_async,
),
],
),
],
)
def test_api_list(is_legacy, expected_apis_with_injectors):
with patch("promptflow._core.openai_injector.IS_LEGACY_OPENAI", is_legacy):
# Using list comprehension to get all items from the generator
actual_apis_with_injectors = list(_openai_api_list())
# Assert that the actual list matches the expected list
assert actual_apis_with_injectors == expected_apis_with_injectors
@pytest.mark.parametrize(
"apis_with_injectors, expected_output, expected_logs",
[
([((("MockModule", "MockAPI", "create"),), inject_sync)], [(MockAPI, "create", inject_sync)], []),
([((("MockModule", "MockAPI", "create"),), inject_async)], [(MockAPI, "create", inject_async)], []),
],
)
def test_generate_api_and_injector(apis_with_injectors, expected_output, expected_logs, caplog):
with patch("importlib.import_module", return_value=MagicMock(MockAPI=MockAPI)) as mock_import_module:
# Capture the logs
with caplog.at_level(logging.WARNING):
# Run the generator and collect the output
result = list(_generate_api_and_injector(apis_with_injectors))
# Check if the result matches the expected output
assert result == expected_output
# Check if the logs match the expected logs
assert len(caplog.records) == len(expected_logs)
for record, expected_message in zip(caplog.records, expected_logs):
assert expected_message in record.message
mock_import_module.assert_called_with("MockModule")
def test_generate_api_and_injector_attribute_error_logging(caplog):
apis = [
((("NonExistentModule", "NonExistentAPI", "create"),), MagicMock()),
((("MockModuleMissingMethod", "MockAPIMissingMethod", "missing_method"),), MagicMock()),
]
# Set up the side effect for the mock
def import_module_effect(name):
if name == "MockModuleMissingMethod":
module = MagicMock()
delattr(module, "MockAPIMissingMethod") # Use delattr to remove the attribute
return module
else:
raise ModuleNotFoundError(f"No module named '{name}'")
with patch("importlib.import_module") as mock_import_module:
mock_import_module.side_effect = import_module_effect
with caplog.at_level(logging.WARNING):
list(_generate_api_and_injector(apis))
assert len(caplog.records) == 2
assert "An unexpected error occurred" in caplog.records[0].message
assert "NonExistentModule" in caplog.records[0].message
assert "does not have the class" in caplog.records[1].message
assert "MockAPIMissingMethod" in caplog.records[1].message
# Verify that `importlib.import_module` was called with correct module names
mock_import_module.assert_any_call("NonExistentModule")
mock_import_module.assert_any_call("MockModuleMissingMethod")
@pytest.mark.unittest
def test_get_aoai_telemetry_headers():
# create a mock operation context
mock_operation_context = OperationContext()
mock_operation_context.user_agent = "test-user-agent"
mock_operation_context.update(
{
"flow_id": "test-flow-id",
"root_run_id": "test-root-run-id",
"index": 1,
"run_id": "test-run-id",
"variant_id": "test-variant-id",
}
)
# patch the OperationContext.get_instance method to return the mock operation context
with patch("promptflow._core.operation_context.OperationContext.get_instance") as mock_get_instance:
mock_get_instance.return_value = mock_operation_context
# call the function under test and get the headers
headers = get_aoai_telemetry_headers()
for key in headers.keys():
assert key.startswith(PROMPTFLOW_PREFIX) or key == USER_AGENT_HEADER
assert "_" not in key
# assert that the headers are correct
assert headers[USER_AGENT_HEADER] == f"test-user-agent promptflow/{VERSION}"
assert headers[f"{PROMPTFLOW_PREFIX}flow-id"] == "test-flow-id"
assert headers[f"{PROMPTFLOW_PREFIX}root-run-id"] == "test-root-run-id"
assert headers[f"{PROMPTFLOW_PREFIX}index"] == "1"
assert headers[f"{PROMPTFLOW_PREFIX}run-id"] == "test-run-id"
assert headers[f"{PROMPTFLOW_PREFIX}variant-id"] == "test-variant-id"
@pytest.mark.unittest
def test_inject_and_recover_openai_api():
class FakeAPIWithoutOriginal:
@staticmethod
def create():
pass
class FakeAPIWithOriginal:
@staticmethod
def create():
pass
def dummy_api():
pass
# Real injector function that adds an _original attribute
def injector(f):
def wrapper_fun(*args, **kwargs):
return f(*args, **kwargs)
wrapper_fun._original = f
return wrapper_fun
# Set an _original attribute for the create method of FakeAPIWithOriginal
FakeAPIWithOriginal.create._original = dummy_api
# Store the original create methods before injection
original_api_without_original = FakeAPIWithoutOriginal.create
original_api_with_original = FakeAPIWithOriginal.create
# Mock the generator function to yield our mocked api and method
with patch(
"promptflow._core.openai_injector.available_openai_apis_and_injectors",
return_value=[(FakeAPIWithoutOriginal, "create", injector), (FakeAPIWithOriginal, "create", injector)],
):
# Call the function to inject the APIs
inject_openai_api()
# Check that the _original attribute was set for the method that didn't have it
assert hasattr(FakeAPIWithoutOriginal.create, "_original")
# Ensure the _original attribute points to the correct original method
assert FakeAPIWithoutOriginal.create._original is original_api_without_original
# Check that the injector was not applied again to the method that already had an _original attribute
# Its _original should still point to the pre-set dummy_api, not to the method itself
assert getattr(FakeAPIWithOriginal.create, "_original") is not FakeAPIWithOriginal.create
# The original method should remain unchanged
assert FakeAPIWithOriginal.create is original_api_with_original
# Call the function to recover the APIs
recover_openai_api()
# Check that the _original attribute was removed for the method that didn't have it
assert not hasattr(FakeAPIWithoutOriginal.create, "_original")
assert not hasattr(FakeAPIWithOriginal.create, "_original")
# The original methods should be restored
assert FakeAPIWithoutOriginal.create is original_api_without_original
assert FakeAPIWithOriginal.create is dummy_api
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_tools_manager.py | import textwrap
from pathlib import Path
from unittest.mock import patch
import pytest
from mock import MagicMock
from promptflow import tool
from promptflow._core._errors import InputTypeMismatch, InvalidSource, PackageToolNotFoundError
from promptflow._core.tools_manager import (
BuiltinsManager,
ToolLoader,
collect_package_tools,
collect_package_tools_and_connections,
)
from promptflow._utils.yaml_utils import load_yaml_string
from promptflow.contracts.flow import InputAssignment, InputValueType, Node, ToolSource, ToolSourceType
from promptflow.contracts.tool import Tool, ToolType
from promptflow.exceptions import UserErrorException
@pytest.mark.unittest
class TestToolLoader:
def test_load_tool_for_node_with_invalid_node(self):
tool_loader = ToolLoader(working_dir="test_working_dir")
node: Node = Node(name="test", tool="test_tool", inputs={}, type=ToolType.PYTHON)
with pytest.raises(UserErrorException, match="Node test does not have source defined."):
tool_loader.load_tool_for_node(node)
node: Node = Node(
name="test", tool="test_tool", inputs={}, type=ToolType.PYTHON, source=ToolSource(type="invalid_type")
)
with pytest.raises(
NotImplementedError, match="Tool source type invalid_type for python tool is not supported yet."
):
tool_loader.load_tool_for_node(node)
node: Node = Node(
name="test", tool="test_tool", inputs={}, type=ToolType.CUSTOM_LLM, source=ToolSource(type="invalid_type")
)
with pytest.raises(
NotImplementedError, match="Tool source type invalid_type for custom_llm tool is not supported yet."
):
tool_loader.load_tool_for_node(node)
node: Node = Node(
name="test", tool="test_tool", inputs={}, type="invalid_type", source=ToolSource(type=ToolSourceType.Code)
)
with pytest.raises(NotImplementedError, match="Tool type invalid_type is not supported yet."):
tool_loader.load_tool_for_node(node)
def test_load_tool_for_package_node(self, mocker):
package_tools = {"test_tool": Tool(name="test_tool", type=ToolType.PYTHON, inputs={}).serialize()}
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tools)
tool_loader = ToolLoader(
working_dir="test_working_dir", package_tool_keys=["promptflow._core.tools_manager.collect_package_tools"]
)
node: Node = Node(
name="test",
tool="test_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="test_tool"),
)
tool = tool_loader.load_tool_for_node(node)
assert tool.name == "test_tool"
node: Node = Node(
name="test",
tool="test_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="invalid_tool"),
)
msg = (
"Package tool 'invalid_tool' is not found in the current environment. "
"All available package tools are: ['test_tool']."
)
with pytest.raises(PackageToolNotFoundError) as ex:
tool_loader.load_tool_for_node(node)
assert str(ex.value) == msg
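# A node may still reference a deprecated tool id; the loader should resolve it to the new
# tool via deprecated_tools, unless the old id is itself still a registered package tool.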
def test_load_tool_for_package_node_with_legacy_tool_id(self, mocker):
package_tools = {
"new_tool_1": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_1"]
).serialize(),
"new_tool_2": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_2"]
).serialize(),
"old_tool_2": Tool(name="old tool 2", type=ToolType.PYTHON, inputs={}).serialize(),
}
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tools)
tool_loader = ToolLoader(working_dir="test_working_dir", package_tool_keys=list(package_tools.keys()))
node_with_legacy_tool: Node = Node(
name="test_legacy_tool",
tool="old_tool_1",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="old_tool_1"),
)
assert tool_loader.load_tool_for_node(node_with_legacy_tool).name == "new tool 1"
node_with_legacy_tool_but_in_package_tools: Node = Node(
name="test_legacy_tool_but_in_package_tools",
tool="old_tool_2",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Package, tool="old_tool_2"),
)
assert tool_loader.load_tool_for_node(node_with_legacy_tool_but_in_package_tools).name == "old tool 2"
def test_load_tool_for_script_node(self):
working_dir = Path(__file__).parent
tool_loader = ToolLoader(working_dir=working_dir)
file = "test_tools_manager.py"
node: Node = Node(
name="test",
tool="sample_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Code, path=file),
)
tool = tool_loader.load_tool_for_node(node)
assert tool.name == "sample_tool"
@pytest.mark.parametrize(
"source_path, error_message",
[
(None, "Load tool failed for node 'test'. The source path is 'None'."),
("invalid_file.py", "Load tool failed for node 'test'. Tool file 'invalid_file.py' can not be found."),
],
)
def test_load_tool_for_script_node_exception(self, source_path, error_message):
working_dir = Path(__file__).parent
tool_loader = ToolLoader(working_dir=working_dir)
node: Node = Node(
name="test",
tool="sample_tool",
inputs={},
type=ToolType.PYTHON,
source=ToolSource(type=ToolSourceType.Code, path=source_path),
)
with pytest.raises(InvalidSource) as ex:
tool_loader.load_tool_for_script_node(node)
assert str(ex.value) == error_message
# This tool is for testing tools_manager.ToolLoader.load_tool_for_script_node
@tool
def sample_tool(input: str):
return input
@pytest.mark.unittest
class TestToolsManager:
def test_collect_package_tools_if_node_source_tool_is_legacy(self):
legacy_node_source_tools = ["content_safety_text.tools.content_safety_text_tool.analyze_text"]
package_tools = collect_package_tools(legacy_node_source_tools)
assert "promptflow.tools.azure_content_safety.analyze_text" in package_tools.keys()
def test_collect_package_tools_and_connections(self, install_custom_tool_pkg):
keys = ["my_tool_package.tools.my_tool_2.MyTool.my_tool"]
tools, specs, templates = collect_package_tools_and_connections(keys)
assert len(tools) == 1
assert specs == {
"my_tool_package.connections.MyFirstConnection": {
"connectionCategory": "CustomKeys",
"flowValueType": "CustomConnection",
"connectionType": "MyFirstConnection",
"ConnectionTypeDisplayName": "MyFirstConnection",
"configSpecs": [
{"name": "api_key", "displayName": "Api Key", "configValueType": "Secret", "isOptional": False},
{"name": "api_base", "displayName": "Api Base", "configValueType": "str", "isOptional": True},
],
"module": "my_tool_package.connections",
"package": "test-custom-tools",
"package_version": "0.0.2",
}
}
expected_template = {
"$schema": "https://azuremlschemas.azureedge.net/promptflow/latest/CustomStrongTypeConnection.schema.json",
"name": "to_replace_with_connection_name",
"type": "custom",
"custom_type": "MyFirstConnection",
"module": "my_tool_package.connections",
"package": "test-custom-tools",
"package_version": "0.0.2",
"configs": {"api_base": "This is my first connection."},
"secrets": {"api_key": "to_replace_with_api_key"},
}
loaded_yaml = load_yaml_string(templates["my_tool_package.connections.MyFirstConnection"])
assert loaded_yaml == expected_template
keys = ["my_tool_package.tools.my_tool_with_custom_strong_type_connection.my_tool"]
tools, specs, templates = collect_package_tools_and_connections(keys)
assert len(templates) == 1
expected_template = """
name: "to_replace_with_connection_name"
type: custom
custom_type: MyCustomConnection
module: my_tool_package.tools.my_tool_with_custom_strong_type_connection
package: test-custom-tools
package_version: 0.0.2
configs:
api_url: "This is a fake api url." # String type. The api url.
secrets: # must-have
api_key: "to_replace_with_api_key" # String type. The api key.
"""
content = templates["my_tool_package.tools.my_tool_with_custom_strong_type_connection.MyCustomConnection"]
expected_template_str = textwrap.dedent(expected_template)
assert expected_template_str in content
def test_gen_dynamic_list(self, mocked_ws_triple, mock_module_with_list_func):
from promptflow._sdk._utils import _gen_dynamic_list
func_path = "my_tool_package.tools.tool_with_dynamic_list_input.my_list_func"
func_kwargs = {"prefix": "My"}
result = _gen_dynamic_list({"func_path": func_path, "func_kwargs": func_kwargs})
assert len(result) == 2
# test gen_dynamic_list with ws_triple.
with patch("promptflow._cli._utils.get_workspace_triad_from_local", return_value=mocked_ws_triple):
result = _gen_dynamic_list({"func_path": func_path, "func_kwargs": func_kwargs})
assert len(result) == 2
@pytest.mark.unittest
class TestBuiltinsManager:
def test_load_tool_from_module(
self,
):
# Test case 1: When class_name is None
module = MagicMock()
tool_name = "test_tool"
module_name = "test_module"
class_name = None
method_name = "test_method"
node_inputs = {"input1": InputAssignment(value_type=InputValueType.LITERAL, value="value1")}
# Mock the behavior of the module and class
module.test_method = MagicMock()
# Call the method
api, init_inputs = BuiltinsManager._load_tool_from_module(
module, tool_name, module_name, class_name, method_name, node_inputs
)
# Assertions
assert api == module.test_method
assert init_inputs == {}
# Non literal input for init parameter will raise exception.
module = MagicMock()
tool_name = "test_tool"
module_name = "test_module"
class_name = "TestClass"
method_name = "test_method"
node_inputs = {"input1": InputAssignment(value_type=InputValueType.FLOW_INPUT, value="value1")}
# Mock the behavior of the module and class
module.TestClass = MagicMock()
module.TestClass.get_initialize_inputs = MagicMock(return_value=["input1"])
module.TestClass.get_required_initialize_inputs = MagicMock(return_value=["input1"])
module.TestClass.test_method = MagicMock()
# Call the method
with pytest.raises(InputTypeMismatch) as ex:
BuiltinsManager._load_tool_from_module(module, tool_name, module_name, class_name, method_name, node_inputs)
expected_message = (
"Invalid input for 'test_tool': Initialization input 'input1' requires a literal value, "
"but ${flow.value1} was received."
)
assert expected_message == str(ex.value)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_connection_manager.py | import pytest
from promptflow._core.connection_manager import ConnectionManager
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.tool import ConnectionType
def get_connection_dict():
return {
"azure_open_ai_connection": {
"type": "AzureOpenAIConnection",
"value": {
"api_key": "<azure-openai-key>",
"api_base": "<api-base>",
"api_type": "azure",
"api_version": "2023-07-01-preview",
},
},
"custom_connection": {
"type": "CustomConnection",
"value": {
"api_key": "<your-key>",
"url": "https://api.bing.microsoft.com/v7.0/search",
},
"module": "promptflow.connections",
"secret_keys": ["api_key"],
},
}
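# A minimal connection dict in the shape the executor receives at runtime: each entry carries
# its connection type and raw values, and custom connections also declare which keys are secrets.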
@pytest.mark.unittest
class TestConnectionManager:
def test_build_connections(self):
new_connection = get_connection_dict()
# Add not exist key
new_connection["azure_open_ai_connection"]["value"]["not_exist"] = "test"
connection_manager = ConnectionManager(new_connection)
assert len(connection_manager._connections) == 2
assert isinstance(connection_manager.get("azure_open_ai_connection"), AzureOpenAIConnection)
assert connection_manager.to_connections_dict() == new_connection
def test_serialize(self):
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
assert (
ConnectionType.serialize_conn(connection_manager.get("azure_open_ai_connection"))
== "azure_open_ai_connection"
)
assert ConnectionType.serialize_conn(connection_manager.get("custom_connection")) == "custom_connection"
def test_get_secret_list(self):
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
expected_list = ["<azure-openai-key>", "<your-key>"]
assert set(connection_manager.get_secret_list()) == set(expected_list)
def test_is_secret(self):
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
connection = connection_manager.get("custom_connection")
assert connection.is_secret("api_key") is True
assert connection.is_secret("url") is False
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_generator_proxy.py | import pytest
from promptflow._core.generator_proxy import GeneratorProxy, generate_from_proxy
def generator():
for i in range(3):
yield i
def iterator():
return iter([0, 1, 2])
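# GeneratorProxy wraps a generator or iterator, forwarding iteration while recording every
# yielded value in its `items` list; generate_from_proxy re-yields values through the proxy.
# The tests below cover both generator and plain-iterator inputs.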
@pytest.mark.unittest
def test_generator_proxy_next():
proxy = GeneratorProxy(generator())
assert proxy.items == []
assert next(proxy) == 0
assert next(proxy) == 1
assert next(proxy) == 2
with pytest.raises(StopIteration):
next(proxy)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generator_proxy_iter():
original_generator = generator()
proxy = GeneratorProxy(generator())
for num in proxy:
assert num == next(original_generator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generate_from_proxy():
proxy = GeneratorProxy(generator())
original_generator = generator()
for i in generate_from_proxy(proxy):
assert i == next(original_generator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_iterator_proxy_next():
proxy = GeneratorProxy(iterator())
assert proxy.items == []
assert next(proxy) == 0
assert next(proxy) == 1
assert next(proxy) == 2
with pytest.raises(StopIteration):
next(proxy)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_iterator_proxy_iter():
original_iterator = iterator()
proxy = GeneratorProxy(iterator())
for num in proxy:
assert num == next(original_iterator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generate_from_iterator_proxy():
proxy = GeneratorProxy(iterator())
original_iterator = iterator()
for i in generate_from_proxy(proxy):
assert i == next(original_iterator)
assert proxy.items == [0, 1, 2]
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_tool.py | import inspect
import pytest
from promptflow import tool
from promptflow._core.tool import InputSetting, ToolType
from promptflow._core.tracer import Tracer, TraceType
from promptflow.exceptions import UserErrorException
@tool
def decorated_without_parentheses(a: int):
return a
@tool()
def decorated_with_parentheses(a: int):
return a
@tool
async def decorated_without_parentheses_async(a: int):
return a
@tool()
async def decorated_with_parentheses_async(a: int):
return a
@tool(
name="tool_with_attributes",
description="Sample tool with a lot of attributes",
type=ToolType.LLM,
input_settings=InputSetting(),
streaming_option_parameter="stream",
extra_a="a",
extra_b="b",
)
def tool_with_attributes(stream: bool, a: int, b: int):
return stream, a, b
@pytest.mark.unittest
class TestTool:
"""This class tests the `tool` decorator."""
@pytest.mark.asyncio
@pytest.mark.parametrize(
"func",
[
decorated_with_parentheses,
decorated_without_parentheses,
decorated_with_parentheses_async,
decorated_without_parentheses_async,
],
)
async def test_traces_are_created_correctly(self, func):
Tracer.start_tracing("test_run_id")
if inspect.iscoroutinefunction(func):
result = await func(1)
else:
result = func(1)
assert result == 1
traces = Tracer.end_tracing()
assert len(traces) == 1
trace = traces[0]
assert trace["name"] == func.__qualname__
assert trace["type"] == TraceType.TOOL
assert trace["inputs"] == {"a": 1}
assert trace["output"] == 1
assert trace["error"] is None
assert trace["children"] == []
assert isinstance(trace["start_time"], float)
assert isinstance(trace["end_time"], float)
def test_attributes_are_set_to_the_tool_function(self):
stream, a, b = tool_with_attributes(True, 1, 2)
# Check the results are as expected
assert stream is True
assert a == 1
assert b == 2
# Check the attributes are set to the function
assert getattr(tool_with_attributes, "__tool") is None
assert getattr(tool_with_attributes, "__name") == "tool_with_attributes"
assert getattr(tool_with_attributes, "__description") == "Sample tool with a lot of attributes"
assert getattr(tool_with_attributes, "__type") == ToolType.LLM
assert getattr(tool_with_attributes, "__input_settings") == InputSetting()
assert getattr(tool_with_attributes, "__extra_info") == {"extra_a": "a", "extra_b": "b"}
assert getattr(tool_with_attributes, "_streaming_option_parameter") == "stream"
def test_invalid_tool_type_should_raise_error(self):
with pytest.raises(UserErrorException, match="Tool type invalid_type is not supported yet."):
@tool(type="invalid_type")
def invalid_tool_type():
pass
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_log_manager.py | import logging
import sys
import time
from multiprocessing.pool import ThreadPool
import pytest
from dateutil.parser import parse
from promptflow._core.log_manager import NodeLogManager, NodeLogWriter
RUN_ID = "dummy_run_id"
NODE_NAME = "dummy_node"
LINE_NUMBER = 1
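# Helper for the multi-thread test: each thread registers its own run id, sleeps a staggered
# amount to force interleaving, prints, and checks that only its own output was captured.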
def assert_print_result(i: int, run_logger: NodeLogWriter):
run_id = f"{RUN_ID}-{i}"
run_logger.set_node_info(run_id, NODE_NAME, LINE_NUMBER)
time.sleep(i / 10)
print(i)
assert_datetime_prefix(run_logger.get_log(run_id), str(i) + "\n")
def is_datetime(string: str) -> bool:
"""Check if a string follows datetime format."""
try:
parse(string)
return True
except ValueError:
return False
def assert_datetime_prefix(string: str, expected_str: str):
"""Assert if string has a datetime prefix, such as:
[2023-04-17T07:49:54+0000] example string
"""
datetime_prefix = string[string.index("[") + 1 : string.index("]")]
inner_str = string[string.index("]") + 2 :]
assert is_datetime(datetime_prefix)
assert inner_str == expected_str
@pytest.mark.unittest
class TestNodeLogManager:
def test_get_logs(self):
with NodeLogManager(record_datetime=False) as lm:
lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
print("test")
print("test2")
print("test stderr", file=sys.stderr)
assert lm.get_logs(RUN_ID).get("stdout") == "test\ntest2\n"
assert lm.get_logs(RUN_ID).get("stderr") == "test stderr\n"
lm.clear_node_context(RUN_ID)
assert lm.get_logs(RUN_ID).get("stdout") is None
assert lm.get_logs(RUN_ID).get("stderr") is None
def test_logging(self):
with NodeLogManager(record_datetime=False) as lm:
lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
stdout_logger = logging.getLogger("stdout")
stderr_logger = logging.getLogger("stderr")
stdout_logger.addHandler(logging.StreamHandler(stream=sys.stdout))
stderr_logger.addHandler(logging.StreamHandler(stream=sys.stderr))
stdout_logger.warning("test stdout")
stderr_logger.warning("test stderr")
logs = lm.get_logs(RUN_ID)
assert logs.get("stdout") == "test stdout\n"
assert logs.get("stderr") == "test stderr\n"
def test_exit_context_manager(self):
with NodeLogManager() as lm:
assert lm.stdout_logger is sys.stdout
assert lm.stdout_logger != sys.stdout
def test_datetime_prefix(self):
with NodeLogManager(record_datetime=True) as lm:
lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
print("test")
print("test2")
output = lm.get_logs(RUN_ID).get("stdout")
outputs = output.split("\n")
assert_datetime_prefix(outputs[0], "test")
assert_datetime_prefix(outputs[1], "test2")
assert outputs[2] == ""
@pytest.mark.unittest
class TestNodeLogWriter:
def test_set_node_info(self):
run_logger = NodeLogWriter(sys.stdout)
assert run_logger.get_log(RUN_ID) is None
run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
assert run_logger.get_log(RUN_ID) == ""
def test_clear_node_info(self):
run_logger = NodeLogWriter(sys.stdout)
run_logger.clear_node_info(RUN_ID)
run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
run_logger.clear_node_info(RUN_ID)
assert run_logger.run_id_to_stdout.get(RUN_ID) is None
def test_get_log(self):
run_logger = NodeLogWriter(sys.stdout)
sys.stdout = run_logger
print("test")
assert run_logger.get_log(RUN_ID) is None
run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
print("test")
assert_datetime_prefix(run_logger.get_log(RUN_ID), "test\n")
run_logger.clear_node_info(RUN_ID)
assert run_logger.get_log(RUN_ID) is None
def test_multi_thread(self):
run_logger = NodeLogWriter(sys.stdout)
sys.stdout = run_logger
with ThreadPool(processes=10) as pool:
results = pool.starmap(assert_print_result, ((i, run_logger) for i in range(10)))
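# starmap blocks until all threads finish and re-raises any assertion failure from
# assert_print_result; the loop below merely drains the (None) results.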
for r in results:
pass
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_core/test_metric_logger.py | import pytest
from promptflow._core.metric_logger import MetricLoggerManager, add_metric_logger, log_metric, remove_metric_logger
@pytest.mark.unittest
class TestMetricLogger:
def test_add_and_remove_metric_logger(self):
# define log metric function
metrics = {}
def _log_metric(key, value):
metrics[key] = value
def _log_metric_invalid(key, value, variant_id, extra_param):
metrics[key] = {variant_id: {value: extra_param}}
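# Only callables with a compatible (key, value[, variant_id]) signature are registered;
# duplicates, functions with extra required parameters, and non-callables are all ignored,
# as the assertions below verify.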
add_metric_logger(_log_metric)
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
add_metric_logger(_log_metric)
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
add_metric_logger(_log_metric_invalid)
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
add_metric_logger("test")
assert MetricLoggerManager.get_instance()._metric_loggers == [_log_metric]
remove_metric_logger(_log_metric)
assert MetricLoggerManager.get_instance()._metric_loggers == []
def test_log_metric(self):
# define log metric function
metrics = {}
def _log_metric(key, value):
metrics[key] = value
def _log_metric_with_variant_id(key, value, variant_id):
metrics[key] = {variant_id: value}
add_metric_logger(_log_metric)
log_metric("test1", 1)
assert metrics == {"test1": 1}
add_metric_logger(_log_metric_with_variant_id)
log_metric("test2", 1, "line_0")
assert metrics == {"test1": 1, "test2": {"line_0": 1}}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/processpool/test_line_execution_process_pool.py | import multiprocessing
import os
import sys
import uuid
from multiprocessing import Queue
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import patch
import pytest
from pytest_mock import MockFixture
from promptflow._utils.logger_utils import LogContext
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, UserErrorException
from promptflow.executor import FlowExecutor
from promptflow.executor._errors import SpawnedForkProcessManagerStartFailure
from promptflow.executor._line_execution_process_pool import (
LineExecutionProcessPool,
_exec_line,
format_current_process_info,
get_available_max_worker_count,
log_process_status,
)
from promptflow.executor._process_manager import create_spawned_fork_process_manager
from promptflow.executor._result import LineResult
from ...utils import get_flow_sample_inputs, get_yaml_file
SAMPLE_FLOW = "web_classification_no_variants"
def get_line_inputs(flow_folder=""):
if flow_folder:
inputs = get_bulk_inputs(flow_folder=flow_folder)  # pass by keyword: the first positional parameter of get_bulk_inputs is the line count
return inputs[0]
return {
"url": "https://www.microsoft.com/en-us/windows/",
"text": "some_text",
}
def get_bulk_inputs(nlinee=4, flow_folder="", sample_inputs_file="", return_dict=False):
if flow_folder:
if not sample_inputs_file:
sample_inputs_file = "samples.json"
inputs = get_flow_sample_inputs(flow_folder, sample_inputs_file=sample_inputs_file)
if isinstance(inputs, list) and len(inputs) > 0:
return inputs
elif isinstance(inputs, dict):
if return_dict:
return inputs
return [inputs]
else:
raise Exception(f"Invalid type of bulk input: {inputs}")
return [get_line_inputs() for _ in range(nlinee)]
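# The helpers below are executed in child processes (see the tests further down) so that
# changes to PF_BATCH_METHOD / PF_WORKER_COUNT and the multiprocessing start method never
# leak into the parent pytest process.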
def execute_in_fork_mode_subprocess(
dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process
):
os.environ["PF_BATCH_METHOD"] = "fork"
if is_set_environ_pf_worker_count:
os.environ["PF_WORKER_COUNT"] = pf_worker_count
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
assert pool._n_process == n_process
if is_set_environ_pf_worker_count:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment " f"variable 'PF_WORKER_COUNT'."
)
else:
factors = {
"default_worker_count": pool._DEFAULT_WORKER_COUNT,
"row_count": pool._nlines,
}
mock_logger.info.assert_any_call(
f"Set process count to {n_process} by taking the minimum value among the " f"factors of {factors}."
)
def execute_in_spawn_mode_subprocess(
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
):
os.environ["PF_BATCH_METHOD"] = "spawn"
if is_set_environ_pf_worker_count:
os.environ["PF_WORKER_COUNT"] = pf_worker_count
executor = FlowExecutor.create(
get_yaml_file(flow_folder),
dev_connections,
)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
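# Pretend the system has 128MB of available memory and the current process uses 64MB so the
# memory-based worker-count estimate used in the assertions is deterministic.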
with patch("psutil.virtual_memory") as mock_mem:
mock_mem.return_value.available = 128.0 * 1024 * 1024
with patch("psutil.Process") as mock_process:
mock_process.return_value.memory_info.return_value.rss = 64 * 1024 * 1024
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
assert pool._n_process == n_process
if is_set_environ_pf_worker_count and is_calculation_smaller_than_set:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment "
f"variable 'PF_WORKER_COUNT'."
)
mock_logger.warning.assert_any_call(
f"The current process count ({pf_worker_count}) is larger than recommended process count "
f"({estimated_available_worker_count}) that estimated by system available memory. This may "
f"cause memory exhaustion"
)
elif is_set_environ_pf_worker_count and not is_calculation_smaller_than_set:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment "
f"variable 'PF_WORKER_COUNT'."
)
elif not is_set_environ_pf_worker_count:
factors = {
"default_worker_count": pool._DEFAULT_WORKER_COUNT,
"row_count": pool._nlines,
"estimated_worker_count_based_on_memory_usage": estimated_available_worker_count,
}
mock_logger.info.assert_any_call(
f"Set process count to {n_process} by taking the minimum value among the factors "
f"of {factors}."
)
def create_line_execution_process_pool(dev_connections):
executor = FlowExecutor.create(get_yaml_file(SAMPLE_FLOW), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
line_execution_process_pool = LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
line_timeout_sec=1,
)
return line_execution_process_pool
def set_environment_succeeded_in_subprocess(dev_connections, pf_batch_method):
os.environ["PF_BATCH_METHOD"] = pf_batch_method
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork is False
def set_environment_failed_in_subprocess(dev_connections):
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
mock_logger.warning.return_value = None
os.environ["PF_BATCH_METHOD"] = "test"
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork == (multiprocessing.get_start_method() == "fork")
sys_start_methods = multiprocessing.get_all_start_methods()
expected_log_message = (
"Failed to set start method to 'test', start method test" f" is not in: {sys_start_methods}."
)
mock_logger.warning.assert_called_once_with(expected_log_message)
def not_set_environment_in_subprocess(dev_connections):
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork == (multiprocessing.get_start_method() == "fork")
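# Wrapper that passes a bogus extra positional argument so the spawned fork process manager
# fails to start; used below to simulate a crash of the fork process manager.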
def custom_create_spawned_fork_process_manager(*args, **kwargs):
create_spawned_fork_process_manager("test", *args, **kwargs)
@pytest.mark.unittest
class TestLineExecutionProcessPool:
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_line_execution_process_pool(self, flow_folder, dev_connections):
log_path = str(Path(mkdtemp()) / "test.log")
log_context_initializer = LogContext(log_path).get_initializer()
log_context = log_context_initializer()
with log_context:
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
executor._log_interval = 1
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
run_id = run_id or str(uuid.uuid4())
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
result_list = pool.run(zip(range(nlines), bulk_inputs))
assert len(result_list) == nlines
for i, line_result in enumerate(result_list):
assert isinstance(line_result, LineResult)
assert line_result.run_info.status == Status.Completed, f"{i}th line got {line_result.run_info.status}"
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_line_execution_not_completed(self, flow_folder, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
line_timeout_sec=1,
) as pool:
result_list = pool.run(zip(range(nlines), bulk_inputs))
result_list = sorted(result_list, key=lambda r: r.run_info.index)
assert len(result_list) == nlines
for i, line_result in enumerate(result_list):
assert isinstance(line_result, LineResult)
assert line_result.run_info.error["message"] == f"Line {i} execution timeout for exceeding 1 seconds"
assert line_result.run_info.error["code"] == "UserError"
assert line_result.run_info.status == Status.Failed
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_exec_line(self, flow_folder, dev_connections, mocker: MockFixture):
output_queue = Queue()
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
line_inputs = get_line_inputs()
line_result = _exec_line(
executor=executor,
output_queue=output_queue,
inputs=line_inputs,
run_id=run_id,
index=0,
line_timeout_sec=600,
)
assert isinstance(line_result, LineResult)
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_exec_line_failed_when_line_execution_not_start(self, flow_folder, dev_connections, mocker: MockFixture):
output_queue = Queue()
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
test_error_msg = "Test user error"
with patch("promptflow.executor.flow_executor.FlowExecutor.exec_line", autouse=True) as mock_exec_line:
mock_exec_line.side_effect = UserErrorException(
message=test_error_msg, target=ErrorTarget.AZURE_RUN_STORAGE
)
run_id = str(uuid.uuid4())
line_inputs = get_line_inputs()
line_result = _exec_line(
executor=executor,
output_queue=output_queue,
inputs=line_inputs,
run_id=run_id,
index=0,
line_timeout_sec=600,
)
assert isinstance(line_result, LineResult)
assert line_result.run_info.error["message"] == test_error_msg
assert line_result.run_info.error["code"] == "UserError"
assert line_result.run_info.status == Status.Failed
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_process_pool_run_with_exception(self, flow_folder, dev_connections, mocker: MockFixture):
# mock process pool run execution raise error
test_error_msg = "Test user error"
mocker.patch(
"promptflow.executor._line_execution_process_pool.LineExecutionProcessPool."
"_monitor_workers_and_process_tasks_in_thread",
side_effect=UserErrorException(message=test_error_msg, target=ErrorTarget.AZURE_RUN_STORAGE),
)
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
with pytest.raises(UserErrorException) as e:
pool.run(zip(range(nlines), bulk_inputs))
assert e.value.message == test_error_msg
assert e.value.target == ErrorTarget.AZURE_RUN_STORAGE
assert e.value.error_codes[0] == "UserError"
@pytest.mark.parametrize(
("flow_folder", "is_set_environ_pf_worker_count", "pf_worker_count", "n_process"),
[(SAMPLE_FLOW, True, "3", 3), (SAMPLE_FLOW, False, None, 4)],
)
def test_process_pool_parallelism_in_fork_mode(
self, dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process
):
if "fork" not in multiprocessing.get_all_start_methods():
pytest.skip("Unsupported start method: fork")
p = multiprocessing.Process(
target=execute_in_fork_mode_subprocess,
args=(dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process),
)
p.start()
p.join()
assert p.exitcode == 0
@pytest.mark.parametrize(
(
"flow_folder",
"is_set_environ_pf_worker_count",
"is_calculation_smaller_than_set",
"pf_worker_count",
"estimated_available_worker_count",
"n_process",
),
[
(SAMPLE_FLOW, True, False, "2", 4, 2),
(SAMPLE_FLOW, True, True, "6", 2, 6),
(SAMPLE_FLOW, False, True, None, 2, 2),
],
)
def test_process_pool_parallelism_in_spawn_mode(
self,
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
):
if "spawn" not in multiprocessing.get_all_start_methods():
pytest.skip("Unsupported start method: spawn")
p = multiprocessing.Process(
target=execute_in_spawn_mode_subprocess,
args=(
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
),
)
p.start()
p.join()
assert p.exitcode == 0
    def test_process_set_environment_variable_succeeded(self, dev_connections):
p = multiprocessing.Process(
target=set_environment_successed_in_subprocess,
args=(
dev_connections,
"spawn",
),
)
p.start()
p.join()
assert p.exitcode == 0
def test_process_set_environment_variable_failed(self, dev_connections):
p = multiprocessing.Process(target=set_environment_failed_in_subprocess, args=(dev_connections,))
p.start()
p.join()
assert p.exitcode == 0
def test_process_not_set_environment_variable(self, dev_connections):
p = multiprocessing.Process(target=not_set_environment_in_subprocess, args=(dev_connections,))
p.start()
p.join()
assert p.exitcode == 0
@pytest.mark.skipif(sys.platform == "win32" or sys.platform == "darwin", reason="Only test on linux")
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
@patch(
"promptflow.executor._process_manager.create_spawned_fork_process_manager",
custom_create_spawned_fork_process_manager,
)
def test_spawned_fork_process_manager_crashed_in_fork_mode(self, flow_folder, dev_connections):
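        # With the process-manager factory patched out, pool startup is expected to fail with SpawnedForkProcessManagerStartFailure.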
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with pytest.raises(SpawnedForkProcessManagerStartFailure) as e:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
pool.run(zip(range(nlines), bulk_inputs))
assert "Failed to start spawned fork process manager" in str(e.value)
class TestGetAvailableMaxWorkerCount:
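    # get_available_max_worker_count estimates workers as available_memory // process_memory,
    # falling back to 1 (with a warning) when the ratio drops below 1.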
@pytest.mark.parametrize(
"available_memory, process_memory, expected_max_worker_count, actual_calculate_worker_count",
[
(128.0, 64.0, 2, 2), # available_memory/process_memory > 1
(63.0, 64.0, 1, 0), # available_memory/process_memory < 1
],
)
def test_get_available_max_worker_count(
self, available_memory, process_memory, expected_max_worker_count, actual_calculate_worker_count
):
with patch("psutil.virtual_memory") as mock_mem:
mock_mem.return_value.available = available_memory * 1024 * 1024
with patch("psutil.Process") as mock_process:
mock_process.return_value.memory_info.return_value.rss = process_memory * 1024 * 1024
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
mock_logger.warning.return_value = None
estimated_available_worker_count = get_available_max_worker_count()
assert estimated_available_worker_count == expected_max_worker_count
if actual_calculate_worker_count < 1:
mock_logger.warning.assert_called_with(
f"Current system's available memory is {available_memory}MB, less than the memory "
f"{process_memory}MB required by the process. The maximum available worker count is 1."
)
else:
mock_logger.info.assert_called_with(
f"Current system's available memory is {available_memory}MB, "
f"memory consumption of current process is {process_memory}MB, "
f"estimated available worker count is {available_memory}/{process_memory} "
f"= {actual_calculate_worker_count}"
)
@pytest.mark.unittest
class TestFormatCurrentProcess:
def test_format_current_process_info(self):
process_name = "process_name"
process_pid = 123
line_number = 13
formatted_message = format_current_process_info(process_name, process_pid, line_number)
expected_returned_log_message = (
f"Process name({process_name})-Process id({process_pid})-Line number({line_number})"
)
assert formatted_message == expected_returned_log_message
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_start_execution(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number)
        expected_log_message = (
            f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) start execution."
        )
        mock_logger_info.assert_called_once_with(expected_log_message)
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_completed(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number, is_completed=True)
        expected_log_message = (
            f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) completed."
        )
        mock_logger_info.assert_called_once_with(expected_log_message)
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_failed(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number, is_failed=True)
        expected_log_message = (
            f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) failed."
        )
        mock_logger_info.assert_called_once_with(expected_log_message)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_multimedia_utils.py | import re
from pathlib import Path
from unittest.mock import MagicMock, mock_open, patch
import pytest
from promptflow._utils._errors import InvalidImageInput, LoadMultimediaDataError
from promptflow._utils.multimedia_utils import (
_create_image_from_base64,
_create_image_from_file,
_create_image_from_url,
_process_multimedia_dict_recursively,
_process_recursively,
convert_multimedia_data_to_base64,
create_image,
load_multimedia_data,
persist_multimedia_data,
resolve_multimedia_data_recursively,
)
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.multimedia import Image
from promptflow.contracts.tool import ValueType
from ...utils import DATA_ROOT
TEST_IMAGE_PATH = DATA_ROOT / "logo.jpg"
@pytest.mark.unittest
class TestMultimediaUtils:
@pytest.mark.parametrize("image_path", ["logo.jpg", "logo.png", "logo.webp", "logo.gif"])
def test_create_image_from_base64(self, image_path):
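        # Round-trip: file -> Image -> base64 -> Image should preserve both the content and the inferred MIME type.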
image = _create_image_from_file(DATA_ROOT / image_path)
base64_str = image.to_base64()
image_from_base64 = _create_image_from_base64(base64_str)
assert str(image) == str(image_from_base64)
format = image_path.split(".")[-1]
mime_type = f"image/{format}" if format != "jpg" else "image/jpeg"
assert mime_type == image_from_base64._mime_type
@patch("requests.get")
def test_create_image_from_url_with_mime_type(self, mock_get):
url = "https://example.com/image.jpg"
content = b"image content"
mime_type = "image/jpeg"
mock_get.return_value = MagicMock(status_code=200, content=content)
image = _create_image_from_url(url, mime_type)
assert isinstance(image, Image)
assert image._mime_type == mime_type
assert image.source_url == url
@patch("requests.get")
def test_create_image_from_url_failure(self, mock_get):
url = "https://example.com/image.jpg"
message = "Failed to fetch image"
code = 404
mock_get.return_value = MagicMock(status_code=code, text=message)
with pytest.raises(InvalidImageInput) as ex:
_create_image_from_url(url)
expected_message = f"Failed to fetch image from URL: {url}. Error code: {code}. Error message: {message}."
assert str(ex.value) == expected_message
def test_create_image_with_dict(self, mocker):
## From path
image_dict = {"data:image/jpg;path": TEST_IMAGE_PATH}
image_from_path = create_image(image_dict)
assert image_from_path._mime_type == "image/jpg"
## From base64
image_dict = {"data:image/jpg;base64": image_from_path.to_base64()}
image_from_base64 = create_image(image_dict)
assert str(image_from_path) == str(image_from_base64)
assert image_from_base64._mime_type == "image/jpg"
## From url
mocker.patch("requests.get", return_value=mocker.Mock(content=image_from_path, status_code=200))
image_dict = {"data:image/jpg;url": ""}
image_from_url = create_image(image_dict)
assert str(image_from_path) == str(image_from_url)
assert image_from_url._mime_type == "image/jpg"
mocker.patch("requests.get", return_value=mocker.Mock(content=None, status_code=404))
with pytest.raises(InvalidImageInput) as ex:
create_image(image_dict)
assert "Failed to fetch image from URL" in ex.value.message_format
def test_create_image_with_string(self, mocker):
## From path
image_from_path = create_image(str(TEST_IMAGE_PATH))
assert image_from_path._mime_type == "image/jpeg"
# From base64
image_from_base64 = create_image(image_from_path.to_base64())
assert str(image_from_path) == str(image_from_base64)
assert image_from_base64._mime_type == "image/jpeg"
## From url
mocker.patch("promptflow._utils.multimedia_utils._is_url", return_value=True)
mocker.patch("promptflow._utils.multimedia_utils._is_base64", return_value=False)
mocker.patch("requests.get", return_value=mocker.Mock(content=image_from_path, status_code=200))
image_from_url = create_image("Test")
assert str(image_from_path) == str(image_from_url)
assert image_from_url._mime_type == "image/jpeg"
## From image
image_from_image = create_image(image_from_path)
assert str(image_from_path) == str(image_from_image)
def test_create_image_with_invalid_cases(self):
# Test invalid input type
with pytest.raises(InvalidImageInput) as ex:
create_image(0)
assert "Unsupported image input type" in ex.value.message_format
# Test invalid image dict
with pytest.raises(InvalidImageInput) as ex:
invalid_image_dict = {"invalid_image": "invalid_image"}
create_image(invalid_image_dict)
assert "Invalid image input format" in ex.value.message_format
# Test none or empty input value
with pytest.raises(InvalidImageInput) as ex:
create_image(None)
assert "Unsupported image input type" in ex.value.message_format
with pytest.raises(InvalidImageInput) as ex:
create_image("")
assert "The image input should not be empty." in ex.value.message_format
    def test_persist_multimedia_data(self, mocker):
image = _create_image_from_file(TEST_IMAGE_PATH)
mocker.patch("builtins.open", mock_open())
data = {"image": image, "images": [image, image, "other_data"], "other_data": "other_data"}
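        # Images should be persisted as UUID-named .jpeg files and replaced by path dicts;
        # plain values such as "other_data" are expected to pass through unchanged.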
persisted_data = persist_multimedia_data(data, base_dir=Path(__file__).parent)
file_name = re.compile(r"^[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12}.jpeg$")
assert re.match(file_name, persisted_data["image"]["data:image/jpeg;path"])
assert re.match(file_name, persisted_data["images"][0]["data:image/jpeg;path"])
assert re.match(file_name, persisted_data["images"][1]["data:image/jpeg;path"])
    def test_convert_multimedia_data_to_base64(self):
image = _create_image_from_file(TEST_IMAGE_PATH)
data = {"image": image, "images": [image, image, "other_data"], "other_data": "other_data"}
base64_data = convert_multimedia_data_to_base64(data)
assert base64_data == {
"image": image.to_base64(),
"images": [image.to_base64(), image.to_base64(), "other_data"],
"other_data": "other_data",
}
base64_data = convert_multimedia_data_to_base64(data, with_type=True)
prefix = f"data:{image._mime_type};base64,"
assert base64_data == {
"image": prefix + image.to_base64(),
"images": [prefix + image.to_base64(), prefix + image.to_base64(), "other_data"],
"other_data": "other_data",
}
def test_load_multimedia_data(self):
# Case 1: Test normal node
inputs = {
"image": FlowInputDefinition(type=ValueType.IMAGE),
"images": FlowInputDefinition(type=ValueType.LIST),
"object": FlowInputDefinition(type=ValueType.OBJECT),
}
image_dict = {"data:image/jpg;path": str(TEST_IMAGE_PATH)}
line_inputs = {
"image": image_dict,
"images": [image_dict, image_dict],
"object": {"image": image_dict, "other_data": "other_data"},
}
updated_inputs = load_multimedia_data(inputs, line_inputs)
image = _create_image_from_file(TEST_IMAGE_PATH)
assert updated_inputs == {
"image": image,
"images": [image, image],
"object": {"image": image, "other_data": "other_data"},
}
# Case 2: Test aggregation node
line_inputs = {
"image": [image_dict, image_dict],
"images": [[image_dict, image_dict], [image_dict]],
"object": [{"image": image_dict, "other_data": "other_data"}, {"other_data": "other_data"}],
}
updated_inputs = load_multimedia_data(inputs, line_inputs)
assert updated_inputs == {
"image": [image, image],
"images": [[image, image], [image]],
"object": [{"image": image, "other_data": "other_data"}, {"other_data": "other_data"}],
}
# Case 3: Test invalid input type
with pytest.raises(LoadMultimediaDataError) as ex:
line_inputs = {"image": 0}
load_multimedia_data(inputs, line_inputs)
assert (
"Failed to load image for input 'image': "
"(InvalidImageInput) Unsupported image input type") in ex.value.message
def test_resolve_multimedia_data_recursively(self):
image_dict = {"data:image/jpg;path": "logo.jpg"}
value = {
"image": image_dict,
"images": [image_dict, image_dict],
"object": {"image": image_dict, "other_data": "other_data"},
}
input_dir = TEST_IMAGE_PATH
updated_value = resolve_multimedia_data_recursively(input_dir, value)
updated_image_dict = {"data:image/jpg;path": str(DATA_ROOT / "logo.jpg")}
assert updated_value == {
"image": updated_image_dict,
"images": [updated_image_dict, updated_image_dict],
"object": {"image": updated_image_dict, "other_data": "other_data"},
}
def test_process_recursively(self):
image = _create_image_from_file(TEST_IMAGE_PATH)
value = {"image": image, "images": [image, image], "object": {"image": image, "other_data": "other_data"}}
process_funcs = {Image: lambda x: str(x)}
updated_value = _process_recursively(value, process_funcs)
image_str = str(image)
assert updated_value == {
"image": image_str,
"images": [image_str, image_str],
"object": {"image": image_str, "other_data": "other_data"},
}
assert value != updated_value
def test_process_recursively_inplace(self):
image = _create_image_from_file(TEST_IMAGE_PATH)
value = {"image": image, "images": [image, image], "object": {"image": image, "other_data": "other_data"}}
process_funcs = {Image: lambda x: str(x)}
_process_recursively(value, process_funcs, inplace=True)
image_str = str(image)
assert value == {
"image": image_str,
"images": [image_str, image_str],
"object": {"image": image_str, "other_data": "other_data"},
}
def test_process_multimedia_dict_recursively(self):
def process_func(image_dict):
return "image_placeholder"
image_dict = {"data:image/jpg;path": "logo.jpg"}
value = {
"image": image_dict,
"images": [image_dict, image_dict],
"object": {"image": image_dict, "other_data": "other_data"},
}
updated_value = _process_multimedia_dict_recursively(value, process_func)
assert updated_value == {
"image": "image_placeholder",
"images": ["image_placeholder", "image_placeholder"],
"object": {"image": "image_placeholder", "other_data": "other_data"},
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_feature_utils.py | import pytest
from promptflow._utils.feature_utils import Feature, get_feature_list
@pytest.mark.unittest
def test_get_feature_list():
feature_list = get_feature_list()
assert isinstance(feature_list, list)
assert all(isinstance(feature, Feature) for feature in feature_list)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_tool_utils.py | import inspect
from typing import Union
import pytest
from promptflow._core._errors import DuplicateToolMappingError
from promptflow._utils.tool_utils import (
DynamicListError,
ListFunctionResponseError,
_find_deprecated_tools,
append_workspace_triple_to_func_input_params,
function_to_interface,
load_function_from_function_path,
param_to_definition,
validate_dynamic_list_func_response_type,
)
from promptflow.connections import AzureOpenAIConnection, CustomConnection
from promptflow.contracts.tool import Tool, ToolType, ValueType
# mock functions for dynamic list function testing
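# They differ in whether they accept the workspace triple (subscription_id, resource_group_name, workspace_name)
# and **kwargs, which is what append_workspace_triple_to_func_input_params keys on in the tests below.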
def mock_dynamic_list_func1():
pass
def mock_dynamic_list_func2(input1):
pass
def mock_dynamic_list_func3(input1, input2):
pass
def mock_dynamic_list_func4(input1, input2, **kwargs):
pass
def mock_dynamic_list_func5(input1, input2, subscription_id):
pass
def mock_dynamic_list_func6(input1, input2, subscription_id, resource_group_name, workspace_name):
pass
def mock_dynamic_list_func7(input1, input2, subscription_id, **kwargs):
pass
def mock_dynamic_list_func8(input1, input2, subscription_id, resource_group_name, workspace_name, **kwargs):
pass
@pytest.mark.unittest
class TestToolUtils:
def test_function_to_interface(self):
def func(conn: [AzureOpenAIConnection, CustomConnection], input: [str, int]):
pass
input_defs, _, connection_types, _ = function_to_interface(func)
assert len(input_defs) == 2
assert input_defs["conn"].type == ["AzureOpenAIConnection", "CustomConnection"]
assert input_defs["input"].type == [ValueType.OBJECT]
assert connection_types == [["AzureOpenAIConnection", "CustomConnection"]]
def test_function_to_interface_with_invalid_initialize_inputs(self):
def func(input_str: str):
pass
with pytest.raises(Exception) as exec_info:
function_to_interface(func, {"input_str": "test"})
assert "Duplicate inputs found from" in exec_info.value.args[0]
def test_function_to_interface_with_kwargs(self):
def func(input_str: str, **kwargs):
pass
_, _, _, enable_kwargs = function_to_interface(func)
assert enable_kwargs is True
def func(input_str: str):
pass
_, _, _, enable_kwargs = function_to_interface(func)
assert enable_kwargs is False
def test_param_to_definition(self):
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow.contracts.tool import Secret
class MyFirstConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
class MySecondConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
def some_func(
conn1: MyFirstConnection,
conn2: Union[CustomConnection, MyFirstConnection],
conn3: Union[MyFirstConnection, CustomConnection],
conn4: Union[MyFirstConnection, MySecondConnection],
conn5: CustomConnection,
conn6: Union[CustomConnection, int],
conn7: Union[MyFirstConnection, int],
):
pass
sig = inspect.signature(some_func)
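        # Custom strong type connections should resolve to "CustomConnection" with the concrete class kept in custom_type;
        # unions that mix in non-connection types fall back to ValueType.OBJECT.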
input_def, _ = param_to_definition(sig.parameters.get("conn1"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn2"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn3"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn4"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type == ["MyFirstConnection", "MySecondConnection"]
input_def, _ = param_to_definition(sig.parameters.get("conn5"), gen_custom_type_conn=True)
assert input_def.type == ["CustomConnection"]
assert input_def.custom_type is None
input_def, _ = param_to_definition(sig.parameters.get("conn6"), gen_custom_type_conn=True)
assert input_def.type == [ValueType.OBJECT]
assert input_def.custom_type is None
input_def, _ = param_to_definition(sig.parameters.get("conn7"), gen_custom_type_conn=True)
assert input_def.type == [ValueType.OBJECT]
assert input_def.custom_type is None
@pytest.mark.parametrize(
"func, func_input_params_dict, use_ws_triple, expected_res",
[
(mock_dynamic_list_func1, None, False, {}),
(mock_dynamic_list_func2, {"input1": "value1"}, False, {"input1": "value1"}),
(
mock_dynamic_list_func3,
{"input1": "value1", "input2": "value2"},
False,
{"input1": "value1", "input2": "value2"},
),
(mock_dynamic_list_func3, {"input1": "value1"}, False, {"input1": "value1"}),
(mock_dynamic_list_func3, {"input1": "value1"}, True, {"input1": "value1"}),
(
mock_dynamic_list_func4,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func5,
{"input1": "value1"},
True,
{"input1": "value1", "subscription_id": "mock_subscription_id"},
),
(
mock_dynamic_list_func5,
{"input1": "value1", "subscription_id": "input_subscription_id"},
True,
{"input1": "value1", "subscription_id": "input_subscription_id"},
),
(
mock_dynamic_list_func6,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func6,
{
"input1": "value1",
"workspace_name": "input_workspace_name",
},
True,
{
"input1": "value1",
"workspace_name": "input_workspace_name",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
},
),
(
mock_dynamic_list_func7,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func7,
{"input1": "value1", "subscription_id": "input_subscription_id"},
True,
{
"input1": "value1",
"subscription_id": "input_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func8,
{"input1": "value1"},
True,
{
"input1": "value1",
"subscription_id": "mock_subscription_id",
"resource_group_name": "mock_resource_group",
"workspace_name": "mock_workspace_name",
},
),
(
mock_dynamic_list_func8,
{
"input1": "value1",
"subscription_id": "input_subscription_id",
"resource_group_name": "input_resource_group",
"workspace_name": "input_workspace_name",
},
True,
{
"input1": "value1",
"subscription_id": "input_subscription_id",
"resource_group_name": "input_resource_group",
"workspace_name": "input_workspace_name",
},
),
],
)
def test_append_workspace_triple_to_func_input_params(
self, func, func_input_params_dict, use_ws_triple, expected_res, mocked_ws_triple
):
ws_triple_dict = mocked_ws_triple._asdict() if use_ws_triple else None
func_sig_params = inspect.signature(func).parameters
actual_combined_inputs = append_workspace_triple_to_func_input_params(
func_sig_params=func_sig_params,
func_input_params_dict=func_input_params_dict,
ws_triple_dict=ws_triple_dict,
)
assert actual_combined_inputs == expected_res
@pytest.mark.parametrize(
"res",
[
(
[
{
"value": "fig0",
"display_value": "My_fig0",
"hyperlink": "https://www.bing.com/search?q=fig0",
"description": "this is 0 item",
},
{
"value": "kiwi1",
"display_value": "My_kiwi1",
"hyperlink": "https://www.bing.com/search?q=kiwi1",
"description": "this is 1 item",
},
]
),
([{"value": "fig0"}, {"value": "kiwi1"}]),
([{"value": "fig0", "display_value": "My_fig0"}, {"value": "kiwi1", "display_value": "My_kiwi1"}]),
(
[
{"value": "fig0", "display_value": "My_fig0", "hyperlink": "https://www.bing.com/search?q=fig0"},
{
"value": "kiwi1",
"display_value": "My_kiwi1",
"hyperlink": "https://www.bing.com/search?q=kiwi1",
},
]
),
([{"value": "fig0", "hyperlink": "https://www.bing.com/search?q=fig0"}]),
(
[
{"value": "fig0", "display_value": "My_fig0", "description": "this is 0 item"},
{
"value": "kiwi1",
"display_value": "My_kiwi1",
"hyperlink": "https://www.bing.com/search?q=kiwi1",
"description": "this is 1 item",
},
]
),
],
)
def test_validate_dynamic_list_func_response_type(self, res):
validate_dynamic_list_func_response_type(response=res, f="mock_func")
@pytest.mark.parametrize(
"res, err_msg",
[
(None, "mock_func response can not be empty."),
([], "mock_func response can not be empty."),
(["a", "b"], "mock_func response must be a list of dict. a is not a dict."),
({"a": "b"}, "mock_func response must be a list."),
([{"a": "b"}], "mock_func response dict must have 'value' key."),
([{"value": 1 + 2j}], "mock_func response dict value \\(1\\+2j\\) is not json serializable."),
],
)
def test_validate_dynamic_list_func_response_type_with_error(self, res, err_msg):
error_message = (
f"Unable to display list of items due to '{err_msg}'. \nPlease contact the tool "
f"author/support team for troubleshooting assistance."
)
with pytest.raises(ListFunctionResponseError, match=error_message):
validate_dynamic_list_func_response_type(response=res, f="mock_func")
def test_load_function_from_function_path(self, mock_module_with_list_func):
func_path = "my_tool_package.tools.tool_with_dynamic_list_input.my_list_func"
load_function_from_function_path(func_path)
def test_load_function_from_function_path_with_error(self, mock_module_with_list_func):
func_path = "mock_func_path"
with pytest.raises(
DynamicListError,
match="Unable to display list of items due to 'Failed to parse function from function path: "
"'mock_func_path'. Expected format: format 'my_module.my_func'. Detailed error: not enough "
"values to unpack \\(expected 2, got 1\\)'. \nPlease contact the tool author/support team for "
"troubleshooting assistance.",
):
load_function_from_function_path(func_path)
func_path = "fake_tool_pkg.tools.tool_with_dynamic_list_input.my_list_func"
with pytest.raises(
DynamicListError,
match="Unable to display list of items due to 'Failed to parse function from function path: "
"'fake_tool_pkg.tools.tool_with_dynamic_list_input.my_list_func'. Expected format: format "
"'my_module.my_func'. Detailed error: No module named 'fake_tool_pkg''. \nPlease contact the tool "
"author/support team for troubleshooting assistance.",
):
load_function_from_function_path(func_path)
func_path = "my_tool_package.tools.tool_with_dynamic_list_input.my_field"
with pytest.raises(
DynamicListError,
match="Unable to display list of items due to 'Failed to parse function from function path: "
"'my_tool_package.tools.tool_with_dynamic_list_input.my_field'. Expected format: "
"format 'my_module.my_func'. Detailed error: Unable to display list of items due to ''1' "
"is not callable.'. \nPlease contact the tool author/support team for troubleshooting assistance.",
):
load_function_from_function_path(func_path)
def test_find_deprecated_tools(self):
package_tools = {
"new_tool_1": Tool(
name="new tool 1", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_1"]).serialize(),
"new_tool_2": Tool(
                name="new tool 2", type=ToolType.PYTHON, inputs={}, deprecated_tools=["old_tool_1"]).serialize(),
}
with pytest.raises(DuplicateToolMappingError, match="secure operation"):
_find_deprecated_tools(package_tools)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_execution_utils.py | import pytest
from promptflow._utils.execution_utils import apply_default_value_for_input
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.tool import ValueType
@pytest.mark.unittest
class TestFlowExecutor:
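    # apply_default_value_for_input should fill missing inputs from the flow defaults
    # without overriding values supplied by the caller.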
@pytest.mark.parametrize(
"flow_inputs, inputs, expected_inputs",
[
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
None, # Could handle None input
{"input_from_default": "default_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{},
{"input_from_default": "default_value"},
),
(
{
"input_no_default": FlowInputDefinition(type=ValueType.STRING),
},
{},
{}, # No default value for input.
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{"input_from_default": "input_value", "another_key": "input_value"},
{"input_from_default": "input_value", "another_key": "input_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.BOOL, default=False),
},
{},
{"input_from_default": False},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.LIST, default=[]),
},
{},
{"input_from_default": []},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.OBJECT, default={}),
},
{},
{"input_from_default": {}},
),
],
)
def test_apply_default_value_for_input(self, flow_inputs, inputs, expected_inputs):
result = apply_default_value_for_input(flow_inputs, inputs)
assert result == expected_inputs
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_connection_utils.py | import pytest
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow._utils.connection_utils import (
generate_custom_strong_type_connection_spec,
generate_custom_strong_type_connection_template,
)
from promptflow.contracts.types import Secret
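# The connection classes below vary their docstrings (absent, partial, or malformed) to exercise
# how field comments end up in the generated connection template.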
class MyCustomConnectionWithNoComments(CustomStrongTypeConnection):
api_key: Secret
api_base: str
class MyCustomConnectionWithDefaultValue(CustomStrongTypeConnection):
api_key: Secret
api_base: str = "default value of api-base"
class MyCustomConnectionWithInvalidComments(CustomStrongTypeConnection):
"""My custom connection with invalid comments.
:param api_key: The api key.
:type api_key: String
:param api_base: The api base.
:type api_base: String
:param api_key_2: The api key 2.
:type api_key_2: String
"""
api_key: Secret
api_base: str
class MyCustomConnectionMissingTypeComments(CustomStrongTypeConnection):
"""My custom connection with missing type comments.
:param api_key: The api key.
"""
api_key: Secret
api_base: str
class MyCustomConnectionMissingParamComments(CustomStrongTypeConnection):
"""My custom connection with missing param comments.
:type api_key: String
"""
api_key: Secret
api_base: str
@pytest.mark.unittest
class TestConnectionUtils:
@pytest.mark.parametrize(
"cls, expected_str_in_template",
[
(
MyCustomConnectionWithNoComments,
['api_base: "to_replace_with_api_base"\n', 'api_key: "to_replace_with_api_key"\n'],
),
(
MyCustomConnectionWithInvalidComments,
[
'api_base: "to_replace_with_api_base" # String type. The api base.\n',
'api_key: "to_replace_with_api_key" # String type. The api key.\n',
],
),
(MyCustomConnectionMissingTypeComments, ['api_key: "to_replace_with_api_key" # The api key.']),
(MyCustomConnectionMissingParamComments, ['api_key: "to_replace_with_api_key" # String type.']),
],
)
def test_generate_custom_strong_type_connection_template_with_comments(self, cls, expected_str_in_template):
package = "test-package"
package_version = "0.0.1"
spec = generate_custom_strong_type_connection_spec(cls, package, package_version)
template = generate_custom_strong_type_connection_template(cls, spec, package, package_version)
for comment in expected_str_in_template:
assert comment in template
def test_generate_custom_strong_type_connection_template_with_default_value(self):
package = "test-package"
package_version = "0.0.1"
spec = generate_custom_strong_type_connection_spec(MyCustomConnectionWithDefaultValue, package, package_version)
template = generate_custom_strong_type_connection_template(
MyCustomConnectionWithDefaultValue, spec, package, package_version
)
assert 'api_base: "default value of api-base"' in template
@pytest.mark.parametrize(
"input_value, expected_connection_names",
[
pytest.param(
"new_ai_connection",
["new_ai_connection"],
id="standard",
),
pytest.param(
"${node.output}",
[],
id="output_reference",
),
pytest.param(
"${inputs.question}",
[],
id="input_reference",
),
],
)
def test_get_used_connection_names_from_flow_meta(self, input_value: str, expected_connection_names: list):
from promptflow._sdk._submitter.utils import SubmitterHelper
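        # Only literal connection names should be collected;
        # ${...} references to node outputs or flow inputs are ignored.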
connection_names = SubmitterHelper.get_used_connection_names(
{
"package": {
"(Promptflow.Tools)Promptflow.Tools.BuiltInTools.AOAI.Chat": {
"name": "Promptflow.Tools.BuiltInTools.AOAI.Chat",
"type": "csharp",
"inputs": {
"connection": {"type": ["AzureOpenAIConnection"]},
"prompt": {"type": ["string"]},
"deployment_name": {"type": ["string"]},
"objects": {"type": ["object"]},
},
"description": "",
"class_name": "AOAI",
"module": "Promptflow.Tools.BuiltInTools.AOAI",
"function": "Chat",
"is_builtin": True,
"package": "Promptflow.Tools",
"package_version": "0.0.14.0",
"toolId": "(Promptflow.Tools)Promptflow.Tools.BuiltInTools.AOAI.Chat",
},
},
"code": {},
},
{
"nodes": [
{
"name": "get_summarized_text_content",
"type": "csharp",
"source": {
"type": "package",
"tool": "(Promptflow.Tools)Promptflow.Tools.BuiltInTools.AOAI.Chat",
},
"inputs": {
"connection": input_value,
},
},
]
},
)
assert connection_names == expected_connection_names
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_multimedia_data_converter.py | from pathlib import Path
from unittest.mock import Mock
import pytest
from promptflow._utils.multimedia_data_converter import (
AbstractMultimediaInfoConverter,
MultimediaConverter,
MultimediaFormatAdapter20231201,
MultimediaInfo,
ResourceType,
)
@pytest.mark.unittest
class TestMultimediaConverter:
def test_convert_content_recursively(self):
converter = MultimediaConverter(Path("flow.yaml"))
# Don't convert anything.
content = {
"image": {"data:image/jpg;url": "https://example.com/logo.jpg"},
"images": [
{"data:image/jpg;url": "https://example.com/logo.jpg"},
{"data:image/jpg;base64": "base64 string"},
],
"object": {"image": {"data:image/png;path": "random_path"}, "other_data": "other_data"},
}
mock_converter = Mock(spec=AbstractMultimediaInfoConverter)
mock_converter.convert.side_effect = lambda x: x
result = converter.convert_content_recursively(content, mock_converter)
assert result == content
# Convert all valid images.
mock_converter.convert.side_effect = lambda x: MultimediaInfo("image/jpg", ResourceType("path"), "logo.jpg")
result = converter.convert_content_recursively(content, mock_converter)
expected_result = {
"image": {"data:image/jpg;path": "logo.jpg"},
"images": [
{"data:image/jpg;path": "logo.jpg"},
{"data:image/jpg;path": "logo.jpg"},
],
"object": {"image": {"data:image/jpg;path": "logo.jpg"}, "other_data": "other_data"},
}
assert result == expected_result
@pytest.mark.unittest
class TestMultimediaFormatAdapter20231201:
def test_is_valid_format(self):
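        # Only image/* dicts in the 20231201 contract format should be treated as valid; other MIME types are rejected.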
adapter = MultimediaFormatAdapter20231201()
assert adapter.is_valid_format({"data:image/jpg;path": "logo.jpg"})
assert adapter.is_valid_format({"data:image/jpg;url": "https://example.com/logo.jpg"})
assert not adapter.is_valid_format({"data:audio/mp3;path": "audio.mp3"})
assert not adapter.is_valid_format({"data:video/mp4;url": "https://example.com/video.mp4"})
def test_extract_info(self):
adapter = MultimediaFormatAdapter20231201()
# Valid formats
expected_result = MultimediaInfo("image/jpg", ResourceType.PATH, "random_path")
assert adapter.extract_info({"data:image/jpg;path": "random_path"}) == expected_result
expected_result = MultimediaInfo("image/jpg", ResourceType.URL, "random_url")
assert adapter.extract_info({"data:image/jpg;url": "random_url"}) == expected_result
expected_result = MultimediaInfo("image/jpg", ResourceType.BASE64, "random_base64")
assert adapter.extract_info({"data:image/jpg;base64": "random_base64"}) == expected_result
# Invalid format
assert adapter.extract_info({"data:video/mp4;url": "https://example.com/video.mp4"}) is None
assert adapter.extract_info({"data:image/mp4;url2": "https://example.com/video.mp4"}) is None
assert adapter.extract_info({"content:image/mp4;path": "random_path"}) is None
def test_create_data(self):
adapter = MultimediaFormatAdapter20231201()
info = MultimediaInfo("image/jpg", ResourceType.PATH, "random_path")
expected_result = {"data:image/jpg;path": "random_path"}
assert adapter.create_data(info) == expected_result
info = MultimediaInfo("image/jpg", ResourceType.URL, "random_url")
expected_result = {"data:image/jpg;url": "random_url"}
assert adapter.create_data(info) == expected_result
info = MultimediaInfo("image/jpg", ResourceType.BASE64, "base64 string")
expected_result = {"data:image/jpg;base64": "base64 string"}
assert adapter.create_data(info) == expected_result
        # Bad case: the client passes a plain string instead of a ResourceType member, which should raise.
        info = MultimediaInfo("image/jpg", "path", "base64 string")
        with pytest.raises(AttributeError):
            adapter.create_data(info)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_exception_utils.py | import json
import re
from traceback import TracebackException
import pytest
from promptflow._core._errors import ToolExecutionError
from promptflow._core.operation_context import OperationContext
from promptflow._utils.exception_utils import (
ErrorResponse,
ExceptionPresenter,
JsonSerializedPromptflowException,
get_tb_next,
infer_error_code_from_class,
last_frame_info,
remove_suffix,
)
from promptflow.exceptions import (
ErrorTarget,
PromptflowException,
SystemErrorException,
UserErrorException,
ValidationException,
)
def set_inner_exception_by_parameter():
raise PromptflowException("test", error=ValueError("bad number"))
def set_inner_exception_by_raise_from():
raise PromptflowException("test") from ValueError("bad number")
def code_with_bug():
1 / 0
def raise_tool_execution_error():
try:
code_with_bug()
except Exception as e:
raise ToolExecutionError(node_name="MyTool") from e
def raise_exception_with_object():
raise PromptflowException(message_format="{inner_exception}", inner_exception=Exception("exception message"))
def raise_user_error():
try:
code_with_bug()
except Exception as e:
raise UserErrorException("run failed", target=ErrorTarget.TOOL) from e
def raise_context_exception():
try:
code_with_bug()
except Exception as e:
raise CustomizedContextException(e)
class CustomizedContextException(Exception):
def __init__(self, inner_exception):
self.inner_exception = inner_exception
@property
def message(self):
code_with_bug()
return "context exception"
class CustomizedException(Exception):
pass
class CustomUserError(UserErrorException):
pass
class CustomDefaultTargetError(UserErrorException):
def __init__(self, target=ErrorTarget.EXECUTOR, **kwargs):
super().__init__(target=target, **kwargs)
def raise_general_exception():
try:
code_with_bug()
except Exception as e:
raise CustomizedException("General exception") from e
def raise_promptflow_exception():
try:
code_with_bug()
except Exception as e:
raise PromptflowException("Promptflow exception") from e
def raise_promptflow_exception_without_inner_exception():
try:
code_with_bug()
except Exception:
raise PromptflowException("Promptflow exception")
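# Regex templates for the traceback text the helpers above are expected to produce;
# the tests below match them with re.match / re.search.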
TOOL_EXECUTION_ERROR_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
ZeroDivisionError: division by zero
"""
TOOL_EXCEPTION_TRACEBACK = r"""
The above exception was the direct cause of the following exception:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_.*
raise_tool_execution_error\(\)
File ".*test_exception_utils.py", line .*, in raise_tool_execution_error
raise ToolExecutionError\(node_name="MyTool"\) from e
"""
TOOL_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_tool_execution_error
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
GENERAL_EXCEPTION_TRACEBACK = r"""
The above exception was the direct cause of the following exception:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_debug_info_for_general_exception
raise_general_exception\(\)
File ".*test_exception_utils.py", line .*, in raise_general_exception
raise CustomizedException\("General exception"\) from e
"""
GENERAL_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_general_exception
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
CONTEXT_EXCEPTION_TRACEBACK = r"""
During handling of the above exception, another exception occurred:
Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in test_debug_info_for_context_exception
raise_context_exception\(\)
File ".*test_exception_utils.py", line .*, in raise_context_exception
raise CustomizedContextException\(e\)
"""
CONTEXT_EXCEPTION_INNER_TRACEBACK = r"""Traceback \(most recent call last\):
File ".*test_exception_utils.py", line .*, in raise_context_exception
code_with_bug\(\)
File ".*test_exception_utils.py", line .*, in code_with_bug
1 / 0
"""
@pytest.mark.unittest
class TestExceptionUtilsCommonMethod:
def test_get_tb_next(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
tb_next = get_tb_next(e.value.__traceback__, 3)
te = TracebackException(type(e.value), e.value, tb_next)
formatted_tb = "".join(te.format())
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, formatted_tb)
def test_last_frame_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
frame_info = last_frame_info(e.value)
assert "test_exception_utils.py" in frame_info.get("filename")
assert frame_info.get("lineno") > 0
assert frame_info.get("name") == "raise_tool_execution_error"
assert last_frame_info(None) == {}
@pytest.mark.parametrize(
"error_class, expected_error_code",
[
(UserErrorException, "UserError"),
(SystemErrorException, "SystemError"),
(ValidationException, "ValidationError"),
(ToolExecutionError, "ToolExecutionError"),
(ValueError, "ValueError"),
],
)
def test_infer_error_code_from_class(self, error_class, expected_error_code):
assert infer_error_code_from_class(error_class) == expected_error_code
@pytest.mark.unittest
class TestExceptionPresenter:
def test_debug_info(self):
# Test ToolExecutionError
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "ToolExecutionError"
assert re.match(TOOL_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_debug_info_for_context_exception(self):
with pytest.raises(CustomizedContextException) as e:
raise_context_exception()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "CustomizedContextException"
assert re.match(CONTEXT_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(CONTEXT_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_debug_info_for_general_exception(self):
# Test General Exception
with pytest.raises(CustomizedException) as e:
raise_general_exception()
presenter = ExceptionPresenter.create(e.value)
debug_info = presenter.debug_info
assert debug_info["type"] == "CustomizedException"
assert re.match(GENERAL_EXCEPTION_TRACEBACK, debug_info["stackTrace"])
inner_exception = debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(GENERAL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_to_dict_for_general_exception(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=True)
assert "debugInfo" in dct
dct.pop("debugInfo")
assert dct == {
"code": "SystemError",
"message": "General exception",
"messageFormat": "",
"messageParameters": {},
"innerError": {
"code": "CustomizedException",
"innerError": None,
},
}
def test_to_dict_for_promptflow_exception(self):
with pytest.raises(PromptflowException) as e:
raise_promptflow_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=False)
assert dct == {
"code": "SystemError",
"message": "Promptflow exception",
"messageFormat": "",
"messageParameters": {},
"referenceCode": "Unknown",
"innerError": {
"code": "ZeroDivisionError",
"innerError": None,
},
}
def test_to_dict_for_promptflow_exception_without_inner_exception(self):
with pytest.raises(PromptflowException) as e:
raise_promptflow_exception_without_inner_exception()
presenter = ExceptionPresenter.create(e.value)
dct = presenter.to_dict(include_debug_info=False)
assert dct == {
"code": "SystemError",
"message": "Promptflow exception",
"messageFormat": "",
"messageParameters": {},
"referenceCode": "Unknown",
"innerError": None,
}
def test_to_dict_for_tool_execution_error(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
presenter = ExceptionPresenter.create(e.value)
assert re.search(TOOL_EXCEPTION_INNER_TRACEBACK, presenter.formatted_traceback)
assert re.search(TOOL_EXCEPTION_TRACEBACK, presenter.formatted_traceback)
dct = presenter.to_dict(include_debug_info=False)
assert dct.pop("additionalInfo") is not None
assert dct == {
"code": "UserError",
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
@pytest.mark.parametrize(
"raise_exception_func, error_class, expected_error_codes",
[
(raise_general_exception, CustomizedException, ["SystemError", "CustomizedException"]),
(raise_tool_execution_error, ToolExecutionError, ["UserError", "ToolExecutionError"]),
(raise_promptflow_exception, PromptflowException, ["SystemError", "ZeroDivisionError"]),
(raise_promptflow_exception_without_inner_exception, PromptflowException, ["SystemError"]),
],
)
def test_error_codes(self, raise_exception_func, error_class, expected_error_codes):
with pytest.raises(error_class) as e:
raise_exception_func()
presenter = ExceptionPresenter.create(e.value)
assert presenter.error_codes == expected_error_codes
@pytest.mark.unittest
class TestErrorResponse:
def test_from_error_dict(self):
error_dict = {
"code": "UserError",
"message": "Flow run failed.",
}
response = ErrorResponse.from_error_dict(error_dict)
assert response.response_code == "400"
assert response.error_codes == ["UserError"]
assert response.message == "Flow run failed."
response_dct = response.to_dict()
assert response_dct["time"] is not None
response_dct.pop("time")
component_name = response_dct.pop("componentName", None)
assert component_name == OperationContext.get_instance().get_user_agent()
assert "promptflow" in component_name
assert response_dct == {
"error": {
"code": "UserError",
"message": "Flow run failed.",
},
"correlation": None,
"environment": None,
"location": None,
}
    def test_to_simplified_dict(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
error_response = ErrorResponse.from_exception(e.value)
assert error_response.to_simplified_dict() == {
"error": {
"code": "SystemError",
"message": "General exception",
}
}
def test_from_exception(self):
with pytest.raises(CustomizedException) as e:
raise_general_exception()
response = ErrorResponse.from_exception(e.value).to_dict()
assert response["time"] is not None
response.pop("time")
component_name = response.pop("componentName", None)
assert component_name == OperationContext.get_instance().get_user_agent()
assert "promptflow" in component_name
assert response == {
"error": {
"code": "SystemError",
"message": "General exception",
"messageFormat": "",
"messageParameters": {},
"innerError": {
"code": "CustomizedException",
"innerError": None,
},
},
"correlation": None,
"environment": None,
"location": None,
}
@pytest.mark.unittest
@pytest.mark.parametrize(
"input_dict, expected",
[
({"code": "firstError"}, "firstError"),
({"code": "firstError", "innerError": {}}, "firstError"),
({"code": "firstError", "innerError": {"code": "secondError"}}, "firstError/secondError"),
({"code": None, "innerError": {"code": "secondError"}}, ""),
            # A dict without "code" at the outermost level returns an empty string.
({"error": {"code": "firstError", "innerError": {"code": "secondError"}}}, ""),
],
)
def test_error_code_hierarchy(self, input_dict, expected):
assert ErrorResponse.from_error_dict(input_dict).error_code_hierarchy == expected
@pytest.mark.parametrize(
"error_dict, expected_innermost_error_code",
[
(
{
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
},
"ToolExecutionError",
),
({"code": "UserError", "innerError": None}, "UserError"),
({"message": "UserError", "innerError": None}, None),
],
)
def test_innermost_error_code_with_code(self, error_dict, expected_innermost_error_code):
inner_error_code = ErrorResponse.from_error_dict(error_dict).innermost_error_code
assert inner_error_code == expected_innermost_error_code
@pytest.mark.parametrize(
"error_dict, expected_additional_info",
[
({"code": "UserError"}, {}),
(
{
"code": "UserError",
"additionalInfo": [
{
"type": "test_additional_info",
"info": "This is additional info for testing.",
},
"not_dict",
{
"type": "empty_info",
},
{
"info": "Empty type",
},
{
"test": "Invalid additional info",
},
],
},
{"test_additional_info": "This is additional info for testing."},
),
],
)
def test_additional_info(self, error_dict, expected_additional_info):
error_response = ErrorResponse.from_error_dict(error_dict)
assert error_response.additional_info == expected_additional_info
assert all(error_response.get_additional_info(key) == value for key, value in expected_additional_info.items())
@pytest.mark.parametrize(
"raise_exception_func, error_class",
[
(raise_general_exception, CustomizedException),
(raise_tool_execution_error, ToolExecutionError),
],
)
def test_get_user_execution_error_info(self, raise_exception_func, error_class):
with pytest.raises(error_class) as e:
raise_exception_func()
        error_response = ErrorResponse.from_exception(e.value)
        actual_error_info = error_response.get_user_execution_error_info()
self.assert_user_execution_error_info(e.value, actual_error_info)
def assert_user_execution_error_info(self, exception, error_info):
if isinstance(exception, ToolExecutionError):
assert error_info["type"] == "ZeroDivisionError"
assert error_info["message"] == "division by zero"
assert error_info["filename"].endswith("test_exception_utils.py")
assert error_info["lineno"] > 0
assert error_info["name"] == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
error_info["traceback"],
)
# assert re.match(TOOL_EXECUTION_ERROR_TRACEBACK, error_info["traceback"])
else:
assert error_info == {}
@pytest.mark.unittest
class TestExceptions:
@pytest.mark.parametrize(
"ex, expected_message, expected_message_format, expected_message_parameters",
[
(
CustomUserError("message"),
"message",
"",
{},
),
(
CustomUserError(message="message"),
"message",
"",
{},
),
(
CustomUserError("message", target=ErrorTarget.TOOL),
"message",
"",
{},
),
(
CustomUserError(message="message", target=ErrorTarget.TOOL),
"message",
"",
{},
),
(
CustomUserError(message_format="Hello world"),
"Hello world",
"Hello world",
{},
),
(
CustomUserError(message_format="Hello {name}", name="world"),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name}", name="world", not_used="whatever"),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name}", name="world", target=ErrorTarget.TOOL),
"Hello world",
"Hello {name}",
{
"name": "world",
},
),
(
CustomUserError(message_format="Hello {name} and {name}", name="world"),
"Hello world and world",
"Hello {name} and {name}",
{
"name": "world",
},
),
(
CustomUserError(
message_format="Tool '{tool_name}' execution failed due to {error}",
tool_name="my tool",
error="bug",
),
"Tool 'my tool' execution failed due to bug",
"Tool '{tool_name}' execution failed due to {error}",
{
"tool_name": "my tool",
"error": "bug",
},
),
],
)
def test_message_and_format(self, ex, expected_message, expected_message_format, expected_message_parameters):
with pytest.raises(CustomUserError) as exc:
raise ex
assert exc.value.message == expected_message
assert exc.value.message_format == expected_message_format
assert exc.value.message_parameters == expected_message_parameters
    @pytest.mark.parametrize(
        "ex, expected_message, expected_target",
[
(
CustomDefaultTargetError(message="message", target=ErrorTarget.TOOL),
"message",
ErrorTarget.TOOL,
),
(
CustomDefaultTargetError(message="message"),
"message",
ErrorTarget.EXECUTOR,
),
],
)
    def test_target_and_message(self, ex, expected_message, expected_target):
        with pytest.raises(CustomDefaultTargetError) as exc:
            raise ex
        assert exc.value.message == expected_message
        assert exc.value.target == expected_target
def test_reference_code(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
assert e.reference_code == ErrorTarget.TOOL.value
        module = "promptflow_vectordb.tool.faiss_index_lookup"
e.module = module
assert e.reference_code == f"{ErrorTarget.TOOL.value}/{module}"
@pytest.mark.parametrize(
"func_that_raises_exception",
[
set_inner_exception_by_parameter,
set_inner_exception_by_raise_from,
],
)
def test_inner_exception(self, func_that_raises_exception):
with pytest.raises(PromptflowException) as e:
func_that_raises_exception()
inner_exception = e.value.inner_exception
assert isinstance(inner_exception, ValueError)
assert str(inner_exception) == "bad number"
assert str(e.value) == "test"
def test_tool_execution_error(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
inner_exception = e.value.inner_exception
assert isinstance(inner_exception, ZeroDivisionError)
assert str(inner_exception) == "division by zero"
assert e.value.message == "Execution failure in 'MyTool': (ZeroDivisionError) division by zero"
last_frame_info = e.value.tool_last_frame_info
assert "test_exception_utils.py" in last_frame_info.get("filename")
assert last_frame_info.get("lineno") > 0
assert last_frame_info.get("name") == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
e.value.tool_traceback,
)
def test_code_hierarchy(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
assert e.error_codes == ["UserError", "ToolExecutionError"]
assert ExceptionPresenter.create(e).error_code_recursed == {
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_debug_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
presenter = ExceptionPresenter.create(e)
assert presenter.debug_info["type"] == "ToolExecutionError"
assert re.match(TOOL_EXCEPTION_TRACEBACK, presenter.debug_info["stackTrace"])
inner_exception = presenter.debug_info["innerException"]
assert inner_exception["type"] == "ZeroDivisionError"
assert re.match(TOOL_EXCEPTION_INNER_TRACEBACK, inner_exception["stackTrace"])
def test_additional_info(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
additional_info = ExceptionPresenter.create(e.value).to_dict().get("additionalInfo")
assert len(additional_info) == 1
info_0 = additional_info[0]
assert info_0["type"] == "ToolExecutionErrorDetails"
info_0_value = info_0["info"]
assert info_0_value.get("type") == "ZeroDivisionError"
assert info_0_value.get("message") == "division by zero"
assert re.match(r".*test_exception_utils.py", info_0_value["filename"])
assert info_0_value.get("lineno") > 0
assert info_0_value.get("name") == "code_with_bug"
assert re.match(
r"Traceback \(most recent call last\):\n"
r' File ".*test_exception_utils.py", line .*, in code_with_bug\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
info_0_value.get("traceback"),
)
def test_additional_info_for_empty_inner_error(self):
ex = ToolExecutionError(node_name="Node1")
dct = ExceptionPresenter.create(ex).to_dict()
additional_info = dct.get("additionalInfo")
assert additional_info is None
def test_additional_info_for_empty_case(self):
with pytest.raises(UserErrorException) as e:
raise_user_error()
dct = ExceptionPresenter.create(e.value).to_dict()
additional_info = dct.get("additionalInfo")
assert additional_info is None
@pytest.mark.parametrize("include_debug_info", [True, False])
def test_to_dict_turning_on_or_off_debug_info(self, include_debug_info):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
result = ExceptionPresenter.create(e).to_dict(include_debug_info=include_debug_info)
if include_debug_info:
assert "debugInfo" in result
else:
assert "debugInfo" not in result
def test_to_dict(self):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
e = e.value
# We do not check include_debug_info=True since the traceback is already checked in other cases
result = ExceptionPresenter.create(e).to_dict(include_debug_info=False)
# We do not check additionalInfo since it is already checked in other cases
result.pop("additionalInfo")
assert result == {
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"code": "UserError",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_to_dict_object_parameter(self):
with pytest.raises(PromptflowException) as e:
raise_exception_with_object()
e = e.value
# We do not check include_debug_info=True since the traceback is already checked in other cases
result = ExceptionPresenter.create(e).to_dict(include_debug_info=False)
# Assert message is str(exception)
assert result == {
"message": "exception message",
"messageFormat": "{inner_exception}",
"messageParameters": {"inner_exception": "exception message"},
"referenceCode": "Unknown",
"code": "SystemError",
"innerError": None,
}
@pytest.mark.parametrize("include_debug_info", [True, False])
def test_to_dict_for_JsonSerializedPromptflowException(self, include_debug_info):
with pytest.raises(ToolExecutionError) as e:
raise_tool_execution_error()
exception_dict = ExceptionPresenter.create(e.value).to_dict(include_debug_info=True)
message = json.dumps(exception_dict)
exception = JsonSerializedPromptflowException(message=message)
assert str(exception) == message
json_serialized_exception_dict = ExceptionPresenter.create(exception).to_dict(
include_debug_info=include_debug_info
)
error_dict = exception.to_dict(include_debug_info=include_debug_info)
assert error_dict == json_serialized_exception_dict
if include_debug_info:
assert "debugInfo" in error_dict
error_dict.pop("debugInfo")
error_dict.pop("additionalInfo")
assert error_dict == {
"code": "UserError",
"message": "Execution failure in 'MyTool': (ZeroDivisionError) division by zero",
"messageFormat": "Execution failure in '{node_name}'.",
"messageParameters": {"node_name": "MyTool"},
"referenceCode": "Tool",
"innerError": {
"code": "ToolExecutionError",
"innerError": None,
},
}
def test_remove_suffix(self):
assert remove_suffix('PackageToolNotFoundError.', '.') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', 'Error') == 'PackageToolNotFound'
assert remove_suffix('PackageToolNotFoundError', 'PackageToolNotFoundError') == ''
assert remove_suffix('PackageToolNotFoundError', 'NonExistedSuffix') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', '') == 'PackageToolNotFoundError'
assert remove_suffix('PackageToolNotFoundError', None) == 'PackageToolNotFoundError'
assert remove_suffix('', 'NonExistedSuffix') == ''
assert remove_suffix(None, 'NonExistedSuffix') is None
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_logger_utils.py | import io
import logging
import time
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import Mock
from uuid import uuid4
import pytest
from promptflow._utils.credential_scrubber import CredentialScrubber
from promptflow._utils.logger_utils import (
CredentialScrubberFormatter,
FileHandler,
FileHandlerConcurrentWrapper,
LogContext,
bulk_logger,
scrub_credentials,
update_log_path,
update_single_log_path,
)
from promptflow.contracts.run_mode import RunMode
from ...utils import load_content
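# Helper run inside the worker threads below: it installs a FileHandler on any
# FileHandlerConcurrentWrapper attached to the logger, emits one record, then clears it.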
def _set_handler(logger: logging.Logger, handler: FileHandler, log_content: str):
for h in logger.handlers:
if isinstance(h, FileHandlerConcurrentWrapper):
h.handler = handler
time.sleep(1)
logger.warning(log_content)
h.clear()
class DummyException(Exception):
pass
@pytest.fixture
def logger():
logger = logging.getLogger(str(uuid4()))
logger.setLevel(logging.INFO)
return logger
@pytest.fixture
def stream_handler():
stream = io.StringIO()
return logging.StreamHandler(stream)
@pytest.mark.unittest
class TestCredentialScrubberFormatter:
def test_log(self, logger, stream_handler):
"""Make sure credentials by logger.log are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("testinfo&sig=signature")
logger.error("testerror&key=accountkey")
logger.warning("testwarning&sig=signature")
logger.critical("print dummy secret")
expected_log_output = (
f"testinfo&sig={CredentialScrubber.PLACE_HOLDER}\n"
f"testerror&key={CredentialScrubber.PLACE_HOLDER}\n"
f"testwarning&sig={CredentialScrubber.PLACE_HOLDER}\n"
f"print {CredentialScrubber.PLACE_HOLDER}\n"
)
assert stream_handler.stream.getvalue() == expected_log_output
def test_log_with_args(self, logger, stream_handler):
"""Make sure credentials by logger.log (in args) are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("testinfo&sig=%s credential=%s", "signature", "dummy secret")
expected_log_output = (
f"testinfo&sig={CredentialScrubber.PLACE_HOLDER} " f"credential={CredentialScrubber.PLACE_HOLDER}\n"
)
assert stream_handler.stream.getvalue() == expected_log_output
def test_log_with_exc_info(self, logger, stream_handler):
"""Make sure credentials in exception are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
exception = DummyException("credential=dummy secret accountkey=accountkey")
logger.exception("test exception", exc_info=exception)
expected_log_output = "credential=**data_scrubbed** accountkey=**data_scrubbed**"
assert expected_log_output in stream_handler.stream.getvalue()
def test_set_credential_list_thread_safe(self):
formatter = CredentialScrubberFormatter()
def set_and_check_credential_list(credential_list):
formatter.set_credential_list(credential_list)
time.sleep(1)
assert formatter.credential_scrubber.custom_str_set == set(credential_list)
with ThreadPool(processes=3) as pool:
results = pool.map(set_and_check_credential_list, [[f"secret {i}", f"credential {i}"] for i in range(3)])
_ = list(results)
@pytest.mark.unittest
class TestFileHandlerConcurrentWrapper:
def test_set_handler_thread_safe(self):
wrapper = FileHandlerConcurrentWrapper()
logger = logging.getLogger("test execution log handler")
logger.addHandler(wrapper)
process_num = 3
folder_path = Path(mkdtemp())
log_path_list = [str(folder_path / f"log_{i}.log") for i in range(process_num)]
with ThreadPool(processes=process_num) as pool:
results = pool.starmap(
_set_handler, ((logger, FileHandler(log_path_list[i]), f"log {i}") for i in range(process_num))
)
results = list(results)
# Make sure log content is as expected.
for i, log_path in enumerate(log_path_list):
with open(log_path, "r") as f:
log = f.read()
log_lines = log.split("\n")
assert len(log_lines) == 2
assert f"log {i}" in log_lines[0]
assert log_lines[1] == ""
def test_clear(self):
wrapper = FileHandlerConcurrentWrapper()
assert wrapper.handler is None
log_path = str(Path(mkdtemp()) / "logs.log")
file_handler = FileHandler(log_path)
file_handler.close = Mock(side_effect=Exception("test exception"))
wrapper.handler = file_handler
wrapper.clear()
assert wrapper.handler is None
@pytest.mark.unittest
class TestLogContext:
def test_context_manager(self):
log_handler = FileHandlerConcurrentWrapper()
logger = logging.getLogger("test_setup_logger_context")
logger.addHandler(log_handler)
log_path = str(Path(mkdtemp()) / "test.log")
try:
log_context_initializer = LogContext(log_path).get_initializer()
log_context = log_context_initializer()
log_context.input_logger = logger
assert LogContext.get_current() is None
with log_context:
assert LogContext.get_current() is not None
# Make sure context variables are set.
inner_handler = log_handler._context_var.get()
assert isinstance(inner_handler, FileHandler)
assert isinstance(inner_handler._formatter, CredentialScrubberFormatter)
scrubber = inner_handler._formatter._context_var.get()
assert scrubber is not None
logger.warning("Print %s", "&sig=signature")
# Raise exception for test.
raise DummyException("Raise exception for test.")
except DummyException:
pass
# Make sure log content is as expected.
with open(log_path, "r") as f:
log_content = f.read()
assert f"Print &sig={CredentialScrubber.PLACE_HOLDER}" in log_content
# Make sure context variables are cleaned up.
assert log_handler._context_var.get() is None
def test_empty_file_path(self, logger, stream_handler):
logger.addHandler(stream_handler)
logger.addHandler(FileHandlerConcurrentWrapper())
with LogContext("", input_logger=logger):
logger.info("test log")
assert stream_handler.stream.getvalue() == "test log\n"
def test_update_log_path(self):
log_handler = FileHandlerConcurrentWrapper()
input_logger = logging.getLogger("input_logger")
input_logger.addHandler(log_handler)
folder_path = Path(mkdtemp())
original_log_path = str(folder_path / "original_log.log")
with LogContext(original_log_path, input_logger=input_logger, run_mode=RunMode.Batch):
bulk_logger.info("test log")
input_logger.warning("test input log")
original_log = load_content(original_log_path)
keywords = ["test log", "test input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in original_log for keyword in keywords)
# Update log path
log_path = str(folder_path / "log_without_input_logger.log")
update_log_path(log_path, input_logger)
bulk_logger.info("test update log")
input_logger.warning("test update input log")
log = load_content(log_path)
keywords = ["test update log", "test update input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in log for keyword in keywords)
def test_update_single_log_path(self):
log_handler = FileHandlerConcurrentWrapper()
input_logger = logging.getLogger("input_logger")
input_logger.addHandler(log_handler)
folder_path = Path(mkdtemp())
original_log_path = str(folder_path / "original_log.log")
with LogContext(original_log_path, input_logger=input_logger, run_mode=RunMode.Batch):
bulk_logger.info("test log")
input_logger.warning("test input log")
original_log = load_content(original_log_path)
keywords = ["test log", "test input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in original_log for keyword in keywords)
# Update log path
bulk_log_path = str(folder_path / "update_bulk_log.log")
update_single_log_path(bulk_log_path, bulk_logger)
input_log_path = str(folder_path / "update_input_log.log")
update_single_log_path(input_log_path, input_logger)
bulk_logger.info("test update log")
input_logger.warning("test update input log")
bulk_log = load_content(bulk_log_path)
input_log = load_content(input_log_path)
bulk_keywords = ["test update log", "execution.bulk", "INFO"]
input_keywords = ["test update input log", "input_logger", "WARNING"]
assert all(keyword in bulk_log for keyword in bulk_keywords)
assert all(keyword not in bulk_log for keyword in input_keywords)
assert all(keyword in input_log for keyword in input_keywords)
assert all(keyword not in input_log for keyword in bulk_keywords)
def test_scrub_credentials(self):
log_content = "sig=signature&key=accountkey"
folder_path = Path(mkdtemp())
logs_path = str(folder_path / "logs.log")
scrubbed_log_content = scrub_credentials(log_content)
assert scrubbed_log_content == "sig=**data_scrubbed**&key=**data_scrubbed**"
with LogContext(logs_path):
scrubbed_log_content = scrub_credentials(log_content)
assert scrubbed_log_content == "sig=**data_scrubbed**&key=**data_scrubbed**"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_thread_utils.py | import re
import sys
import time
from io import StringIO
from logging import WARNING, Logger, StreamHandler
import pytest
from promptflow._utils.thread_utils import RepeatLogTimer
from promptflow._utils.utils import generate_elapsed_time_messages
class DummyException(Exception):
pass
@pytest.mark.skipif(sys.platform == "darwin", reason="Skip on Mac")
@pytest.mark.unittest
class TestRepeatLogTimer:
def test_context_manager(self):
s = StringIO()
logger = Logger("test_repeat_log_timer")
logger.addHandler(StreamHandler(s))
interval_seconds = 1
start_time = time.perf_counter()
with RepeatLogTimer(
interval_seconds=interval_seconds,
logger=logger,
level=WARNING,
log_message_function=generate_elapsed_time_messages,
args=("Test", start_time, interval_seconds, None),
):
time.sleep(10.5)
logs = s.getvalue().split("\n")
logs = [log for log in logs if log]
log_pattern = re.compile(
r"^Test has been running for [0-9]+ seconds, thread None cannot be found in sys._current_frames, "
r"maybe it has been terminated due to unexpected errors.$"
)
assert logs, "Logs are empty."
for log in logs:
assert re.match(log_pattern, log), f"The wrong log: {log}"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_credential_scrubber.py | import pytest
from promptflow._utils.credential_scrubber import CredentialScrubber
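# Each helper returns an (input, expected) pair: a storage connection string / SAS URI and
# the same value with its secret replaced by CredentialScrubber.PLACE_HOLDER.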
def mock_connection_string():
connection_str_before_key = "DefaultEndpointsProtocol=https;AccountName=accountName;"
connection_str_after_key = "EndpointSuffix=core.windows.net"
return (
f"{connection_str_before_key}AccountKey=accountKey;{connection_str_after_key}",
f"{connection_str_before_key}AccountKey={CredentialScrubber.PLACE_HOLDER};{connection_str_after_key}",
)
def mock_sas_uri():
uri_without_signature = "https://bloburi/containerName/file.txt?sv=2021-10-04&se=2023-05-17&sr=b&sp=rw"
return (f"{uri_without_signature}&sig=signature", f"{uri_without_signature}&sig={CredentialScrubber.PLACE_HOLDER}")
@pytest.mark.unittest
class TestCredentialScrubber:
def test_scrub_signature_in_sas_uri(self):
input_str, ground_truth = mock_sas_uri()
assert CredentialScrubber().scrub(input_str) == ground_truth
def test_scrub_key_in_connection_string(self):
input_str, ground_truth = mock_connection_string()
output = CredentialScrubber().scrub(input_str)
assert output == ground_truth
def test_add_regex(self):
scrubber = CredentialScrubber()
scrubber.add_regex(r"(?<=credential=)[^\s;&]+")
assert scrubber.scrub("test&credential=credential") == f"test&credential={CredentialScrubber.PLACE_HOLDER}"
def test_add_str(self):
scrubber = CredentialScrubber()
scrubber.add_str(None)
assert len(scrubber.custom_str_set) == 0
scrubber.add_str("credential")
assert len(scrubber.custom_str_set) == 1
assert scrubber.scrub("test&secret=credential") == f"test&secret={CredentialScrubber.PLACE_HOLDER}"
def test_add_str_length_threshold(self):
"""If the secret is too short (length <= 2 chars), it will not be scrubbed."""
scrubber = CredentialScrubber()
scrubber.add_str("yy")
assert scrubber.scrub("test&secret=yy") == "test&secret=yy"
def test_normal_str_not_affected(self):
assert CredentialScrubber().scrub("no secret") == "no secret"
def test_clear(self):
scrubber = CredentialScrubber()
scrubber.add_str("credential")
scrubber.add_regex(r"(?<=credential=)[^\s;&]+")
assert len(scrubber.custom_str_set) == 1
assert len(scrubber.custom_regex_set) == 1
scrubber.clear()
assert len(scrubber.custom_str_set) == 0
assert len(scrubber.custom_regex_set) == 0
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_generate_tool_meta_utils.py | import os
import re
import sys
from multiprocessing import Pool
from pathlib import Path
from unittest.mock import patch
import pytest
from promptflow._core.tool_meta_generator import (
JinjaParsingError,
MultipleToolsDefined,
NoToolDefined,
PythonLoadError,
PythonParsingError,
generate_prompt_meta,
generate_python_meta,
generate_tool_meta_dict_by_file,
)
from promptflow._utils.exception_utils import ExceptionPresenter
from ...utils import FLOW_ROOT, load_json
TEST_ROOT = Path(__file__).parent.parent.parent.parent
TOOLS_ROOT = TEST_ROOT / "test_configs/wrong_tools"
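# The cd_and_run* helpers execute in a child process (see generate_tool_meta_dict_by_file_with_cd),
# so their os.chdir and sys.path changes do not leak into the main test process.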
def cd_and_run(working_dir, source_path, tool_type):
os.chdir(working_dir)
sys.path.insert(0, working_dir)
try:
return generate_tool_meta_dict_by_file(source_path, tool_type)
except Exception as e:
return f"({e.__class__.__name__}) {e}"
def cd_and_run_with_read_text_error(working_dir, source_path, tool_type):
def mock_read_text_error(self: Path, *args, **kwargs):
raise Exception("Mock read text error.")
os.chdir(working_dir)
sys.path.insert(0, working_dir)
try:
with patch("promptflow._core.tool_meta_generator.Path.read_text", new=mock_read_text_error):
return generate_tool_meta_dict_by_file(source_path, tool_type)
except Exception as e:
return f"({e.__class__.__name__}) {e}"
def cd_and_run_with_bad_function_interface(working_dir, source_path, tool_type):
def mock_function_to_interface(*args, **kwargs):
raise Exception("Mock function to interface error.")
os.chdir(working_dir)
sys.path.insert(0, working_dir)
try:
with patch("promptflow._core.tool_meta_generator.function_to_interface", new=mock_function_to_interface):
return generate_tool_meta_dict_by_file(source_path, tool_type)
except Exception as e:
return f"({e.__class__.__name__}) {e}"
def generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, func):
with Pool(1) as pool:
return pool.apply(func, (wd, tool_path, tool_type))
@pytest.mark.unittest
class TestToolMetaUtils:
@pytest.mark.parametrize(
"flow_dir, tool_path, tool_type",
[
("prompt_tools", "summarize_text_content_prompt.jinja2", "prompt"),
("prompt_tools", "summarize_text_content_prompt.jinja2", "llm"),
("script_with_import", "dummy_utils/main.py", "python"),
("script_with___file__", "script_with___file__.py", "python"),
("script_with_special_character", "script_with_special_character.py", "python"),
],
)
def test_generate_tool_meta_dict_by_file(self, flow_dir, tool_path, tool_type):
wd = str((FLOW_ROOT / flow_dir).resolve())
meta_dict = generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, cd_and_run)
assert isinstance(meta_dict, dict), "Call cd_and_run failed:\n" + meta_dict
target_file = (Path(wd) / tool_path).with_suffix(".meta.json")
expected_dict = load_json(target_file)
if tool_type == "llm":
expected_dict["type"] = "llm" # We use prompt as default for jinja2
assert meta_dict == expected_dict
@pytest.mark.parametrize(
"flow_dir, tool_path, tool_type, func, msg_pattern",
[
pytest.param(
"prompt_tools",
"summarize_text_content_prompt.jinja2",
"python",
cd_and_run,
r"\(PythonLoaderNotFound\) Failed to load python file '.*summarize_text_content_prompt.jinja2'. "
r"Please make sure it is a valid .py file.",
id="PythonLoaderNotFound",
),
pytest.param(
"script_with_import",
"fail.py",
"python",
cd_and_run,
r"\(PythonLoadError\) Failed to load python module from file '.*fail.py': "
r"\(ModuleNotFoundError\) No module named 'aaa'",
id="PythonLoadError",
),
pytest.param(
"simple_flow_with_python_tool",
"divide_num.py",
"python",
cd_and_run_with_bad_function_interface,
r"\(BadFunctionInterface\) Parse interface for tool 'divide_num' failed: "
r"\(Exception\) Mock function to interface error.",
id="BadFunctionInterface",
),
pytest.param(
"script_with_import",
"aaa.py",
"python",
cd_and_run,
r"\(MetaFileNotFound\) Generate tool meta failed for python tool. "
r"Meta file 'aaa.py' can not be found.",
id="MetaFileNotFound",
),
pytest.param(
"simple_flow_with_python_tool",
"divide_num.py",
"python",
cd_and_run_with_read_text_error,
r"\(MetaFileReadError\) Generate tool meta failed for python tool. "
r"Read meta file 'divide_num.py' failed: \(Exception\) Mock read text error.",
id="MetaFileReadError",
),
pytest.param(
"simple_flow_with_python_tool",
"divide_num.py",
"action",
cd_and_run,
r"\(NotSupported\) Generate tool meta failed. The type 'action' is currently unsupported. "
r"Please choose from available types: python,llm,prompt and try again.",
id="NotSupported",
),
],
)
def test_generate_tool_meta_dict_by_file_exception(self, flow_dir, tool_path, tool_type, func, msg_pattern):
wd = str((FLOW_ROOT / flow_dir).resolve())
ret = generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, func)
assert isinstance(ret, str), "Call cd_and_run should fail but succeeded:\n" + str(ret)
assert re.match(msg_pattern, ret)
@pytest.mark.parametrize(
"content, error_code, message",
[
pytest.param(
"zzz",
PythonParsingError,
"Failed to load python module. Python parsing failed: (NameError) name 'zzz' is not defined",
id="PythonParsingError_NameError",
),
pytest.param(
"# Nothing",
NoToolDefined,
"No tool found in the python script. "
"Please make sure you have one and only one tool definition in your script.",
id="NoToolDefined",
),
pytest.param(
"multiple_tools.py",
MultipleToolsDefined,
"Expected 1 but collected 2 tools: tool1, tool2. "
"Please make sure you have one and only one tool definition in your script.",
id="MultipleToolsDefined",
),
pytest.param(
"{% zzz",
PythonParsingError,
"Failed to load python module. Python parsing failed: "
"(SyntaxError) invalid syntax (<string>, line 1)",
id="PythonParsingError_SyntaxError",
),
],
)
def test_custom_python_meta(self, content, error_code, message) -> None:
if content.endswith(".py"):
source = TOOLS_ROOT / content
with open(source, "r") as f:
code = f.read()
else:
code = content
source = None
with pytest.raises(error_code) as ex:
generate_python_meta("some_tool", code, source)
assert message == str(ex.value)
@pytest.mark.parametrize(
"content, error_code, message",
[
pytest.param(
"{% zzz",
JinjaParsingError,
"Generate tool meta failed for llm tool. Jinja parsing failed at line 1: "
"(TemplateSyntaxError) Encountered unknown tag 'zzz'.",
id="JinjaParsingError_Code",
),
pytest.param(
"no_end.jinja2",
JinjaParsingError,
"Generate tool meta failed for llm tool. Jinja parsing failed at line 2: "
"(TemplateSyntaxError) Unexpected end of template. Jinja was looking for the following tags: "
"'endfor' or 'else'. The innermost block that needs to be closed is 'for'.",
id="JinjaParsingError_File",
),
],
)
def test_custom_llm_meta(self, content, error_code, message) -> None:
if content.endswith(".jinja2"):
with open(TOOLS_ROOT / content, "r") as f:
code = f.read()
else:
code = content
with pytest.raises(error_code) as ex:
generate_prompt_meta("some_tool", code)
assert message == str(ex.value)
@pytest.mark.parametrize(
"content, error_code, message",
[
pytest.param(
"{% zzz",
JinjaParsingError,
"Generate tool meta failed for prompt tool. Jinja parsing failed at line 1: "
"(TemplateSyntaxError) Encountered unknown tag 'zzz'.",
id="JinjaParsingError_Code",
),
pytest.param(
"no_end.jinja2",
JinjaParsingError,
"Generate tool meta failed for prompt tool. Jinja parsing failed at line 2: "
"(TemplateSyntaxError) Unexpected end of template. Jinja was looking for the following tags: "
"'endfor' or 'else'. The innermost block that needs to be closed is 'for'.",
id="JinjaParsingError_File",
),
],
)
def test_custom_prompt_meta(self, content, error_code, message) -> None:
if content.endswith(".jinja2"):
with open(TOOLS_ROOT / content, "r") as f:
code = f.read()
else:
code = content
with pytest.raises(error_code) as ex:
generate_prompt_meta("some_tool", code, prompt_only=True)
assert message == str(ex.value)
@pytest.mark.unittest
class TestPythonLoadError:
def test_additional_info(self):
source = TOOLS_ROOT / "load_error.py"
with open(source, "r") as f:
code = f.read()
with pytest.raises(PythonLoadError) as ex:
generate_python_meta("some_tool", code, source)
additional_info = ExceptionPresenter.create(ex.value).to_dict().get("additionalInfo")
assert len(additional_info) == 1
info_0 = additional_info[0]
assert info_0["type"] == "UserCodeStackTrace"
info_0_value = info_0["info"]
assert info_0_value.get("type") == "ZeroDivisionError"
assert info_0_value.get("message") == "division by zero"
assert re.match(r".*load_error.py", info_0_value["filename"])
assert info_0_value.get("lineno") == 3
assert info_0_value.get("name") == "<module>"
assert re.search(
r"Traceback \(most recent call last\):\n"
r' File ".*load_error.py", line .*, in <module>\n'
r" 1 / 0\n"
r"(.*\n)?" # Python >= 3.11 add extra line here like a pointer.
r"ZeroDivisionError: division by zero\n",
info_0_value.get("traceback"),
)
def test_additional_info_for_empty_inner_error(self):
ex = PythonLoadError(message_format="Test empty error")
additional_info = ExceptionPresenter.create(ex).to_dict().get("additionalInfo")
assert additional_info is None
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_utils.py | import pytest
import os
from unittest.mock import patch
from datetime import datetime
from promptflow._utils.utils import is_json_serializable, get_int_env_var, log_progress
class MyObj:
pass
@pytest.mark.unittest
class TestUtils:
@pytest.mark.parametrize("value, expected_res", [(None, True), (1, True), ("", True), (MyObj(), False)])
def test_is_json_serializable(self, value, expected_res):
assert is_json_serializable(value) == expected_res
@pytest.mark.parametrize(
"env_var, env_value, default_value, expected_result",
[
("TEST_VAR", "10", None, 10), # Valid integer string
("TEST_VAR", "invalid", None, None), # Invalid integer strings
("TEST_VAR", None, 5, 5), # Environment variable does not exist
("TEST_VAR", "10", 5, 10), # Valid integer string with a default value
("TEST_VAR", "invalid", 5, 5), # Invalid integer string with a default value
])
def test_get_int_env_var(self, env_var, env_value, default_value, expected_result):
with patch.dict(os.environ, {env_var: env_value} if env_value is not None else {}):
assert get_int_env_var(env_var, default_value) == expected_result
@pytest.mark.parametrize(
"env_var, env_value, expected_result",
[
("TEST_VAR", "10", 10), # Valid integer string
("TEST_VAR", "invalid", None), # Invalid integer strings
("TEST_VAR", None, None), # Environment variable does not exist
])
def test_get_int_env_var_without_default_value(self, env_var, env_value, expected_result):
with patch.dict(os.environ, {env_var: env_value} if env_value is not None else {}):
assert get_int_env_var(env_var) == expected_result
@patch('promptflow.executor._line_execution_process_pool.bulk_logger', autospec=True)
def test_log_progress(self, mock_logger):
run_start_time = datetime.utcnow()
count = 1
# Test that nothing is logged when the count has not reached the logging interval (interval = 2)
total_count = 20
log_progress(run_start_time, mock_logger, count, total_count)
mock_logger.info.assert_not_called()
# Test logging at specified intervals (interval = 2)
count = 8
log_progress(run_start_time, mock_logger, count, total_count)
mock_logger.info.assert_any_call("Finished 8 / 20 lines.")
mock_logger.reset_mock()
# Test logging using last_log_count parameter (count - last_log_count > interval(2))
log_progress(run_start_time, mock_logger, count, total_count, last_log_count=5)
mock_logger.info.assert_any_call("Finished 8 / 20 lines.")
mock_logger.reset_mock()
# Test not logging using last_log_count parameter (count - last_log_count < interval(2))
log_progress(run_start_time, mock_logger, count, total_count, last_log_count=7)
mock_logger.info.assert_not_called()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/executor/unittests | promptflow_repo/promptflow/src/promptflow/tests/executor/unittests/_utils/test_dataclass_serializer.py | import pytest
from datetime import datetime
from dataclasses import dataclass
from typing import Dict, List
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._utils.dataclass_serializer import \
get_type, serialize, deserialize_dataclass, deserialize_value, assertEqual
from promptflow.contracts.run_info import RunInfo, Status
from promptflow._core.connection_manager import ConnectionManager
from promptflow.storage.run_records import NodeRunRecord
from unittest.mock import patch, Mock
import sys
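# Connection dict with placeholder secrets, used to build a ConnectionManager in the serialization tests.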
def get_connection_dict():
return {
"azure_open_ai_connection": {
"type": "AzureOpenAIConnection",
"value": {
"api_key": "<azure-openai-key>",
"api_base": "<aoai-api-endpoint>",
"api_type": "azure",
"api_version": "2023-07-01-preview",
},
},
"custom_connection": {
"type": "CustomConnection",
"value": {
"api_key": "<your-key>",
"url": "<connection-endpoint>",
},
"module": "promptflow.connections",
"secret_keys": ["api_key"],
},
}
@pytest.mark.unittest
@pytest.mark.parametrize(
"type_input, expected",
[
(NodeRunRecord, NodeRunRecord),
([NodeRunRecord], List[NodeRunRecord]),
(dict(a=NodeRunRecord), Dict[str, NodeRunRecord]),
(int, int),
(str, str),
]
)
def test_get_type(type_input, expected):
assert get_type(type_input) == expected
@pytest.mark.unittest
def test_serialize_dataclass():
start_time = datetime(2023, 9, 4)
end_time = datetime(2023, 9, 4)
node_run_info = RunInfo(
node=None,
run_id=None,
flow_run_id=None,
status=Status.Completed,
inputs=None,
output=None,
metrics=None,
error=None,
parent_run_id=None,
start_time=start_time,
end_time=end_time,
index=0,
)
node_record = NodeRunRecord.from_run_info(node_run_info)
serialized_info = serialize(node_run_info)
serialized_record = serialize(node_record)
# test dataclass without serialize attribute
assert serialized_info['status'] == "Completed"
assert serialized_info['start_time'] == "2023-09-04T00:00:00Z"
assert deserialize_value(serialized_info, RunInfo) == node_run_info
# test dataclass with serialize attribute
assert serialized_record == node_record.serialize()
@pytest.mark.unittest
@pytest.mark.parametrize(
"value, value_type, expected",
[
(datetime(2023, 9, 4), datetime, "2023-09-04T00:00:00Z"),
(Status.Completed, Status, "Completed"),
([1, 2, 3], List[int], [1, 2, 3]),
({"a": 1, "b": 2}, Dict[str, int], {"a": 1, "b": 2}),
(1, int, 1),
("a", str, "a"),
]
)
def test_serialize_value(value, value_type, expected):
assert serialize(value) == expected
assert deserialize_value(serialize(value), value_type) == value
@pytest.mark.unittest
def test_serialize_remove_null():
value = {"a": 1, "b": None}
value_type = Dict[str, int]
assert deserialize_value(serialize(value, remove_null=True), value_type) == {"a": 1, "b": None}
@dataclass
class DummyDataClass:
name: str
age: int
assert serialize(DummyDataClass("Dummy", None), remove_null=True) == {'name': 'Dummy'}
@pytest.mark.unittest
def test_serialize_connection():
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
assert serialize(connection_manager.get("azure_open_ai_connection")) == "azure_open_ai_connection"
@pytest.mark.unittest
def test_serialize_generator():
def generator():
for i in range(3):
yield i
g = GeneratorProxy(generator())
next(g)
assert serialize(g) == [0]
@pytest.mark.unittest
@patch.dict('sys.modules', {'pydantic': None})
def test_import_pydantic_error():
# mock pydantic is not installed
class DummyClass:
def __init__(self, name, age):
self.name = name
self.age = age
dummy = DummyClass('Test', 20)
assert serialize(dummy) == dummy
@pytest.mark.unittest
@patch.dict('sys.modules', {'pydantic': Mock()})
def test_import_pydantic():
# mock pydantic is installed
class MockBaseModel:
def dict(self):
return {"key": "value"}
mock_value = MockBaseModel()
sys.modules['pydantic'].BaseModel = MockBaseModel
assert serialize(mock_value) == mock_value.dict()
assert serialize(123) == 123
@pytest.mark.unittest
def test_deserialize_dataclass():
# test when cls is not dataclass
with pytest.raises(ValueError):
deserialize_dataclass(int, 1)
# test when data is not a dict
with pytest.raises(ValueError):
deserialize_dataclass(NodeRunRecord, "NodeRunRecord")
@dataclass
class DummyDataClassWithDefault:
name: str = "Default Name"
age: int = 0
# test deserialize dataclass with default value
data = {"age": 25}
obj = deserialize_dataclass(DummyDataClassWithDefault, data)
assert obj.name == "Default Name"
assert obj.age == 25
@pytest.mark.unittest
@pytest.mark.parametrize(
"a, b, expected",
[
(1, 2, 1),
(Status.Completed, Status, Status.Completed),
(None, datetime, None),
("2022-01-01T00:00:00", datetime, datetime.fromisoformat("2022-01-01T00:00:00")),
]
)
def test_deserialize_value(a, b, expected):
assert deserialize_value(a, b) == expected
@pytest.mark.unittest
@pytest.mark.parametrize(
"a, b, path, are_equal",
[
# Test with identical dicts
({'key1': 'value1', 'key2': 'value2'}, {'key1': 'value1', 'key2': 'value2'}, \
"unittests/_utils/test_dataclass_serializer", True),
# Test with non-identical dicts
({'key1': 'value1', 'key2': 'value2'}, {'key1': 'value1', 'key3': 'value3'}, \
"unittests/_utils/test_dataclass_serializer", False),
# Test with identical lists
(['item1', 'item2'], ['item1', 'item2'], "", True),
# Test with non-identical lists
(['item1', 'item2'], ['item1', 'item3'], "", False),
# Test with other types
(1, 1, "", True),
(1, 2, "", False),
('string', 'string', "", True),
('string1', 'string2', "", False),
]
)
def test_assertEqual(a, b, path, are_equal):
if are_equal:
assertEqual(a, b, path)
else:
with pytest.raises(AssertionError):
assertEqual(a, b, path)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/conftest.py | import base64
import json
import multiprocessing
import os
from pathlib import Path
from unittest.mock import patch
import pytest
from mock import mock
from pytest_mock import MockerFixture
from sqlalchemy import create_engine
from promptflow import PFClient
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._constants import EXPERIMENT_CREATED_ON_INDEX_NAME, EXPERIMENT_TABLE_NAME, LOCAL_MGMT_DB_PATH
from promptflow._sdk._serving.app import create_app as create_serving_app
from promptflow._sdk.entities import AzureOpenAIConnection as AzureOpenAIConnectionEntity
from promptflow._sdk.entities._connection import CustomConnection, _Connection
from promptflow.executor._line_execution_process_pool import _process_wrapper
from promptflow.executor._process_manager import create_spawned_fork_process_manager
from .recording_utilities import RecordStorage, mock_tool, recording_array_extend, recording_array_reset
PROMOTFLOW_ROOT = Path(__file__) / "../../.."
RUNTIME_TEST_CONFIGS_ROOT = Path(PROMOTFLOW_ROOT / "tests/test_configs/runtime")
RECORDINGS_TEST_CONFIGS_ROOT = Path(PROMOTFLOW_ROOT / "tests/test_configs/node_recordings").resolve()
CONNECTION_FILE = (PROMOTFLOW_ROOT / "connections.json").resolve().absolute().as_posix()
MODEL_ROOT = Path(PROMOTFLOW_ROOT / "tests/test_configs/flows")
@pytest.fixture(scope="session")
def local_client() -> PFClient:
yield PFClient()
@pytest.fixture(scope="session")
def pf() -> PFClient:
yield PFClient()
@pytest.fixture()
def local_aoai_connection(local_client, azure_open_ai_connection):
conn = AzureOpenAIConnectionEntity(
name="azure_open_ai_connection",
api_key=azure_open_ai_connection.api_key,
api_base=azure_open_ai_connection.api_base,
)
local_client.connections.create_or_update(conn)
return conn
@pytest.fixture()
def local_alt_aoai_connection(local_client, azure_open_ai_connection):
conn = AzureOpenAIConnectionEntity(
name="new_ai_connection",
api_key=azure_open_ai_connection.api_key,
api_base=azure_open_ai_connection.api_base,
)
local_client.connections.create_or_update(conn)
return conn
@pytest.fixture()
def local_custom_connection(local_client, azure_open_ai_connection):
conn = CustomConnection(
name="test_custom_connection",
secrets={"test_secret": "test_value"},
)
local_client.connections.create_or_update(conn)
return conn
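# Module-level guard so setup_local_connection only loads connections from CONNECTION_FILE once per process.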
_connection_setup = False
@pytest.fixture
def setup_local_connection(local_client, azure_open_ai_connection):
global _connection_setup
if _connection_setup:
return
connection_dict = json.loads(open(CONNECTION_FILE, "r").read())
for name, _dct in connection_dict.items():
if _dct["type"] == "BingConnection":
continue
local_client.connections.create_or_update(_Connection._from_execution_connection_dict(name=name, data=_dct))
_connection_setup = True
@pytest.fixture
def setup_experiment_table():
with mock.patch("promptflow._sdk._configuration.Configuration.is_internal_features_enabled") as mock_func:
mock_func.return_value = True
# Call this session to initialize session maker, then add experiment table
from promptflow._sdk._orm import Experiment, mgmt_db_session
from promptflow._sdk._orm.session import create_index_if_not_exists, create_or_update_table
mgmt_db_session()
engine = create_engine(f"sqlite:///{str(LOCAL_MGMT_DB_PATH)}", future=True)
if Configuration.get_instance().is_internal_features_enabled():
create_or_update_table(engine, orm_class=Experiment, tablename=EXPERIMENT_TABLE_NAME)
create_index_if_not_exists(engine, EXPERIMENT_CREATED_ON_INDEX_NAME, EXPERIMENT_TABLE_NAME, "created_on")
@pytest.fixture
def flow_serving_client(mocker: MockerFixture):
model_path = (Path(MODEL_ROOT) / "basic-with-connection").resolve().absolute().as_posix()
mocker.patch.dict(os.environ, {"PROMPTFLOW_PROJECT_PATH": model_path})
mocker.patch.dict(os.environ, {"USER_AGENT": "test-user-agent"})
app = create_serving_app(environment_variables={"API_TYPE": "${azure_open_ai_connection.api_type}"})
app.config.update(
{
"TESTING": True,
}
)
return app.test_client()
@pytest.fixture
def flow_serving_client_with_encoded_connection(mocker: MockerFixture):
from promptflow._core.connection_manager import ConnectionManager
from promptflow._sdk._serving.utils import encode_dict
connection_dict = json.loads(open(CONNECTION_FILE, "r").read())
connection_manager = ConnectionManager(connection_dict)
connections = {"PROMPTFLOW_ENCODED_CONNECTIONS": encode_dict(connection_manager.to_connections_dict())}
return create_client_by_model("basic-with-connection", mocker, connections, extension_type="azureml")
@pytest.fixture
def evaluation_flow_serving_client(mocker: MockerFixture):
model_path = (Path(MODEL_ROOT) / "web_classification").resolve().absolute().as_posix()
mocker.patch.dict(os.environ, {"PROMPTFLOW_PROJECT_PATH": model_path})
app = create_serving_app()
app.config.update(
{
"TESTING": True,
}
)
return app.test_client()
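# Shared helper: point PROMPTFLOW_PROJECT_PATH at the given flow, optionally inject connection
# environment variables, and return a test client for the serving app.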
def create_client_by_model(
model_name: str, mocker: MockerFixture, connections: dict = {}, extension_type=None, environment_variables={}
):
model_path = (Path(MODEL_ROOT) / model_name).resolve().absolute().as_posix()
mocker.patch.dict(os.environ, {"PROMPTFLOW_PROJECT_PATH": model_path})
if connections:
mocker.patch.dict(os.environ, connections)
if extension_type and extension_type == "azureml":
environment_variables["API_TYPE"] = "${azure_open_ai_connection.api_type}"
app = create_serving_app(environment_variables=environment_variables, extension_type=extension_type)
app.config.update(
{
"TESTING": True,
}
)
return app.test_client()
@pytest.fixture
def serving_client_llm_chat(mocker: MockerFixture):
return create_client_by_model("chat_flow_with_stream_output", mocker)
@pytest.fixture
def serving_client_python_stream_tools(mocker: MockerFixture):
return create_client_by_model("python_stream_tools", mocker)
@pytest.fixture
def sample_image():
image_path = (Path(MODEL_ROOT) / "python_tool_with_simple_image" / "logo.jpg").resolve()
return base64.b64encode(open(image_path, "rb").read()).decode("utf-8")
@pytest.fixture
def serving_client_image_python_flow(mocker: MockerFixture):
return create_client_by_model("python_tool_with_simple_image", mocker)
@pytest.fixture
def serving_client_composite_image_flow(mocker: MockerFixture):
return create_client_by_model("python_tool_with_composite_image", mocker)
@pytest.fixture
def serving_client_with_environment_variables(mocker: MockerFixture):
return create_client_by_model(
"flow_with_environment_variables",
mocker,
environment_variables={"env2": "runtime_env2", "env10": "aaaaa"},
)
@pytest.fixture
def recording_file_override(request: pytest.FixtureRequest, mocker: MockerFixture):
if RecordStorage.is_replaying_mode() or RecordStorage.is_recording_mode():
file_path = RECORDINGS_TEST_CONFIGS_ROOT / "node_cache.shelve"
RecordStorage.get_instance(file_path)
yield
SpawnProcess = multiprocessing.get_context("spawn").Process
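# Replacement for the spawn-context Process class: it swaps the real worker targets for the
# mocked ones below so recording injection is also set up inside child processes.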
class MockSpawnProcess(SpawnProcess):
def __init__(self, group=None, target=None, *args, **kwargs):
if target == _process_wrapper:
target = _mock_process_wrapper
if target == create_spawned_fork_process_manager:
target = _mock_create_spawned_fork_process_manager
super().__init__(group, target, *args, **kwargs)
@pytest.fixture
def recording_injection(mocker: MockerFixture, recording_file_override):
original_process_class = multiprocessing.get_context("spawn").Process
multiprocessing.get_context("spawn").Process = MockSpawnProcess
if "spawn" == multiprocessing.get_start_method():
multiprocessing.Process = MockSpawnProcess
patches = setup_recording_injection_if_enabled()
try:
yield (RecordStorage.is_replaying_mode() or RecordStorage.is_recording_mode(), recording_array_extend)
finally:
if RecordStorage.is_replaying_mode() or RecordStorage.is_recording_mode():
RecordStorage.get_instance().delete_lock_file()
recording_array_reset()
multiprocessing.get_context("spawn").Process = original_process_class
if "spawn" == multiprocessing.get_start_method():
multiprocessing.Process = original_process_class
for patcher in patches:
patcher.stop()
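# When recording or replaying is enabled, patch promptflow's tool decorator with the
# recording-aware wrapper and return the started patchers so callers can stop them later.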
def setup_recording_injection_if_enabled():
patches = []
if RecordStorage.is_replaying_mode() or RecordStorage.is_recording_mode():
file_path = RECORDINGS_TEST_CONFIGS_ROOT / "node_cache.shelve"
RecordStorage.get_instance(file_path)
from promptflow._core.tool import tool as original_tool
mocked_tool = mock_tool(original_tool)
patch_targets = ["promptflow._core.tool.tool", "promptflow._internal.tool", "promptflow.tool"]
for target in patch_targets:
patcher = patch(target, mocked_tool)
patches.append(patcher)
patcher.start()
return patches
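# Entry points used by MockSpawnProcess: re-apply recording injection in the new process
# before delegating to the real executor worker functions.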
def _mock_process_wrapper(*args, **kwargs):
setup_recording_injection_if_enabled()
return _process_wrapper(*args, **kwargs)
def _mock_create_spawned_fork_process_manager(*args, **kwargs):
setup_recording_injection_if_enabled()
return create_spawned_fork_process_manager(*args, **kwargs)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/.coveragerc | [run]
source =
*/promptflow/_cli/*
*/promptflow/_sdk/*
*/promptflow/azure/*
omit =
*/promptflow/azure/_restclient/*
*__init__.py*
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_connection.py | import uuid
from pathlib import Path
import pydash
import pytest
from promptflow._sdk._constants import SCRUBBED_VALUE
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities import AzureOpenAIConnection, CustomConnection
_client = PFClient()
TEST_ROOT = Path(__file__).parent.parent.parent
CONNECTION_ROOT = TEST_ROOT / "test_configs/connections"
@pytest.mark.cli_test
@pytest.mark.e2etest
class TestConnection:
def test_connection_operations(self):
name = f"Connection_{str(uuid.uuid4())[:4]}"
conn = AzureOpenAIConnection(name=name, api_key="test", api_base="test")
# Create
_client.connections.create_or_update(conn)
# Get
result = _client.connections.get(name)
assert pydash.omit(result._to_dict(), ["created_date", "last_modified_date", "name"]) == {
"module": "promptflow.connections",
"type": "azure_open_ai",
"api_key": "******",
"api_base": "test",
"api_type": "azure",
"api_version": "2023-07-01-preview",
}
# Update
conn.api_base = "test2"
result = _client.connections.create_or_update(conn)
assert pydash.omit(result._to_dict(), ["created_date", "last_modified_date", "name"]) == {
"module": "promptflow.connections",
"type": "azure_open_ai",
"api_key": "******",
"api_base": "test2",
"api_type": "azure",
"api_version": "2023-07-01-preview",
}
# List
result = _client.connections.list()
assert len(result) > 0
# Delete
_client.connections.delete(name)
with pytest.raises(Exception) as e:
_client.connections.get(name)
assert "is not found." in str(e.value)
def test_connection_get_and_update(self):
# Test api key not updated
name = f"Connection_{str(uuid.uuid4())[:4]}"
conn = AzureOpenAIConnection(name=name, api_key="test", api_base="test")
result = _client.connections.create_or_update(conn)
assert result.api_key == SCRUBBED_VALUE
# Update api_base only; assert no exception
result.api_base = "test2"
result = _client.connections.create_or_update(result)
assert result._to_dict()["api_base"] == "test2"
# Assert value not scrubbed
assert result._secrets["api_key"] == "test"
_client.connections.delete(name)
# Invalid update
with pytest.raises(Exception) as e:
result._secrets = {}
_client.connections.create_or_update(result)
assert "secrets ['api_key'] value invalid, please fill them" in str(e.value)
def test_custom_connection_get_and_update(self):
# Test api key not updated
name = f"Connection_{str(uuid.uuid4())[:4]}"
conn = CustomConnection(name=name, secrets={"api_key": "test"}, configs={"api_base": "test"})
result = _client.connections.create_or_update(conn)
assert result.secrets["api_key"] == SCRUBBED_VALUE
# Update api_base only; assert no exception
result.configs["api_base"] = "test2"
result = _client.connections.create_or_update(result)
assert result._to_dict()["configs"]["api_base"] == "test2"
# Assert value not scrubbed
assert result._secrets["api_key"] == "test"
_client.connections.delete(name)
# Invalid update
with pytest.raises(Exception) as e:
result._secrets = {}
_client.connections.create_or_update(result)
assert "secrets ['api_key'] value invalid, please fill them" in str(e.value)
@pytest.mark.parametrize(
"file_name, expected_updated_item, expected_secret_item",
[
("azure_openai_connection.yaml", ("api_base", "new_value"), ("api_key", "<to-be-replaced>")),
("custom_connection.yaml", ("key1", "new_value"), ("key2", "test2")),
],
)
def test_upsert_connection_from_file(self, file_name, expected_updated_item, expected_secret_item):
from promptflow._cli._pf._connection import _upsert_connection_from_file
name = f"Connection_{str(uuid.uuid4())[:4]}"
result = _upsert_connection_from_file(file=CONNECTION_ROOT / file_name, params_override=[{"name": name}])
assert result is not None
update_file_name = f"update_{file_name}"
result = _upsert_connection_from_file(file=CONNECTION_ROOT / update_file_name, params_override=[{"name": name}])
# Test secrets not updated, and configs updated
assert (
result.configs[expected_updated_item[0]] == expected_updated_item[1]
), "Assert configs updated failed, expected: {}, actual: {}".format(
expected_updated_item[1], result.configs[expected_updated_item[0]]
)
assert (
result._secrets[expected_secret_item[0]] == expected_secret_item[1]
), "Assert secrets not updated failed, expected: {}, actual: {}".format(
expected_secret_item[1], result._secrets[expected_secret_item[0]]
)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_custom_strong_type_connection.py | import uuid
from pathlib import Path
import pydash
import pytest
from promptflow._sdk._constants import SCRUBBED_VALUE, CustomStrongTypeConnectionConfigs
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow.contracts.types import Secret
class MyCustomConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
_client = PFClient()
TEST_ROOT = Path(__file__).parent.parent.parent
CONNECTION_ROOT = TEST_ROOT / "test_configs/connections"
@pytest.mark.cli_test
@pytest.mark.e2etest
class TestCustomStrongTypeConnection:
def test_connection_operations(self):
name = f"Connection_{str(uuid.uuid4())[:4]}"
conn = MyCustomConnection(name=name, secrets={"api_key": "test"}, configs={"api_base": "test"})
# Create
_client.connections.create_or_update(conn)
# Get
result = _client.connections.get(name)
assert pydash.omit(result._to_dict(), ["created_date", "last_modified_date", "name"]) == {
"module": "promptflow.connections",
"type": "custom",
"configs": {
"api_base": "test",
"promptflow.connection.custom_type": "MyCustomConnection",
"promptflow.connection.module": "sdk_cli_test.e2etests.test_custom_strong_type_connection",
},
"secrets": {"api_key": "******"},
}
# Update
conn.configs["api_base"] = "test2"
result = _client.connections.create_or_update(conn)
assert pydash.omit(result._to_dict(), ["created_date", "last_modified_date", "name"]) == {
"module": "promptflow.connections",
"type": "custom",
"configs": {
"api_base": "test2",
"promptflow.connection.custom_type": "MyCustomConnection",
"promptflow.connection.module": "sdk_cli_test.e2etests.test_custom_strong_type_connection",
},
"secrets": {"api_key": "******"},
}
# List
result = _client.connections.list()
assert len(result) > 0
# Delete
_client.connections.delete(name)
with pytest.raises(Exception) as e:
_client.connections.get(name)
assert "is not found." in str(e.value)
def test_connection_update(self):
name = f"Connection_{str(uuid.uuid4())[:4]}"
conn = MyCustomConnection(name=name, secrets={"api_key": "test"}, configs={"api_base": "test"})
# Create
_client.connections.create_or_update(conn)
# Get
custom_conn = _client.connections.get(name)
assert pydash.omit(custom_conn._to_dict(), ["created_date", "last_modified_date", "name"]) == {
"module": "promptflow.connections",
"type": "custom",
"configs": {
"api_base": "test",
"promptflow.connection.custom_type": "MyCustomConnection",
"promptflow.connection.module": "sdk_cli_test.e2etests.test_custom_strong_type_connection",
},
"secrets": {"api_key": "******"},
}
# Update
custom_conn.configs["api_base"] = "test2"
result = _client.connections.create_or_update(custom_conn)
assert pydash.omit(result._to_dict(), ["created_date", "last_modified_date", "name"]) == {
"module": "promptflow.connections",
"type": "custom",
"configs": {
"api_base": "test2",
"promptflow.connection.custom_type": "MyCustomConnection",
"promptflow.connection.module": "sdk_cli_test.e2etests.test_custom_strong_type_connection",
},
"secrets": {"api_key": "******"},
}
# List
result = _client.connections.list()
assert len(result) > 0
# Delete
_client.connections.delete(name)
with pytest.raises(Exception) as e:
_client.connections.get(name)
assert "is not found." in str(e.value)
def test_connection_get_and_update(self):
# Test api key not updated
name = f"Connection_{str(uuid.uuid4())[:4]}"
conn = MyCustomConnection(name=name, secrets={"api_key": "test"}, configs={"api_base": "test"})
result = _client.connections.create_or_update(conn)
assert result.secrets["api_key"] == SCRUBBED_VALUE
# Update api_base only; assert no exception
result.configs["api_base"] = "test2"
result = _client.connections.create_or_update(result)
assert result._to_dict()["configs"]["api_base"] == "test2"
# Assert value not scrubbed
assert result._secrets["api_key"] == "test"
_client.connections.delete(name)
# Invalid update
with pytest.raises(Exception) as e:
result._secrets = {}
_client.connections.create_or_update(result)
assert "secrets ['api_key'] value invalid, please fill them" in str(e.value)
def test_connection_get_and_update_with_key(self):
# Test api key not updated
name = f"Connection_{str(uuid.uuid4())[:4]}"
conn = MyCustomConnection(name=name, secrets={"api_key": "test"}, configs={"api_base": "test"})
assert conn.api_base == "test"
assert conn.configs["api_base"] == "test"
result = _client.connections.create_or_update(conn)
converted_conn = result._convert_to_custom_strong_type(
module=__class__.__module__, to_class="MyCustomConnection"
)
assert isinstance(converted_conn, MyCustomConnection)
assert converted_conn.api_base == "test"
converted_conn.api_base = "test2"
assert converted_conn.api_base == "test2"
assert converted_conn.configs["api_base"] == "test2"
@pytest.mark.parametrize(
"file_name, expected_updated_item, expected_secret_item",
[
("custom_strong_type_connection.yaml", ("api_base", "new_value"), ("api_key", "<to-be-replaced>")),
],
)
def test_upsert_connection_from_file(
self, install_custom_tool_pkg, file_name, expected_updated_item, expected_secret_item
):
from promptflow._cli._pf._connection import _upsert_connection_from_file
name = f"Connection_{str(uuid.uuid4())[:4]}"
result = _upsert_connection_from_file(file=CONNECTION_ROOT / file_name, params_override=[{"name": name}])
assert result is not None
assert result.configs[CustomStrongTypeConnectionConfigs.PROMPTFLOW_MODULE_KEY] == "my_tool_package.connections"
update_file_name = f"update_{file_name}"
result = _upsert_connection_from_file(file=CONNECTION_ROOT / update_file_name, params_override=[{"name": name}])
# Test secrets not updated, and configs updated
assert (
result.configs[expected_updated_item[0]] == expected_updated_item[1]
), "Assert configs updated failed, expected: {}, actual: {}".format(
expected_updated_item[1], result.configs[expected_updated_item[0]]
)
assert (
result._secrets[expected_secret_item[0]] == expected_secret_item[1]
), "Assert secrets not updated failed, expected: {}, actual: {}".format(
expected_secret_item[1], result._secrets[expected_secret_item[0]]
)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_orm.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import datetime
import json
import uuid
import pytest
from promptflow._sdk._constants import ListViewType, RunStatus, RunTypes
from promptflow._sdk._errors import RunNotFoundError
from promptflow._sdk._orm import RunInfo
@pytest.fixture()
def run_name() -> str:
name = str(uuid.uuid4())
run_info = RunInfo(
name=name,
type=RunTypes.BATCH,
created_on=datetime.datetime.now().isoformat(),
status=RunStatus.NOT_STARTED,
display_name=name,
description="",
tags=None,
properties=json.dumps({}),
)
run_info.dump()
return name
@pytest.mark.sdk_test
@pytest.mark.e2etest
class TestRunInfo:
def test_get(self, run_name: str) -> None:
run_info = RunInfo.get(run_name)
assert run_info.name == run_name
assert run_info.type == RunTypes.BATCH
assert run_info.status == RunStatus.NOT_STARTED
assert run_info.display_name == run_name
assert run_info.description == ""
assert run_info.tags is None
assert run_info.properties == json.dumps({})
def test_get_not_exist(self) -> None:
not_exist_name = str(uuid.uuid4())
with pytest.raises(RunNotFoundError) as excinfo:
RunInfo.get(not_exist_name)
assert f"Run name {not_exist_name!r} cannot be found." in str(excinfo.value)
def test_list_order_by_created_time_desc(self) -> None:
for _ in range(3):
RunInfo(
name=str(uuid.uuid4()),
created_on=datetime.datetime.now().isoformat(),
status=RunStatus.NOT_STARTED,
description="",
tags=None,
properties=json.dumps({}),
).dump()
runs = RunInfo.list(max_results=3, list_view_type=ListViewType.ALL)
# in a rare edge case the created_on values can be the same, so use ">=" here
assert runs[0].created_on >= runs[1].created_on >= runs[2].created_on
def test_archive(self, run_name: str) -> None:
run_info = RunInfo.get(run_name)
assert run_info.archived is False
run_info.archive()
# in-memory archived flag
assert run_info.archived is True
# db archived flag
assert RunInfo.get(run_name).archived is True
def test_restore(self, run_name: str) -> None:
run_info = RunInfo.get(run_name)
run_info.archive()
run_info = RunInfo.get(run_name)
assert run_info.archived is True
run_info.restore()
# in-memory archived flag
assert run_info.archived is False
# db archived flag
assert RunInfo.get(run_name).archived is False
def test_update(self, run_name: str) -> None:
run_info = RunInfo.get(run_name)
assert run_info.status == RunStatus.NOT_STARTED
assert run_info.display_name == run_name
assert run_info.description == ""
assert run_info.tags is None
updated_status = RunStatus.COMPLETED
updated_display_name = f"updated_{run_name}"
updated_description = "updated_description"
updated_tags = [{"key1": "value1", "key2": "value2"}]
run_info.update(
status=updated_status,
display_name=updated_display_name,
description=updated_description,
tags=updated_tags,
)
# in-memory status, display_name, description and tags
assert run_info.status == updated_status
assert run_info.display_name == updated_display_name
assert run_info.description == updated_description
assert run_info.tags == json.dumps(updated_tags)
# db status, display_name, description and tags
run_info = RunInfo.get(run_name)
assert run_info.status == updated_status
assert run_info.display_name == updated_display_name
assert run_info.description == updated_description
assert run_info.tags == json.dumps(updated_tags)
def test_null_type_and_display_name(self) -> None:
# test run_info table schema change:
        # 1. type can be null (we will deprecate this concept in the future)
# 2. display_name can be null as default value
name = str(uuid.uuid4())
run_info = RunInfo(
name=name,
created_on=datetime.datetime.now().isoformat(),
status=RunStatus.NOT_STARTED,
description="",
tags=None,
properties=json.dumps({}),
)
run_info.dump()
run_info_from_db = RunInfo.get(name)
assert run_info_from_db.type is None
assert run_info_from_db.display_name is None
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_run.py | import os
import shutil
import sys
import tempfile
import uuid
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from marshmallow import ValidationError
from pytest_mock import MockerFixture
from promptflow import PFClient
from promptflow._constants import PROMPTFLOW_CONNECTIONS
from promptflow._sdk._constants import (
FLOW_DIRECTORY_MACRO_IN_CONFIG,
PROMPT_FLOW_DIR_NAME,
FlowRunProperties,
LocalStorageFilenames,
RunStatus,
)
from promptflow._sdk._errors import (
ConnectionNotFoundError,
InvalidFlowError,
InvalidRunError,
InvalidRunStatusError,
RunExistsError,
RunNotFoundError,
)
from promptflow._sdk._load_functions import load_flow, load_run
from promptflow._sdk._run_functions import create_yaml_run
from promptflow._sdk._submitter.utils import SubmitterHelper
from promptflow._sdk._utils import _get_additional_includes
from promptflow._sdk.entities import Run
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
from promptflow.connections import AzureOpenAIConnection
from promptflow.exceptions import UserErrorException
from ..recording_utilities import RecordStorage
PROMOTFLOW_ROOT = Path(__file__) / "../../../.."
TEST_ROOT = Path(__file__).parent.parent.parent
MODEL_ROOT = TEST_ROOT / "test_configs/e2e_samples"
CONNECTION_FILE = (PROMOTFLOW_ROOT / "connections.json").resolve().absolute().as_posix()
FLOWS_DIR = "./tests/test_configs/flows"
EAGER_FLOWS_DIR = "./tests/test_configs/eager_flows"
RUNS_DIR = "./tests/test_configs/runs"
DATAS_DIR = "./tests/test_configs/datas"
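# helper: submit a batch run of the web_classification flow against a 3-line dataset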
def create_run_against_multi_line_data(client) -> Run:
return client.run(
flow=f"{FLOWS_DIR}/web_classification",
data=f"{DATAS_DIR}/webClassification3.jsonl",
column_mapping={"url": "${data.url}"},
)
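# helper: submit a batch run that does not require an LLM connection (print_env_var flow)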
def create_run_against_multi_line_data_without_llm(client: PFClient) -> Run:
return client.run(
flow=f"{FLOWS_DIR}/print_env_var",
data=f"{DATAS_DIR}/env_var_names.jsonl",
)
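# helper: submit an evaluation run (classification_accuracy_evaluation) against an existing base run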
def create_run_against_run(client, run: Run) -> Run:
return client.run(
flow=f"{FLOWS_DIR}/classification_accuracy_evaluation",
data=f"{DATAS_DIR}/webClassification3.jsonl",
run=run.name,
column_mapping={
"groundtruth": "${data.answer}",
"prediction": "${run.outputs.category}",
"variant_id": "${data.variant_id}",
},
)
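# helper: assert a run failed due to an invalid column mapping and persisted the expected exception details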
def assert_run_with_invalid_column_mapping(client: PFClient, run: Run) -> None:
assert run.status == RunStatus.FAILED
with pytest.raises(InvalidRunStatusError):
client.stream(run.name)
local_storage = LocalStorageOperations(run)
assert os.path.exists(local_storage._exception_path)
exception = local_storage.load_exception()
assert "The input for batch run is incorrect. Couldn't find these mapping relations" in exception["message"]
assert exception["code"] == "UserError"
assert exception["innerError"]["innerError"]["code"] == "BulkRunException"
@pytest.mark.usefixtures(
"use_secrets_config_file", "recording_injection", "setup_local_connection", "install_custom_tool_pkg"
)
@pytest.mark.sdk_test
@pytest.mark.e2etest
class TestFlowRun:
def test_basic_flow_bulk_run(self, azure_open_ai_connection: AzureOpenAIConnection, pf) -> None:
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
pf.run(flow=f"{FLOWS_DIR}/web_classification", data=data_path)
# Test repeated execute flow run
pf.run(flow=f"{FLOWS_DIR}/web_classification", data=data_path)
pf.run(flow=f"{FLOWS_DIR}/web_classification_v1", data=data_path)
pf.run(flow=f"{FLOWS_DIR}/web_classification_v2", data=data_path)
# TODO: check details
# df = pf.show_details(baseline, v1, v2)
def test_basic_run_bulk(self, azure_open_ai_connection: AzureOpenAIConnection, local_client, pf):
result = pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=f"{DATAS_DIR}/webClassification1.jsonl",
column_mapping={"url": "${data.url}"},
)
local_storage = LocalStorageOperations(result)
detail = local_storage.load_detail()
tuning_node = next((x for x in detail["node_runs"] if x["node"] == "summarize_text_content"), None)
# used default variant config
assert tuning_node["inputs"]["temperature"] == 0.3
assert "variant_0" in result.name
run = local_client.runs.get(name=result.name)
assert run.status == "Completed"
# write to user_dir/.promptflow/.runs
assert ".promptflow" in run.properties["output_path"]
def test_local_storage_delete(self, pf):
result = pf.run(flow=f"{FLOWS_DIR}/print_env_var", data=f"{DATAS_DIR}/env_var_names.jsonl")
local_storage = LocalStorageOperations(result)
local_storage.delete()
assert not os.path.exists(local_storage._outputs_path)
def test_flow_run_delete(self, pf):
result = pf.run(flow=f"{FLOWS_DIR}/print_env_var", data=f"{DATAS_DIR}/env_var_names.jsonl")
local_storage = LocalStorageOperations(result)
output_path = local_storage.path
# delete new created run by name
pf.runs.delete(result.name)
# check folders and dbs are deleted
assert not os.path.exists(output_path)
from promptflow._sdk._orm import RunInfo as ORMRun
pytest.raises(RunNotFoundError, lambda: ORMRun.get(result.name))
pytest.raises(RunNotFoundError, lambda: pf.runs.get(result.name))
def test_flow_run_delete_fake_id_raise(self, pf: PFClient):
run = "fake_run_id"
# delete new created run by name
pytest.raises(RunNotFoundError, lambda: pf.runs.delete(name=run))
@pytest.mark.skipif(sys.platform == "win32", reason="Windows doesn't support chmod, just test permission errors")
def test_flow_run_delete_invalid_permission_raise(self, pf: PFClient):
result = pf.run(flow=f"{FLOWS_DIR}/print_env_var", data=f"{DATAS_DIR}/env_var_names.jsonl")
local_storage = LocalStorageOperations(result)
output_path = local_storage.path
os.chmod(output_path, 0o555)
# delete new created run by name
pytest.raises(InvalidRunError, lambda: pf.runs.delete(name=result.name))
# Change folder permission back
os.chmod(output_path, 0o755)
pf.runs.delete(name=result.name)
assert not os.path.exists(output_path)
def test_visualize_run_with_referenced_run_deleted(self, pf: PFClient):
run_id = str(uuid.uuid4())
run = load_run(
source=f"{RUNS_DIR}/sample_bulk_run.yaml",
params_override=[{"name": run_id}],
)
run_a = pf.runs.create_or_update(run=run)
local_storage_a = LocalStorageOperations(run_a)
output_path_a = local_storage_a.path
run = load_run(source=f"{RUNS_DIR}/sample_eval_run.yaml", params_override=[{"run": run_id}])
run_b = pf.runs.create_or_update(run=run)
local_storage_b = LocalStorageOperations(run_b)
output_path_b = local_storage_b.path
pf.runs.delete(run_a.name)
assert not os.path.exists(output_path_a)
assert os.path.exists(output_path_b)
# visualize doesn't raise error
pf.runs.visualize(run_b.name)
def test_basic_flow_with_variant(self, azure_open_ai_connection: AzureOpenAIConnection, local_client, pf) -> None:
result = pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=f"{DATAS_DIR}/webClassification1.jsonl",
column_mapping={"url": "${data.url}"},
variant="${summarize_text_content.variant_0}",
)
local_storage = LocalStorageOperations(result)
detail = local_storage.load_detail()
tuning_node = next((x for x in detail["node_runs"] if x["node"] == "summarize_text_content"), None)
assert "variant_0" in result.name
# used variant_0 config
assert tuning_node["inputs"]["temperature"] == 0.2
result = pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=f"{DATAS_DIR}/webClassification1.jsonl",
column_mapping={"url": "${data.url}"},
variant="${summarize_text_content.variant_1}",
)
local_storage = LocalStorageOperations(result)
detail = local_storage.load_detail()
tuning_node = next((x for x in detail["node_runs"] if x["node"] == "summarize_text_content"), None)
assert "variant_1" in result.name
# used variant_1 config
assert tuning_node["inputs"]["temperature"] == 0.3
def test_run_bulk_error(self, pf):
# path not exist
with pytest.raises(FileNotFoundError) as e:
pf.run(
flow=f"{MODEL_ROOT}/not_exist",
data=f"{DATAS_DIR}/webClassification3.jsonl",
column_mapping={"question": "${data.question}", "context": "${data.context}"},
variant="${summarize_text_content.variant_0}",
)
assert "not exist" in str(e.value)
# tuning_node not exist
with pytest.raises(InvalidFlowError) as e:
pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=f"{DATAS_DIR}/webClassification3.jsonl",
column_mapping={"question": "${data.question}", "context": "${data.context}"},
variant="${not_exist.variant_0}",
)
assert "Node not_exist not found in flow" in str(e.value)
# invalid variant format
with pytest.raises(UserErrorException) as e:
pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=f"{DATAS_DIR}/webClassification3.jsonl",
column_mapping={"question": "${data.question}", "context": "${data.context}"},
variant="v",
)
assert "Invalid variant format: v, variant should be in format of ${TUNING_NODE.VARIANT}" in str(e.value)
def test_basic_evaluation(self, azure_open_ai_connection: AzureOpenAIConnection, local_client, pf):
result = pf.run(
flow=f"{FLOWS_DIR}/print_env_var",
data=f"{DATAS_DIR}/env_var_names.jsonl",
)
assert local_client.runs.get(result.name).status == "Completed"
eval_result = pf.run(
flow=f"{FLOWS_DIR}/classification_accuracy_evaluation",
run=result.name,
column_mapping={
"prediction": "${run.outputs.output}",
                # evaluation references run.inputs
                # NOTE: we need this value to guard behavior when a run references another run's inputs
"variant_id": "${run.inputs.key}",
                # can reference other columns in data which don't exist in the base run's inputs
"groundtruth": "${run.inputs.extra_key}",
},
)
assert local_client.runs.get(eval_result.name).status == "Completed"
def test_flow_demo(self, azure_open_ai_connection, pf):
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
column_mapping = {
"groundtruth": "${data.answer}",
"prediction": "${run.outputs.category}",
"variant_id": "${data.variant_id}",
}
metrics = {}
for flow_name, output_key in [
("web_classification", "baseline"),
("web_classification_v1", "v1"),
("web_classification_v2", "v2"),
]:
v = pf.run(flow=f"{FLOWS_DIR}/web_classification", data=data_path)
metrics[output_key] = pf.run(
flow=f"{FLOWS_DIR}/classification_accuracy_evaluation",
data=data_path,
run=v,
column_mapping=column_mapping,
)
def test_submit_run_from_yaml(self, local_client, pf):
run_id = str(uuid.uuid4())
run = create_yaml_run(source=f"{RUNS_DIR}/sample_bulk_run.yaml", params_override=[{"name": run_id}])
assert local_client.runs.get(run.name).status == "Completed"
eval_run = create_yaml_run(
source=f"{RUNS_DIR}/sample_eval_run.yaml",
params_override=[{"run": run_id}],
)
assert local_client.runs.get(eval_run.name).status == "Completed"
@pytest.mark.usefixtures("enable_logger_propagate")
def test_submit_run_with_extra_params(self, pf, caplog):
run_id = str(uuid.uuid4())
run = create_yaml_run(source=f"{RUNS_DIR}/extra_field.yaml", params_override=[{"name": run_id}])
assert pf.runs.get(run.name).status == "Completed"
assert "Run schema validation warnings. Unknown fields found" in caplog.text
def test_run_with_connection(self, local_client, local_aoai_connection, pf):
# remove connection file to test connection resolving
os.environ.pop(PROMPTFLOW_CONNECTIONS)
result = pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=f"{DATAS_DIR}/webClassification1.jsonl",
column_mapping={"url": "${data.url}"},
)
local_storage = LocalStorageOperations(result)
detail = local_storage.load_detail()
tuning_node = next((x for x in detail["node_runs"] if x["node"] == "summarize_text_content"), None)
# used default variant config
assert tuning_node["inputs"]["temperature"] == 0.3
run = local_client.runs.get(name=result.name)
assert run.status == "Completed"
def test_run_with_connection_overwrite(self, local_client, local_aoai_connection, local_alt_aoai_connection, pf):
result = pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=f"{DATAS_DIR}/webClassification1.jsonl",
connections={"classify_with_llm": {"connection": "new_ai_connection"}},
)
run = local_client.runs.get(name=result.name)
assert run.status == "Completed"
def test_custom_connection_overwrite(self, local_client, local_custom_connection, pf):
result = pf.run(
flow=f"{FLOWS_DIR}/custom_connection_flow",
data=f"{DATAS_DIR}/env_var_names.jsonl",
connections={"print_env": {"connection": "test_custom_connection"}},
)
run = local_client.runs.get(name=result.name)
assert run.status == "Completed"
# overwrite non-exist connection
with pytest.raises(InvalidFlowError) as e:
pf.run(
flow=f"{FLOWS_DIR}/custom_connection_flow",
data=f"{DATAS_DIR}/env_var_names.jsonl",
connections={"print_env": {"new_connection": "test_custom_connection"}},
)
assert "Connection with name new_connection not found" in str(e.value)
def test_basic_flow_with_package_tool_with_custom_strong_type_connection(
self, install_custom_tool_pkg, local_client, pf
):
result = pf.run(
flow=f"{FLOWS_DIR}/flow_with_package_tool_with_custom_strong_type_connection",
data=f"{FLOWS_DIR}/flow_with_package_tool_with_custom_strong_type_connection/data.jsonl",
connections={"My_First_Tool_00f8": {"connection": "custom_strong_type_connection"}},
)
run = local_client.runs.get(name=result.name)
assert run.status == "Completed"
def test_basic_flow_with_script_tool_with_custom_strong_type_connection(
self, install_custom_tool_pkg, local_client, pf
):
# Prepare custom connection
from promptflow.connections import CustomConnection
conn = CustomConnection(name="custom_connection_2", secrets={"api_key": "test"}, configs={"api_url": "test"})
local_client.connections.create_or_update(conn)
result = pf.run(
flow=f"{FLOWS_DIR}/flow_with_script_tool_with_custom_strong_type_connection",
data=f"{FLOWS_DIR}/flow_with_script_tool_with_custom_strong_type_connection/data.jsonl",
)
run = local_client.runs.get(name=result.name)
assert run.status == "Completed"
def test_run_with_connection_overwrite_non_exist(self, local_client, local_aoai_connection, pf):
# overwrite non_exist connection
with pytest.raises(ConnectionNotFoundError):
pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=f"{DATAS_DIR}/webClassification1.jsonl",
connections={"classify_with_llm": {"connection": "Not_exist"}},
)
def test_run_reference_failed_run(self, pf):
failed_run = pf.run(
flow=f"{FLOWS_DIR}/failed_flow",
data=f"{DATAS_DIR}/webClassification1.jsonl",
column_mapping={"text": "${data.url}"},
)
# "update" run status to failed since currently all run will be completed unless there's bug
pf.runs.update(
name=failed_run.name,
status="Failed",
)
run_name = str(uuid.uuid4())
with pytest.raises(UserErrorException) as e:
pf.run(
name=run_name,
flow=f"{FLOWS_DIR}/custom_connection_flow",
run=failed_run,
connections={"print_env": {"connection": "test_custom_connection"}},
)
assert "is not completed, got status" in str(e.value)
# run should not be created
with pytest.raises(RunNotFoundError):
pf.runs.get(name=run_name)
def test_referenced_output_not_exist(self, pf: PFClient) -> None:
# failed run won't generate output
failed_run = pf.run(
flow=f"{FLOWS_DIR}/failed_flow",
data=f"{DATAS_DIR}/webClassification1.jsonl",
column_mapping={"text": "${data.url}"},
)
run_name = str(uuid.uuid4())
run = pf.run(
name=run_name,
run=failed_run,
flow=f"{FLOWS_DIR}/failed_flow",
column_mapping={"text": "${run.outputs.text}"},
)
assert_run_with_invalid_column_mapping(pf, run)
def test_connection_overwrite_file(self, local_client, local_aoai_connection):
run = create_yaml_run(
source=f"{RUNS_DIR}/run_with_connections.yaml",
)
run = local_client.runs.get(name=run.name)
assert run.status == "Completed"
def test_connection_overwrite_model(self, local_client, local_aoai_connection):
run = create_yaml_run(
source=f"{RUNS_DIR}/run_with_connections_model.yaml",
)
run = local_client.runs.get(name=run.name)
assert run.status == "Completed"
def test_resolve_connection(self, local_client, local_aoai_connection):
flow = load_flow(f"{FLOWS_DIR}/web_classification_no_variants")
connections = SubmitterHelper.resolve_connections(flow, local_client)
assert local_aoai_connection.name in connections
def test_run_with_env_overwrite(self, local_client, local_aoai_connection):
run = create_yaml_run(
source=f"{RUNS_DIR}/run_with_env.yaml",
)
outputs = local_client.runs._get_outputs(run=run)
assert outputs["output"][0] == local_aoai_connection.api_base
def test_pf_run_with_env_overwrite(self, local_client, local_aoai_connection, pf):
run = pf.run(
flow=f"{FLOWS_DIR}/print_env_var",
data=f"{DATAS_DIR}/env_var_names.jsonl",
environment_variables={"API_BASE": "${azure_open_ai_connection.api_base}"},
)
outputs = local_client.runs._get_outputs(run=run)
assert outputs["output"][0] == local_aoai_connection.api_base
def test_eval_run_not_exist(self, pf):
name = str(uuid.uuid4())
with pytest.raises(RunNotFoundError) as e:
pf.runs.create_or_update(
run=Run(
name=name,
flow=Path(f"{FLOWS_DIR}/classification_accuracy_evaluation"),
run="not_exist",
column_mapping={
"groundtruth": "${data.answer}",
"prediction": "${run.outputs.category}",
# evaluation reference run.inputs
"url": "${run.inputs.url}",
},
)
)
assert "Run name 'not_exist' cannot be found" in str(e.value)
# run should not be created
with pytest.raises(RunNotFoundError):
pf.runs.get(name=name)
def test_eval_run_data_deleted(self, pf):
with tempfile.TemporaryDirectory() as temp_dir:
shutil.copy(f"{DATAS_DIR}/env_var_names.jsonl", temp_dir)
result = pf.run(
flow=f"{FLOWS_DIR}/print_env_var",
data=f"{temp_dir}/env_var_names.jsonl",
)
assert pf.runs.get(result.name).status == "Completed"
# delete original run's input data
os.remove(f"{temp_dir}/env_var_names.jsonl")
with pytest.raises(UserErrorException) as e:
pf.run(
flow=f"{FLOWS_DIR}/classification_accuracy_evaluation",
run=result.name,
column_mapping={
"prediction": "${run.outputs.output}",
                        # evaluation references run.inputs
                        # NOTE: we need this value to guard behavior when a run references another run's inputs
"variant_id": "${run.inputs.key}",
                        # can reference other columns in data which don't exist in the base run's inputs
"groundtruth": "${run.inputs.extra_key}",
},
)
assert "Please make sure it exists and not deleted." in str(e.value)
def test_eval_run_data_not_exist(self, pf):
base_run = pf.run(
flow=f"{FLOWS_DIR}/print_env_var",
data=f"{DATAS_DIR}/env_var_names.jsonl",
)
assert pf.runs.get(base_run.name).status == "Completed"
eval_run = pf.run(
flow=f"{FLOWS_DIR}/classification_accuracy_evaluation",
run=base_run.name,
column_mapping={
"prediction": "${run.outputs.output}",
                # evaluation references run.inputs
                # NOTE: we need this value to guard behavior when a run references another run's inputs
"variant_id": "${run.inputs.key}",
                # can reference other columns in data which don't exist in the base run's inputs
"groundtruth": "${run.inputs.extra_key}",
},
)
result = pf.run(
flow=f"{FLOWS_DIR}/classification_accuracy_evaluation",
run=eval_run.name,
column_mapping={
"prediction": "${run.outputs.output}",
                # evaluation references run.inputs
                # NOTE: we need this value to guard behavior when a run references another run's inputs
"variant_id": "${run.inputs.key}",
                # can reference other columns in data which don't exist in the base run's inputs
"groundtruth": "${run.inputs.extra_key}",
},
)
# Run failed because run inputs data is None, and error will be in the run output error.json
assert result.status == "Failed"
def test_create_run_with_tags(self, pf):
name = str(uuid.uuid4())
display_name = "test_run_with_tags"
tags = {"key1": "tag1"}
run = pf.run(
name=name,
display_name=display_name,
tags=tags,
flow=f"{FLOWS_DIR}/print_env_var",
data=f"{DATAS_DIR}/env_var_names.jsonl",
environment_variables={"API_BASE": "${azure_open_ai_connection.api_base}"},
)
assert run.name == name
assert "test_run_with_tags" == run.display_name
assert run.tags == tags
def test_run_display_name(self, pf):
# use run name if not specify display_name
run = pf.runs.create_or_update(
run=Run(
flow=Path(f"{FLOWS_DIR}/print_env_var"),
data=f"{DATAS_DIR}/env_var_names.jsonl",
environment_variables={"API_BASE": "${azure_open_ai_connection.api_base}"},
)
)
assert run.display_name == run.name
assert "print_env_var" in run.display_name
# will respect if specified in run
base_run = pf.runs.create_or_update(
run=Run(
flow=Path(f"{FLOWS_DIR}/print_env_var"),
data=f"{DATAS_DIR}/env_var_names.jsonl",
environment_variables={"API_BASE": "${azure_open_ai_connection.api_base}"},
display_name="my_run",
)
)
assert base_run.display_name == "my_run"
run = pf.runs.create_or_update(
run=Run(
flow=Path(f"{FLOWS_DIR}/print_env_var"),
data=f"{DATAS_DIR}/env_var_names.jsonl",
environment_variables={"API_BASE": "${azure_open_ai_connection.api_base}"},
display_name="my_run_${variant_id}_${run}",
run=base_run,
)
)
assert run.display_name == f"my_run_variant_0_{base_run.name}"
run = pf.runs.create_or_update(
run=Run(
flow=Path(f"{FLOWS_DIR}/print_env_var"),
data=f"{DATAS_DIR}/env_var_names.jsonl",
environment_variables={"API_BASE": "${azure_open_ai_connection.api_base}"},
display_name="my_run_${timestamp}",
run=base_run,
)
)
assert "${timestamp}" not in run.display_name
def test_run_dump(self, azure_open_ai_connection: AzureOpenAIConnection, pf) -> None:
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
run = pf.run(flow=f"{FLOWS_DIR}/web_classification", data=data_path)
# in fact, `pf.run` will internally query the run from db in `RunSubmitter`
        # explicitly call ORM get here to emphasize the dump operation
# if no dump operation, a RunNotFoundError will be raised here
pf.runs.get(run.name)
def test_run_list(self, azure_open_ai_connection: AzureOpenAIConnection, pf) -> None:
# create a run to ensure there is at least one run in the db
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
pf.run(flow=f"{FLOWS_DIR}/web_classification", data=data_path)
        # do not specify `max_results` here, so that if there are legacy runs in the db
# list runs API can collect them, and can somehow cover legacy schema
runs = pf.runs.list()
assert len(runs) >= 1
def test_stream_run_summary(self, azure_open_ai_connection: AzureOpenAIConnection, local_client, capfd, pf) -> None:
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
run = pf.run(flow=f"{FLOWS_DIR}/web_classification", data=data_path)
local_client.runs.stream(run.name)
out, _ = capfd.readouterr()
print(out)
assert 'Run status: "Completed"' in out
assert "Output path: " in out
def test_stream_incomplete_run_summary(
self, azure_open_ai_connection: AzureOpenAIConnection, local_client, capfd, pf
) -> None:
# use wrong data to create a failed run
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
name = str(uuid.uuid4())
run = pf.run(
flow=f"{FLOWS_DIR}/failed_flow",
data=data_path,
column_mapping={"text": "${data.url}"},
name=name,
)
local_client.runs.stream(run.name)
# assert error message in stream API
out, _ = capfd.readouterr()
assert 'Run status: "Completed"' in out
        # won't print exception; user can get it from run._to_dict()
# assert "failed with exception" in out
def test_run_data_not_provided(self, pf):
with pytest.raises(ValueError) as e:
pf.run(
flow=f"{FLOWS_DIR}/web_classification",
)
assert "at least one of data or run must be provided" in str(e)
def test_get_details(self, azure_open_ai_connection: AzureOpenAIConnection, pf) -> None:
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
run = pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=data_path,
column_mapping={"url": "${data.url}"},
)
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
local_storage = LocalStorageOperations(run)
# there should be line_number in original DataFrame, but not in details DataFrame
# as we will set index on line_number to ensure the order
outputs = pd.read_json(local_storage._outputs_path, orient="records", lines=True)
details = pf.get_details(run)
assert "line_number" in outputs and "line_number" not in details
def test_visualize_run(self, azure_open_ai_connection: AzureOpenAIConnection, pf) -> None:
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
run1 = pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=data_path,
column_mapping={"url": "${data.url}"},
)
run2 = pf.run(
flow=f"{FLOWS_DIR}/classification_accuracy_evaluation",
data=data_path,
run=run1.name,
column_mapping={
"groundtruth": "${data.answer}",
"prediction": "${run.outputs.category}",
"variant_id": "${data.variant_id}",
},
)
pf.visualize([run1, run2])
def test_incomplete_run_visualize(
self, azure_open_ai_connection: AzureOpenAIConnection, pf, capfd: pytest.CaptureFixture
) -> None:
failed_run = pf.run(
flow=f"{FLOWS_DIR}/failed_flow",
data=f"{DATAS_DIR}/webClassification1.jsonl",
column_mapping={"text": "${data.url}"},
)
# "update" run status to failed since currently all run will be completed unless there's bug
pf.runs.update(
name=failed_run.name,
status="Failed",
)
# patch logger.error to print, so that we can capture the error message using capfd
from promptflow._sdk.operations import _run_operations
_run_operations.logger.error = print
pf.visualize(failed_run)
captured = capfd.readouterr()
expected_error_message = (
f"Cannot visualize non-completed run. Run {failed_run.name!r} is not completed, the status is 'Failed'."
)
assert expected_error_message in captured.out
def test_flow_bulk_run_with_additional_includes(self, azure_open_ai_connection: AzureOpenAIConnection, pf):
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
run = pf.run(flow=f"{FLOWS_DIR}/web_classification_with_additional_include", data=data_path)
additional_includes = _get_additional_includes(run.flow / "flow.dag.yaml")
snapshot_path = Path.home() / ".promptflow" / ".runs" / run.name / "snapshot"
for item in additional_includes:
assert (snapshot_path / Path(item).name).exists()
        # Additional includes in snapshot are removed
additional_includes = _get_additional_includes(snapshot_path / "flow.dag.yaml")
assert not additional_includes
def test_input_mapping_source_not_found_error(self, azure_open_ai_connection: AzureOpenAIConnection, pf):
# input_mapping source not found error won't create run
name = str(uuid.uuid4())
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
run = pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=data_path,
column_mapping={"not_exist": "${data.not_exist_key}"},
name=name,
)
assert_run_with_invalid_column_mapping(pf, run)
def test_input_mapping_with_dict(self, azure_open_ai_connection: AzureOpenAIConnection, pf):
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
run = pf.run(
flow=f"{FLOWS_DIR}/flow_with_dict_input",
data=data_path,
column_mapping={"key": {"value": "1"}, "url": "${data.url}"},
)
outputs = pf.runs._get_outputs(run=run)
assert "dict" in outputs["output"][0]
def test_run_exist_error(self, pf):
name = str(uuid.uuid4())
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
pf.run(
name=name,
flow=f"{FLOWS_DIR}/flow_with_dict_input",
data=data_path,
column_mapping={"key": {"value": "1"}, "url": "${data.url}"},
)
        # attempting to create a new run with the same name won't affect the original run
with pytest.raises(RunExistsError):
pf.run(
name=name,
flow=f"{FLOWS_DIR}/flow_with_dict_input",
data=data_path,
column_mapping={"key": {"value": "1"}, "url": "${data.url}"},
)
run = pf.runs.get(name)
assert run.status == RunStatus.COMPLETED
assert not os.path.exists(run._output_path / LocalStorageFilenames.EXCEPTION)
def test_run_local_storage_structure(self, local_client, pf) -> None:
run = create_run_against_multi_line_data(pf)
local_storage = LocalStorageOperations(local_client.runs.get(run.name))
run_output_path = local_storage.path
assert (Path(run_output_path) / "flow_outputs").is_dir()
assert (Path(run_output_path) / "flow_outputs" / "output.jsonl").is_file()
assert (Path(run_output_path) / "flow_artifacts").is_dir()
# 3 line runs for webClassification3.jsonl
assert len([_ for _ in (Path(run_output_path) / "flow_artifacts").iterdir()]) == 3
assert (Path(run_output_path) / "node_artifacts").is_dir()
        # 5 nodes in the web classification flow DAG
assert len([_ for _ in (Path(run_output_path) / "node_artifacts").iterdir()]) == 5
def test_run_snapshot_with_flow_tools_json(self, local_client, pf) -> None:
run = create_run_against_multi_line_data(pf)
local_storage = LocalStorageOperations(local_client.runs.get(run.name))
assert (local_storage._snapshot_folder_path / ".promptflow").is_dir()
assert (local_storage._snapshot_folder_path / ".promptflow" / "flow.tools.json").is_file()
def test_get_metrics_format(self, local_client, pf) -> None:
run1 = create_run_against_multi_line_data(pf)
run2 = create_run_against_run(pf, run1)
# ensure the result is a flatten dict
assert local_client.runs.get_metrics(run2.name).keys() == {"accuracy"}
def test_get_detail_format(self, local_client, pf) -> None:
run = create_run_against_multi_line_data(pf)
# data is a jsonl file, so we can know the number of line runs
with open(f"{DATAS_DIR}/webClassification3.jsonl", "r") as f:
data = f.readlines()
number_of_lines = len(data)
local_storage = LocalStorageOperations(local_client.runs.get(run.name))
detail = local_storage.load_detail()
assert isinstance(detail, dict)
# flow runs
assert "flow_runs" in detail
assert isinstance(detail["flow_runs"], list)
assert len(detail["flow_runs"]) == number_of_lines
# node runs
assert "node_runs" in detail
assert isinstance(detail["node_runs"], list)
def test_run_logs(self, pf):
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
run = pf.run(
flow=f"{FLOWS_DIR}/flow_with_user_output",
data=data_path,
column_mapping={"key": {"value": "1"}, "url": "${data.url}"},
)
local_storage = LocalStorageOperations(run=run)
logs = local_storage.logger.get_logs()
        # For a batch run, the executor uses the bulk logger to print logs, and only prints the error log of the nodes.
existing_keywords = ["execution", "execution.bulk", "WARNING", "error log"]
assert all([keyword in logs for keyword in existing_keywords])
non_existing_keywords = ["execution.flow", "user log"]
assert all([keyword not in logs for keyword in non_existing_keywords])
def test_get_detail_against_partial_fail_run(self, pf) -> None:
run = pf.run(
flow=f"{FLOWS_DIR}/partial_fail",
data=f"{FLOWS_DIR}/partial_fail/data.jsonl",
)
detail = pf.runs.get_details(name=run.name)
detail.fillna("", inplace=True)
assert len(detail) == 3
def test_flow_with_only_static_values(self, pf):
name = str(uuid.uuid4())
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
with pytest.raises(UserErrorException) as e:
pf.run(
flow=f"{FLOWS_DIR}/flow_with_dict_input",
data=data_path,
column_mapping={"key": {"value": "1"}},
name=name,
)
assert "Column mapping must contain at least one mapping binding" in str(e.value)
# run should not be created
with pytest.raises(RunNotFoundError):
pf.runs.get(name=name)
def test_error_message_dump(self, pf):
failed_run = pf.run(
flow=f"{FLOWS_DIR}/failed_flow",
data=f"{DATAS_DIR}/webClassification1.jsonl",
column_mapping={"text": "${data.url}"},
)
# even if all lines failed, the bulk run's status is completed.
assert failed_run.status == "Completed"
# error messages will store in local
local_storage = LocalStorageOperations(failed_run)
assert os.path.exists(local_storage._exception_path)
exception = local_storage.load_exception()
assert "Failed to run 1/1 lines. First error message is" in exception["message"]
# line run failures will be stored in additionalInfo
assert len(exception["additionalInfo"][0]["info"]["errors"]) == 1
# show run will get error message
run = pf.runs.get(name=failed_run.name)
run_dict = run._to_dict()
assert "error" in run_dict
assert run_dict["error"] == exception
@pytest.mark.skipif(RecordStorage.is_replaying_mode(), reason="System metrics not supported in replaying mode")
def test_system_metrics_in_properties(self, pf) -> None:
run = create_run_against_multi_line_data(pf)
assert FlowRunProperties.SYSTEM_METRICS in run.properties
assert isinstance(run.properties[FlowRunProperties.SYSTEM_METRICS], dict)
assert "total_tokens" in run.properties[FlowRunProperties.SYSTEM_METRICS]
def test_run_get_inputs(self, pf):
# inputs should be persisted when defaults are used
run = pf.run(
flow=f"{FLOWS_DIR}/default_input",
data=f"{DATAS_DIR}/webClassification1.jsonl",
)
inputs = pf.runs._get_inputs(run=run)
assert inputs == {
"line_number": [0],
"input_bool": [False],
"input_dict": [{}],
"input_list": [[]],
"input_str": ["input value from default"],
}
# inputs should be persisted when data value are used
run = pf.run(
flow=f"{FLOWS_DIR}/flow_with_dict_input",
data=f"{DATAS_DIR}/dictInput1.jsonl",
)
inputs = pf.runs._get_inputs(run=run)
assert inputs == {"key": [{"key": "value in data"}], "line_number": [0]}
# inputs should be persisted when column-mapping are used
run = pf.run(
flow=f"{FLOWS_DIR}/flow_with_dict_input",
data=f"{DATAS_DIR}/webClassification1.jsonl",
column_mapping={"key": {"value": "value in column-mapping"}, "url": "${data.url}"},
)
inputs = pf.runs._get_inputs(run=run)
assert inputs == {
"key": [{"value": "value in column-mapping"}],
"line_number": [0],
"url": ["https://www.youtube.com/watch?v=o5ZQyXaAv1g"],
}
def test_executor_logs_in_batch_run_logs(self, pf) -> None:
run = create_run_against_multi_line_data_without_llm(pf)
local_storage = LocalStorageOperations(run=run)
logs = local_storage.logger.get_logs()
        # the warning below is printed by the executor before the batch run is executed
        # the warning message results from not using column mapping
        # so it is expected to be printed here
assert "Starting run without column mapping may lead to unexpected results." in logs
def test_basic_image_flow_bulk_run(self, pf, local_client) -> None:
image_flow_path = f"{FLOWS_DIR}/python_tool_with_simple_image"
data_path = f"{image_flow_path}/image_inputs/inputs.jsonl"
result = pf.run(flow=image_flow_path, data=data_path, column_mapping={"image": "${data.image}"})
run = local_client.runs.get(name=result.name)
assert run.status == "Completed"
assert "error" not in run._to_dict()
def test_python_tool_with_composite_image(self, pf) -> None:
image_flow_path = f"{FLOWS_DIR}/python_tool_with_composite_image"
data_path = f"{image_flow_path}/inputs.jsonl"
result = pf.run(
flow=image_flow_path,
data=data_path,
column_mapping={
"image_list": "${data.image_list}",
"image_dict": "${data.image_dict}",
},
)
run = pf.runs.get(name=result.name)
assert run.status == "Completed"
# no error when processing lines
assert "error" not in run._to_dict()
# test input from output
result = pf.run(
run=result,
flow=image_flow_path,
column_mapping={
"image_list": "${run.outputs.output}"
# image dict will use default value, which is relative to flow's folder
},
)
run = pf.runs.get(name=result.name)
assert run.status == "Completed"
# no error when processing lines
assert "error" not in run._to_dict()
def test_image_without_default(self, pf):
image_flow_path = f"{FLOWS_DIR}/python_tool_with_simple_image_without_default"
data_path = f"{DATAS_DIR}/image_inputs"
result = pf.run(
flow=image_flow_path,
data=data_path,
column_mapping={
"image_1": "${data.image}",
"image_2": "${data.image}",
},
)
run = pf.runs.get(name=result.name)
assert run.status == "Completed", run.name
# no error when processing lines
assert "error" not in run._to_dict(), run.name
def test_get_details_for_image_in_flow(self, pf) -> None:
image_flow_path = f"{FLOWS_DIR}/python_tool_with_simple_image"
data_path = f"{image_flow_path}/image_inputs/inputs.jsonl"
run = pf.run(
flow=image_flow_path,
data=data_path,
column_mapping={"image": "${data.image}"},
)
details = pf.get_details(run.name)
for i in range(len(details)):
input_image_path = details["inputs.image"][i]["data:image/png;path"]
assert Path(input_image_path).is_absolute()
output_image_path = details["outputs.output"][i]["data:image/png;path"]
assert Path(output_image_path).is_absolute()
def test_stream_raise_on_error_false(self, pf: PFClient, capfd: pytest.CaptureFixture) -> None:
data_path = f"{DATAS_DIR}/webClassification3.jsonl"
run = pf.run(
flow=f"{FLOWS_DIR}/web_classification",
data=data_path,
column_mapping={"not_exist": "${data.not_exist_key}"},
name=str(uuid.uuid4()),
)
# raise_on_error=False, will print error message in stdout
pf.stream(run.name, raise_on_error=False)
out, _ = capfd.readouterr()
assert "The input for batch run is incorrect. Couldn't find these mapping relations" in out
def test_stream_canceled_run(self, pf: PFClient, capfd: pytest.CaptureFixture) -> None:
run = create_run_against_multi_line_data_without_llm(pf)
pf.runs.update(name=run.name, status=RunStatus.CANCELED)
# (default) raise_on_error=True
with pytest.raises(InvalidRunStatusError):
pf.stream(run.name)
# raise_on_error=False
pf.stream(run.name, raise_on_error=False)
out, _ = capfd.readouterr()
assert "Run is canceled." in out
def test_specify_run_output_path(self, pf: PFClient, mocker: MockerFixture) -> None:
        # mock to imitate user specifying config run.output_path
specified_run_output_path = (Path.home() / PROMPT_FLOW_DIR_NAME / ".mock").resolve().as_posix()
with mocker.patch(
"promptflow._sdk._configuration.Configuration.get_run_output_path",
return_value=specified_run_output_path,
):
run = create_run_against_multi_line_data_without_llm(pf)
local_storage = LocalStorageOperations(run=run)
expected_output_path_prefix = (Path(specified_run_output_path) / run.name).resolve().as_posix()
assert local_storage.outputs_folder.as_posix().startswith(expected_output_path_prefix)
def test_override_run_output_path_in_pf_client(self) -> None:
specified_run_output_path = (Path.home() / PROMPT_FLOW_DIR_NAME / ".another_mock").resolve().as_posix()
pf = PFClient(config={"run.output_path": specified_run_output_path})
run = create_run_against_multi_line_data_without_llm(pf)
local_storage = LocalStorageOperations(run=run)
expected_output_path_prefix = (Path(specified_run_output_path) / run.name).resolve().as_posix()
assert local_storage.outputs_folder.as_posix().startswith(expected_output_path_prefix)
def test_specify_run_output_path_with_macro(self, pf: PFClient, mocker: MockerFixture) -> None:
        # mock to imitate user specifying config run.output_path with the flow directory macro
with mocker.patch(
"promptflow._sdk._configuration.Configuration.get_run_output_path",
return_value=f"{FLOW_DIRECTORY_MACRO_IN_CONFIG}/.promptflow",
):
for _ in range(3):
run = create_run_against_multi_line_data_without_llm(pf)
local_storage = LocalStorageOperations(run=run)
expected_path_prefix = Path(FLOWS_DIR) / "print_env_var" / ".promptflow" / run.name
expected_path_prefix = expected_path_prefix.resolve().as_posix()
assert local_storage.outputs_folder.as_posix().startswith(expected_path_prefix)
def test_specify_run_output_path_with_invalid_macro(self, pf: PFClient, mocker: MockerFixture) -> None:
        # mock to imitate user specifying an invalid config run.output_path
with mocker.patch(
"promptflow._sdk._configuration.Configuration.get_run_output_path",
# this case will happen when user manually modifies ~/.promptflow/pf.yaml
return_value=f"{FLOW_DIRECTORY_MACRO_IN_CONFIG}",
):
run = create_run_against_multi_line_data_without_llm(pf)
# as the specified run output path is invalid
# the actual run output path will be the default value
local_storage = LocalStorageOperations(run=run)
expected_output_path_prefix = (Path.home() / PROMPT_FLOW_DIR_NAME / ".runs" / run.name).resolve().as_posix()
assert local_storage.outputs_folder.as_posix().startswith(expected_output_path_prefix)
def test_failed_run_to_dict_exclude(self, pf):
failed_run = pf.run(
flow=f"{FLOWS_DIR}/failed_flow",
data=f"{DATAS_DIR}/webClassification1.jsonl",
column_mapping={"text": "${data.url}"},
)
default = failed_run._to_dict()
# CLI will exclude additional info and debug info
exclude = failed_run._to_dict(exclude_additional_info=True, exclude_debug_info=True)
assert "additionalInfo" in default["error"] and "additionalInfo" not in exclude["error"]
assert "debugInfo" in default["error"] and "debugInfo" not in exclude["error"]
def test_create_run_with_existing_run_folder(self, pf):
        # TODO: Should use a fixture to create a run and download it to be used here.
run_name = "web_classification_variant_0_20231205_120253_104100"
# clean the run if exists
from promptflow._cli._utils import _try_delete_existing_run_record
_try_delete_existing_run_record(run_name)
# assert the run doesn't exist
with pytest.raises(RunNotFoundError):
pf.runs.get(run_name)
# create the run with run folder
run_folder = f"{RUNS_DIR}/{run_name}"
run = Run._load_from_source(source=run_folder)
pf.runs.create_or_update(run)
# test with other local run operations
run = pf.runs.get(run_name)
assert run.name == run_name
details = pf.get_details(run_name)
assert details.shape == (3, 5)
metrics = pf.runs.get_metrics(run_name)
assert metrics == {}
pf.stream(run_name)
pf.visualize([run_name])
def test_aggregation_node_failed(self, pf):
failed_run = pf.run(
flow=f"{FLOWS_DIR}/aggregation_node_failed",
data=f"{FLOWS_DIR}/aggregation_node_failed/data.jsonl",
)
# even if all lines failed, the bulk run's status is completed.
assert failed_run.status == "Completed"
# error messages will store in local
local_storage = LocalStorageOperations(failed_run)
assert os.path.exists(local_storage._exception_path)
exception = local_storage.load_exception()
assert "First error message is" in exception["message"]
# line run failures will be stored in additionalInfo
assert len(exception["additionalInfo"][0]["info"]["errors"]) == 1
# show run will get error message
run = pf.runs.get(name=failed_run.name)
run_dict = run._to_dict()
assert "error" in run_dict
assert run_dict["error"] == exception
def test_get_details_against_partial_completed_run(self, pf: PFClient, monkeypatch) -> None:
        # TODO: remove this patch after executor switches to default spawn
monkeypatch.setenv("PF_BATCH_METHOD", "spawn")
flow_mod2 = f"{FLOWS_DIR}/mod-n/two"
flow_mod3 = f"{FLOWS_DIR}/mod-n/three"
data_path = f"{DATAS_DIR}/numbers.jsonl"
# batch run against data
run1 = pf.run(
flow=flow_mod2,
data=data_path,
column_mapping={"number": "${data.value}"},
)
pf.runs.stream(run1)
details1 = pf.get_details(run1)
assert len(details1) == 20
assert len(details1.loc[details1["outputs.output"] != "(Failed)"]) == 10
# assert to ensure inputs and outputs are aligned
for _, row in details1.iterrows():
if str(row["outputs.output"]) != "(Failed)":
assert int(row["inputs.number"]) == int(row["outputs.output"])
# batch run against previous run
run2 = pf.run(
flow=flow_mod3,
run=run1,
column_mapping={"number": "${run.outputs.output}"},
)
pf.runs.stream(run2)
details2 = pf.get_details(run2)
assert len(details2) == 10
assert len(details2.loc[details2["outputs.output"] != "(Failed)"]) == 4
# assert to ensure inputs and outputs are aligned
for _, row in details2.iterrows():
if str(row["outputs.output"]) != "(Failed)":
assert int(row["inputs.number"]) == int(row["outputs.output"])
monkeypatch.delenv("PF_BATCH_METHOD")
def test_flow_with_nan_inf(self, pf: PFClient, monkeypatch) -> None:
        # TODO: remove this patch after executor switches to default spawn
monkeypatch.setenv("PF_BATCH_METHOD", "spawn")
run = pf.run(
flow=f"{FLOWS_DIR}/flow-with-nan-inf",
data=f"{DATAS_DIR}/numbers.jsonl",
column_mapping={"number": "${data.value}"},
)
pf.stream(run)
local_storage = LocalStorageOperations(run=run)
# default behavior: no special logic for nan and inf
detail = local_storage.load_detail()
first_line_run_output = detail["flow_runs"][0]["output"]["output"]
assert isinstance(first_line_run_output["nan"], float)
assert np.isnan(first_line_run_output["nan"])
assert isinstance(first_line_run_output["inf"], float)
assert np.isinf(first_line_run_output["inf"])
# handles nan and inf, which is real scenario during visualize
detail = local_storage.load_detail(parse_const_as_str=True)
first_line_run_output = detail["flow_runs"][0]["output"]["output"]
assert isinstance(first_line_run_output["nan"], str)
assert first_line_run_output["nan"] == "NaN"
assert isinstance(first_line_run_output["inf"], str)
assert first_line_run_output["inf"] == "Infinity"
monkeypatch.delenv("PF_BATCH_METHOD")
@pytest.mark.skip("Enable this when executor change merges")
def test_eager_flow_run_without_yaml(self, pf):
# TODO(2898455): support this
flow_path = Path(f"{EAGER_FLOWS_DIR}/simple_without_yaml/entry.py")
run = pf.run(
flow=flow_path,
entry="my_flow",
data=f"{DATAS_DIR}/simple_eager_flow_data.jsonl",
)
assert run.status == "Completed"
def test_eager_flow_run_with_yaml(self, pf):
flow_path = Path(f"{EAGER_FLOWS_DIR}/simple_with_yaml")
run = pf.run(
flow=flow_path,
data=f"{DATAS_DIR}/simple_eager_flow_data.jsonl",
)
assert run.status == "Completed"
def test_eager_flow_test_invalid_cases(self, pf):
# no entry provided
flow_path = Path(f"{EAGER_FLOWS_DIR}/simple_without_yaml/entry.py")
with pytest.raises(UserErrorException) as e:
pf.run(
flow=flow_path,
data=f"{DATAS_DIR}/simple_eager_flow_data.jsonl",
)
assert "Entry function is not specified" in str(e.value)
# no path provided
flow_path = Path(f"{EAGER_FLOWS_DIR}/invalid_no_path/")
with pytest.raises(ValidationError) as e:
pf.run(
flow=flow_path,
data=f"{DATAS_DIR}/simple_eager_flow_data.jsonl",
)
assert "'path': ['Missing data for required field.']" in str(e.value)
def test_get_incomplete_run(self, local_client, pf) -> None:
with tempfile.TemporaryDirectory() as temp_dir:
shutil.copytree(f"{FLOWS_DIR}/print_env_var", f"{temp_dir}/print_env_var")
run = pf.run(
flow=f"{temp_dir}/print_env_var",
data=f"{DATAS_DIR}/env_var_names.jsonl",
)
# remove run dag
shutil.rmtree(f"{temp_dir}/print_env_var")
# can still get run operations
LocalStorageOperations(run=run)
            # can call _to_dict
run._to_dict()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_serve_azureml_extension.py | import json
import pytest
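# exercises the serving API when connections are provided in encoded form (see the flow_serving_client_with_encoded_connection fixture)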
@pytest.mark.usefixtures("recording_injection")
@pytest.mark.e2etest
def test_azureml_serving_api_with_encoded_connection(flow_serving_client_with_encoded_connection):
response = flow_serving_client_with_encoded_connection.get("/health")
assert b'{"status":"Healthy","version":"0.0.1"}' in response.data
response = flow_serving_client_with_encoded_connection.post("/score", data=json.dumps({"text": "hi"}))
assert (
response.status_code == 200
), f"Response code indicates error {response.status_code} - {response.data.decode()}"
assert "output_prompt" in json.loads(response.data.decode())
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_serve.py | import json
import os
import re
import pytest
from promptflow._core.operation_context import OperationContext
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_swagger(flow_serving_client):
swagger_dict = json.loads(flow_serving_client.get("/swagger.json").data.decode())
assert swagger_dict == {
"components": {"securitySchemes": {"bearerAuth": {"scheme": "bearer", "type": "http"}}},
"info": {
"title": "Promptflow[basic-with-connection] API",
"version": "1.0.0",
"x-flow-name": "basic-with-connection",
},
"openapi": "3.0.0",
"paths": {
"/score": {
"post": {
"requestBody": {
"content": {
"application/json": {
"example": {"text": "Hello World!"},
"schema": {
"properties": {"text": {"type": "string"}},
"required": ["text"],
"type": "object",
},
}
},
"description": "promptflow input data",
"required": True,
},
"responses": {
"200": {
"content": {
"application/json": {
"schema": {"properties": {"output_prompt": {"type": "string"}}, "type": "object"}
}
},
"description": "successful operation",
},
"400": {"description": "Invalid input"},
"default": {"description": "unexpected error"},
},
"summary": "run promptflow: basic-with-connection with an given input",
}
}
},
"security": [{"bearerAuth": []}],
}
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_chat_swagger(serving_client_llm_chat):
swagger_dict = json.loads(serving_client_llm_chat.get("/swagger.json").data.decode())
assert swagger_dict == {
"components": {"securitySchemes": {"bearerAuth": {"scheme": "bearer", "type": "http"}}},
"info": {
"title": "Promptflow[chat_flow_with_stream_output] API",
"version": "1.0.0",
"x-flow-name": "chat_flow_with_stream_output",
"x-chat-history": "chat_history",
"x-chat-input": "question",
"x-flow-type": "chat",
"x-chat-output": "answer",
},
"openapi": "3.0.0",
"paths": {
"/score": {
"post": {
"requestBody": {
"content": {
"application/json": {
"example": {},
"schema": {
"properties": {
"chat_history": {
"type": "array",
"items": {"type": "object", "additionalProperties": {}},
},
"question": {"type": "string", "default": "What is ChatGPT?"},
},
"required": ["chat_history", "question"],
"type": "object",
},
}
},
"description": "promptflow input data",
"required": True,
},
"responses": {
"200": {
"content": {
"application/json": {
"schema": {"properties": {"answer": {"type": "string"}}, "type": "object"}
}
},
"description": "successful operation",
},
"400": {"description": "Invalid input"},
"default": {"description": "unexpected error"},
},
"summary": "run promptflow: chat_flow_with_stream_output with an given input",
}
}
},
"security": [{"bearerAuth": []}],
}
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_user_agent(flow_serving_client):
operation_context = OperationContext.get_instance()
assert "test-user-agent" in operation_context.get_user_agent()
assert "promptflow-local-serving" in operation_context.get_user_agent()
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_serving_api(flow_serving_client):
response = flow_serving_client.get("/health")
assert b'{"status":"Healthy","version":"0.0.1"}' in response.data
response = flow_serving_client.get("/")
print(response.data)
assert response.status_code == 200
response = flow_serving_client.post("/score", data=json.dumps({"text": "hi"}))
assert (
response.status_code == 200
), f"Response code indicates error {response.status_code} - {response.data.decode()}"
assert "output_prompt" in json.loads(response.data.decode())
# Assert environment variable resolved
assert os.environ["API_TYPE"] == "azure"
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_evaluation_flow_serving_api(evaluation_flow_serving_client):
response = evaluation_flow_serving_client.post("/score", data=json.dumps({"url": "https://www.microsoft.com/"}))
assert (
response.status_code == 200
), f"Response code indicates error {response.status_code} - {response.data.decode()}"
assert "category" in json.loads(response.data.decode())
@pytest.mark.e2etest
def test_unknown_api(flow_serving_client):
response = flow_serving_client.get("/unknown")
assert b"not supported by current app" in response.data
assert response.status_code == 404
response = flow_serving_client.post("/health") # health api should be GET
assert b"not supported by current app" in response.data
assert response.status_code == 404
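# content negotiation for streaming chat: each Accept header maps to an expected status code and content type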
@pytest.mark.usefixtures("recording_injection", "setup_local_connection")
@pytest.mark.e2etest
@pytest.mark.parametrize(
"accept, expected_status_code, expected_content_type",
[
("text/event-stream", 200, "text/event-stream; charset=utf-8"),
("text/html", 406, "application/json"),
("application/json", 200, "application/json"),
("*/*", 200, "application/json"),
("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
("application/json, */*", 200, "application/json"),
("", 200, "application/json"),
],
)
def test_stream_llm_chat(
serving_client_llm_chat,
accept,
expected_status_code,
expected_content_type,
):
payload = {
"question": "What is the capital of France?",
"chat_history": [],
}
headers = {
"Content-Type": "application/json",
"Accept": accept,
}
response = serving_client_llm_chat.post("/score", json=payload, headers=headers)
assert response.status_code == expected_status_code
assert response.content_type == expected_content_type
if response.status_code == 406:
assert response.json["error"]["code"] == "UserError"
assert (
f"Media type {accept} in Accept header is not acceptable. Supported media type(s) -"
in response.json["error"]["message"]
)
if "text/event-stream" in response.content_type:
for line in response.data.decode().split("\n"):
print(line)
else:
result = response.json
print(result)
@pytest.mark.e2etest
@pytest.mark.parametrize(
"accept, expected_status_code, expected_content_type",
[
("text/event-stream", 200, "text/event-stream; charset=utf-8"),
("text/html", 406, "application/json"),
("application/json", 200, "application/json"),
("*/*", 200, "application/json"),
("text/event-stream, application/json", 200, "text/event-stream; charset=utf-8"),
("application/json, */*", 200, "application/json"),
("", 200, "application/json"),
],
)
def test_stream_python_stream_tools(
serving_client_python_stream_tools,
accept,
expected_status_code,
expected_content_type,
):
payload = {
"text": "Hello World!",
}
headers = {
"Content-Type": "application/json",
"Accept": accept,
}
response = serving_client_python_stream_tools.post("/score", json=payload, headers=headers)
assert response.status_code == expected_status_code
assert response.content_type == expected_content_type
# The predefined flow in this test case is echo flow, which will return the input text.
# Check output as test logic validation.
# Stream generator generating logic
# - The output is split into words, and each word is sent as a separate event
# - Event data is a dict { $flowoutput_field_name : $word}
# - The event data is formatted as f"data: {json.dumps(data)}\n\n"
# - Generator will yield the event data for each word
if response.status_code == 200:
expected_output = f"Echo: {payload.get('text')}"
if "text/event-stream" in response.content_type:
words = expected_output.split()
lines = response.data.decode().split("\n\n")
# The last line is empty
lines = lines[:-1]
assert all(f"data: {json.dumps({'output_echo' : f'{w} '})}" == l for w, l in zip(words, lines))
else:
# For json response, iterator is joined into a string with "" as delimiter
words = expected_output.split()
merged_text = "".join(word + " " for word in words)
expected_json = {"output_echo": merged_text}
result = response.json
assert expected_json == result
elif response.status_code == 406:
assert response.json["error"]["code"] == "UserError"
assert (
f"Media type {accept} in Accept header is not acceptable. Supported media type(s) -"
in response.json["error"]["message"]
)
@pytest.mark.usefixtures("recording_injection")
@pytest.mark.e2etest
@pytest.mark.parametrize(
"accept, expected_status_code, expected_content_type",
[
("text/event-stream", 406, "application/json"),
("application/json", 200, "application/json"),
("*/*", 200, "application/json"),
("text/event-stream, application/json", 200, "application/json"),
("application/json, */*", 200, "application/json"),
("", 200, "application/json"),
],
)
def test_stream_python_nonstream_tools(
flow_serving_client,
accept,
expected_status_code,
expected_content_type,
):
payload = {
"text": "Hello World!",
}
headers = {
"Content-Type": "application/json",
"Accept": accept,
}
response = flow_serving_client.post("/score", json=payload, headers=headers)
if "text/event-stream" in response.content_type:
for line in response.data.decode().split("\n"):
print(line)
else:
result = response.json
print(result)
assert response.status_code == expected_status_code
assert response.content_type == expected_content_type
@pytest.mark.usefixtures("serving_client_image_python_flow", "recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_image_flow(serving_client_image_python_flow, sample_image):
response = serving_client_image_python_flow.post("/score", data=json.dumps({"image": sample_image}))
assert (
response.status_code == 200
), f"Response code indicates error {response.status_code} - {response.data.decode()}"
response = json.loads(response.data.decode())
assert {"output"} == response.keys()
key_regex = re.compile(r"data:image/(.*);base64")
assert re.match(key_regex, list(response["output"].keys())[0])
@pytest.mark.usefixtures("serving_client_composite_image_flow", "recording_injection", "setup_local_connection")
@pytest.mark.e2etest
def test_list_image_flow(serving_client_composite_image_flow, sample_image):
image_dict = {"data:image/jpg;base64": sample_image}
response = serving_client_composite_image_flow.post(
"/score", data=json.dumps({"image_list": [image_dict], "image_dict": {"my_image": image_dict}})
)
assert (
response.status_code == 200
), f"Response code indicates error {response.status_code} - {response.data.decode()}"
response = json.loads(response.data.decode())
assert {"output"} == response.keys()
assert (
"data:image/jpg;base64" in response["output"][0]
), f"data:image/jpg;base64 not in output list {response['output']}"
@pytest.mark.usefixtures("serving_client_with_environment_variables")
@pytest.mark.e2etest
def test_flow_with_environment_variables(serving_client_with_environment_variables):
    expected_environment_variables = {
"env1": "2",
"env2": "runtime_env2",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
"env10": "aaaaa",
}
    for key, value in expected_environment_variables.items():
response = serving_client_with_environment_variables.post("/score", data=json.dumps({"key": key}))
assert (
response.status_code == 200
), f"Response code indicates error {response.status_code} - {response.data.decode()}"
response = json.loads(response.data.decode())
assert {"output"} == response.keys()
assert response["output"] == value
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_test.py | import logging
import tempfile
from pathlib import Path
from types import GeneratorType
import papermill
import pytest
from marshmallow import ValidationError
from promptflow._sdk._constants import LOGGER_NAME
from promptflow._sdk._pf_client import PFClient
from promptflow.exceptions import UserErrorException
PROMOTFLOW_ROOT = Path(__file__) / "../../../.."
TEST_ROOT = Path(__file__).parent.parent.parent
MODEL_ROOT = TEST_ROOT / "test_configs/e2e_samples"
CONNECTION_FILE = (PROMOTFLOW_ROOT / "connections.json").resolve().absolute().as_posix()
FLOWS_DIR = (TEST_ROOT / "test_configs/flows").resolve().absolute().as_posix()
EAGER_FLOWS_DIR = (TEST_ROOT / "test_configs/eager_flows").resolve().absolute().as_posix()
FLOW_RESULT_KEYS = ["category", "evidence"]
_client = PFClient()
@pytest.mark.usefixtures(
"use_secrets_config_file", "recording_injection", "setup_local_connection", "install_custom_tool_pkg"
)
@pytest.mark.sdk_test
@pytest.mark.e2etest
class TestFlowTest:
def test_pf_test_flow(self):
inputs = {"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
flow_path = Path(f"{FLOWS_DIR}/web_classification").absolute()
result = _client.test(flow=flow_path, inputs=inputs)
assert all([key in FLOW_RESULT_KEYS for key in result])
result = _client.test(flow=f"{FLOWS_DIR}/web_classification")
assert all([key in FLOW_RESULT_KEYS for key in result])
def test_pf_test_flow_with_package_tool_with_custom_strong_type_connection(self, install_custom_tool_pkg):
inputs = {"text": "Hello World!"}
flow_path = Path(f"{FLOWS_DIR}/flow_with_package_tool_with_custom_strong_type_connection").absolute()
        # Test that the connection resolves to the custom strong type in the flow
result = _client.test(flow=flow_path, inputs=inputs)
assert result == {"out": "connection_value is MyFirstConnection: True"}
# Test node run
result = _client.test(flow=flow_path, inputs={"input_text": "Hello World!"}, node="My_Second_Tool_usi3")
assert result == "Hello World!This is my first custom connection."
def test_pf_test_flow_with_package_tool_with_custom_connection_as_input_value(self, install_custom_tool_pkg):
# Prepare custom connection
from promptflow.connections import CustomConnection
conn = CustomConnection(name="custom_connection_3", secrets={"api_key": "test"}, configs={"api_base": "test"})
_client.connections.create_or_update(conn)
inputs = {"text": "Hello World!"}
flow_path = Path(f"{FLOWS_DIR}/flow_with_package_tool_with_custom_connection").absolute()
        # Test that the connection resolves to the custom strong type in the flow
result = _client.test(flow=flow_path, inputs=inputs)
assert result == {"out": "connection_value is MyFirstConnection: True"}
def test_pf_test_flow_with_script_tool_with_custom_strong_type_connection(self):
# Prepare custom connection
from promptflow.connections import CustomConnection
conn = CustomConnection(name="custom_connection_2", secrets={"api_key": "test"}, configs={"api_url": "test"})
_client.connections.create_or_update(conn)
inputs = {"text": "Hello World!"}
flow_path = Path(f"{FLOWS_DIR}/flow_with_script_tool_with_custom_strong_type_connection").absolute()
        # Test that the connection resolves to the custom strong type in the flow
result = _client.test(flow=flow_path, inputs=inputs)
assert result == {"out": "connection_value is MyCustomConnection: True"}
# Test node run
result = _client.test(flow=flow_path, inputs={"input_param": "Hello World!"}, node="my_script_tool")
assert result == "connection_value is MyCustomConnection: True"
def test_pf_test_with_streaming_output(self):
flow_path = Path(f"{FLOWS_DIR}/chat_flow_with_stream_output")
result = _client.test(flow=flow_path)
chat_output = result["answer"]
assert isinstance(chat_output, GeneratorType)
assert "".join(chat_output)
flow_path = Path(f"{FLOWS_DIR}/basic_with_builtin_llm_node")
result = _client.test(flow=flow_path)
chat_output = result["output"]
assert isinstance(chat_output, str)
def test_pf_test_node(self):
inputs = {"classify_with_llm.output": '{"category": "App", "evidence": "URL"}'}
flow_path = Path(f"{FLOWS_DIR}/web_classification").absolute()
result = _client.test(flow=flow_path, inputs=inputs, node="convert_to_dict")
assert all([key in FLOW_RESULT_KEYS for key in result])
def test_pf_test_flow_with_variant(self):
inputs = {"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
result = _client.test(
flow=f"{FLOWS_DIR}/web_classification", inputs=inputs, variant="${summarize_text_content.variant_1}"
)
assert all([key in FLOW_RESULT_KEYS for key in result])
@pytest.mark.skip("TODO this test case failed in windows and Mac")
def test_pf_test_with_additional_includes(self, caplog):
from promptflow import VERSION
print(VERSION)
with caplog.at_level(level=logging.WARNING, logger=LOGGER_NAME):
inputs = {"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
result = _client.test(flow=f"{FLOWS_DIR}/web_classification_with_additional_include", inputs=inputs)
duplicate_file_content = "Found duplicate file in additional includes"
assert any([duplicate_file_content in record.message for record in caplog.records])
assert all([key in FLOW_RESULT_KEYS for key in result])
inputs = {"classify_with_llm.output": '{"category": "App", "evidence": "URL"}'}
result = _client.test(flow=f"{FLOWS_DIR}/web_classification", inputs=inputs, node="convert_to_dict")
assert all([key in FLOW_RESULT_KEYS for key in result])
        # Test the case where the additional include does not exist
with pytest.raises(UserErrorException) as e:
_client.test(flow=f"{FLOWS_DIR}/web_classification_with_invalid_additional_include")
assert "Unable to find additional include ../invalid/file/path" in str(e.value)
def test_pf_flow_test_with_symbolic(self, prepare_symbolic_flow):
inputs = {"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
result = _client.test(flow=f"{FLOWS_DIR}/web_classification_with_additional_include", inputs=inputs)
assert all([key in FLOW_RESULT_KEYS for key in result])
inputs = {"classify_with_llm.output": '{"category": "App", "evidence": "URL"}'}
result = _client.test(flow=f"{FLOWS_DIR}/web_classification", inputs=inputs, node="convert_to_dict")
assert all([key in FLOW_RESULT_KEYS for key in result])
def test_pf_flow_test_with_exception(self, capsys):
# Test flow with exception
inputs = {"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
flow_path = Path(f"{FLOWS_DIR}/web_classification_with_exception").absolute()
with pytest.raises(UserErrorException) as exception:
_client.test(flow=flow_path, inputs=inputs)
assert "Execution failure in 'convert_to_dict': (Exception) mock exception" in str(exception.value)
# Test node with exception
inputs = {"classify_with_llm.output": '{"category": "App", "evidence": "URL"}'}
with pytest.raises(Exception) as exception:
_client.test(flow=flow_path, inputs=inputs, node="convert_to_dict")
output = capsys.readouterr()
assert "convert_to_dict.py" in output.out
assert "mock exception" in str(exception.value)
def test_node_test_with_connection_input(self):
flow_path = Path(f"{FLOWS_DIR}/basic-with-connection").absolute()
inputs = {
"connection": "azure_open_ai_connection",
"hello_prompt.output": "system:\n Your task is to write python program for me\nuser:\n"
"Write a simple Hello World! program that displays "
"the greeting message when executed.",
}
result = _client.test(
flow=flow_path,
inputs=inputs,
node="echo_my_prompt",
environment_variables={"API_TYPE": "${azure_open_ai_connection.api_type}"},
)
assert result
def test_pf_flow_with_aggregation(self):
flow_path = Path(f"{FLOWS_DIR}/classification_accuracy_evaluation").absolute()
inputs = {"variant_id": "variant_0", "groundtruth": "Pdf", "prediction": "PDF"}
result = _client._flows._test(flow=flow_path, inputs=inputs)
assert "calculate_accuracy" in result.node_run_infos
assert result.run_info.metrics == {"accuracy": 1.0}
def test_generate_tool_meta_in_additional_folder(self):
flow_path = Path(f"{FLOWS_DIR}/web_classification_with_additional_include").absolute()
flow_tools, _ = _client._flows._generate_tools_meta(flow=flow_path)
for tool in flow_tools["code"].values():
assert (Path(flow_path) / tool["source"]).exists()
def test_pf_test_with_non_english_input(self):
result = _client.test(flow=f"{FLOWS_DIR}/flow_with_non_english_input")
assert result["output"] == "Hello 日本語"
def test_pf_node_test_with_dict_input(self):
flow_path = Path(f"{FLOWS_DIR}/flow_with_dict_input").absolute()
flow_inputs = {"key": {"input_key": "input_value"}}
result = _client._flows._test(flow=flow_path, inputs=flow_inputs)
assert result.run_info.status.value == "Completed"
inputs = {
"get_dict_val.output.value": result.node_run_infos["get_dict_val"].output,
"get_dict_val.output.origin_value": result.node_run_infos["get_dict_val"].output,
}
node_result = _client._flows._test(flow=flow_path, node="print_val", inputs=inputs)
assert node_result.status.value == "Completed"
inputs = {
"val": result.node_run_infos["get_dict_val"].output,
"origin_val": result.node_run_infos["get_dict_val"].output
}
node_result = _client._flows._test(flow=flow_path, node="print_val", inputs=inputs)
assert node_result.status.value == "Completed"
def test_pf_node_test_with_node_ref(self):
flow_path = Path(f"{FLOWS_DIR}/flow_with_dict_input").absolute()
flow_inputs = {"key": {"input_key": "input_value"}}
result = _client._flows._test(flow=flow_path, inputs=flow_inputs)
assert result.run_info.status.value == "Completed"
        # Test node ref using the referenced node's output names
inputs = {
"get_dict_val.output.value": result.node_run_infos["get_dict_val"].output["value"],
"get_dict_val.output.origin_value": result.node_run_infos["get_dict_val"].output["origin_value"],
}
ref_result = _client._flows._test(flow=flow_path, node="print_val", inputs=inputs)
assert ref_result.status.value == "Completed"
        # Test node ref using the tested node's input names
inputs = {
"val": result.node_run_infos["get_dict_val"].output["value"],
"origin_val": result.node_run_infos["get_dict_val"].output["origin_value"],
}
variable_result = _client._flows._test(flow=flow_path, node="print_val", inputs=inputs)
assert variable_result.status.value == "Completed"
def test_pf_test_flow_in_notebook(self):
notebook_path = Path(f"{TEST_ROOT}/test_configs/notebooks/dummy.ipynb").absolute()
with tempfile.TemporaryDirectory() as temp_dir:
output_notebook_path = Path(temp_dir) / "output.ipynb"
papermill.execute_notebook(
notebook_path,
output_path=output_notebook_path,
cwd=notebook_path.parent,
)
def test_eager_flow_test(self):
flow_path = Path(f"{EAGER_FLOWS_DIR}/simple_without_yaml/entry.py").absolute()
result = _client._flows._test(flow=flow_path, entry="my_flow", inputs={"input_val": "val1"})
assert result.run_info.status.value == "Completed"
def test_eager_flow_test_with_yaml(self):
flow_path = Path(f"{EAGER_FLOWS_DIR}/simple_with_yaml/").absolute()
result = _client._flows._test(flow=flow_path, inputs={"input_val": "val1"})
assert result.run_info.status.value == "Completed"
def test_eager_flow_test_with_primitive_output(self):
flow_path = Path(f"{EAGER_FLOWS_DIR}/primitive_output/").absolute()
result = _client._flows._test(flow=flow_path, inputs={"input_val": "val1"})
assert result.run_info.status.value == "Completed"
def test_eager_flow_test_invalid_cases(self):
# no entry provided
flow_path = Path(f"{EAGER_FLOWS_DIR}/simple_without_yaml/entry.py").absolute()
with pytest.raises(UserErrorException) as e:
_client._flows._test(flow=flow_path, inputs={"input_val": "val1"})
assert "Entry function is not specified" in str(e.value)
# no path provided
flow_path = Path(f"{EAGER_FLOWS_DIR}/invalid_no_path/").absolute()
with pytest.raises(ValidationError) as e:
_client._flows._test(flow=flow_path, inputs={"input_val": "val1"})
assert "'path': ['Missing data for required field.']" in str(e.value)
# dup entries provided
flow_path = Path(f"{EAGER_FLOWS_DIR}/simple_with_yaml/").absolute()
with pytest.raises(UserErrorException) as e:
_client._flows._test(flow=flow_path, entry="my_flow", inputs={"input_val": "val1"})
assert "Specifying entry function is not allowed" in str(e.value)
# wrong entry provided
# required inputs not provided
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_local_operations.py | import copy
import os.path
import re
import shutil
import tempfile
from pathlib import Path
import mock
import pytest
from promptflow._sdk._constants import FLOW_TOOLS_JSON, NODE_VARIANTS, PROMPT_FLOW_DIR_NAME, USE_VARIANTS
from promptflow._utils.yaml_utils import load_yaml
from promptflow.connections import AzureOpenAIConnection
PROMOTFLOW_ROOT = Path(__file__) / "../../../.."
TEST_ROOT = Path(__file__).parent.parent.parent
MODEL_ROOT = TEST_ROOT / "test_configs/e2e_samples"
CONNECTION_FILE = (PROMOTFLOW_ROOT / "connections.json").resolve().absolute().as_posix()
FLOWS_DIR = "./tests/test_configs/flows"
DATAS_DIR = "./tests/test_configs/datas"
def e2e_test_docker_build_and_run(output_path):
"""Build and run the docker image locally.
    This function is for ad-hoc local testing and needs to run on a dev machine with docker installed.
"""
import subprocess
subprocess.check_output(["docker", "build", ".", "-t", "test"], cwd=output_path)
subprocess.check_output(["docker", "tag", "test", "elliotz/promptflow-export-result:latest"], cwd=output_path)
    subprocess.check_output(
        [
            "docker",
            "run",
            "-e",
            "CUSTOM_CONNECTION_AZURE_OPENAI_API_KEY='xxx'",
            "elliotz/promptflow-export-result:latest",
        ],
        cwd=output_path,
    )
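# Shell equivalent of the helper above, for reference when debugging locally:
#   docker build . -t test
#   docker tag test elliotz/promptflow-export-result:latest
#   docker run -e CUSTOM_CONNECTION_AZURE_OPENAI_API_KEY='xxx' elliotz/promptflow-export-result:latest
# Example ad-hoc invocation (manual only, requires docker; not executed in CI):
# e2e_test_docker_build_and_run(f"{FLOWS_DIR}/export/linux")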
@pytest.fixture
def setup_connections(azure_open_ai_connection: AzureOpenAIConnection):
_ = {
"azure_open_ai_connection": azure_open_ai_connection,
}
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._connection import _Connection
_client = PFClient()
_client.connections.create_or_update(
_Connection._load(
data={
"name": "custom_connection",
"type": "custom",
"configs": {
"CHAT_DEPLOYMENT_NAME": "gpt-35-turbo",
"AZURE_OPENAI_API_BASE": azure_open_ai_connection.api_base,
},
"secrets": {
"AZURE_OPENAI_API_KEY": azure_open_ai_connection.api_key,
},
}
)
)
_client.connections.create_or_update(
_Connection._load(
data={
"name": "azure_open_ai_connection",
"type": "azure_open_ai",
"api_type": azure_open_ai_connection.api_type,
"api_base": azure_open_ai_connection.api_base,
"api_version": azure_open_ai_connection.api_version,
"api_key": azure_open_ai_connection.api_key,
}
)
)
@pytest.mark.usefixtures("use_secrets_config_file", "setup_connections")
@pytest.mark.sdk_test
@pytest.mark.e2etest
class TestFlowLocalOperations:
def test_flow_build_as_docker(self, pf) -> None:
source = f"{FLOWS_DIR}/intent-copilot"
output_path = f"{FLOWS_DIR}/export/linux"
shutil.rmtree(output_path, ignore_errors=True)
(Path(source) / ".runs").mkdir(exist_ok=True)
(Path(source) / ".runs" / "dummy_run_file").touch()
with mock.patch("promptflow._sdk.operations._flow_operations.generate_random_string") as mock_random_string:
mock_random_string.return_value = "dummy1"
pf.flows.build(
flow=source,
output=output_path,
format="docker",
)
assert mock_random_string.call_count == 1
# check if .amlignore works
assert os.path.isdir(f"{source}/data")
assert not (Path(output_path) / "flow" / "data").exists()
# check if .runs is ignored by default
assert os.path.isfile(f"{source}/.runs/dummy_run_file")
assert not (Path(output_path) / "flow" / ".runs" / "dummy_run_file").exists()
# e2e_test_docker_build_and_run(output_path)
def test_flow_build_as_docker_with_additional_includes(self, pf) -> None:
source = f"{FLOWS_DIR}/web_classification_with_additional_include"
with tempfile.TemporaryDirectory() as temp_dir:
pf.flows.build(
flow=source,
output=temp_dir,
format="docker",
)
for additional_include in [
"../external_files/convert_to_dict.py",
"../external_files/fetch_text_content_from_url.py",
"../external_files/summarize_text_content.jinja2",
]:
additional_include_path = Path(source, additional_include)
target_path = Path(temp_dir, "flow", additional_include_path.name)
assert target_path.is_file()
assert target_path.read_text() == additional_include_path.read_text()
def test_flow_build_flow_only(self, pf) -> None:
source = f"{FLOWS_DIR}/web_classification_with_additional_include"
with tempfile.TemporaryDirectory() as temp_dir:
pf.flows.build(
flow=source,
output=temp_dir,
format="docker",
flow_only=True,
)
for additional_include in [
"../external_files/convert_to_dict.py",
"../external_files/fetch_text_content_from_url.py",
"../external_files/summarize_text_content.jinja2",
]:
additional_include_path = Path(source, additional_include)
target_path = Path(temp_dir, additional_include_path.name)
assert target_path.is_file()
assert target_path.read_text() == additional_include_path.read_text()
assert Path(temp_dir, PROMPT_FLOW_DIR_NAME, FLOW_TOOLS_JSON).is_file()
with open(Path(temp_dir, "flow.dag.yaml"), "r", encoding="utf-8") as f:
flow_dag_content = load_yaml(f)
assert NODE_VARIANTS not in flow_dag_content
assert "additional_includes" not in flow_dag_content
assert not any([USE_VARIANTS in node for node in flow_dag_content["nodes"]])
def test_flow_build_as_docker_with_variant(self, pf) -> None:
source = f"{FLOWS_DIR}/web_classification_with_additional_include"
flow_dag_path = Path(source, "flow.dag.yaml")
flow_dag = load_yaml(flow_dag_path)
with tempfile.TemporaryDirectory() as temp_dir:
pf.flows.build(
flow=source,
output=temp_dir,
format="docker",
variant="${summarize_text_content.variant_0}",
)
new_flow_dag_path = Path(temp_dir, "flow", "flow.dag.yaml")
new_flow_dag = load_yaml(new_flow_dag_path)
target_node = next(filter(lambda x: x["name"] == "summarize_text_content", new_flow_dag["nodes"]))
target_node.pop("name")
assert target_node == flow_dag["node_variants"]["summarize_text_content"]["variants"]["variant_0"]["node"]
def test_flow_build_generate_flow_tools_json(self, pf) -> None:
source = f"{FLOWS_DIR}/web_classification_with_additional_include"
with tempfile.TemporaryDirectory() as temp_dir:
pf.flows.build(
flow=source,
output=temp_dir,
variant="${summarize_text_content.variant_0}",
)
flow_tools_path = Path(temp_dir) / "flow" / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
assert flow_tools_path.is_file()
# package in flow.tools.json is not determined by the flow, so we don't check it here
assert load_yaml(flow_tools_path)["code"] == {
"classify_with_llm.jinja2": {
"inputs": {
"examples": {"type": ["string"]},
"text_content": {"type": ["string"]},
"url": {"type": ["string"]},
},
"source": "classify_with_llm.jinja2",
"type": "llm",
},
"convert_to_dict.py": {
"function": "convert_to_dict",
"inputs": {"input_str": {"type": ["string"]}},
"source": "convert_to_dict.py",
"type": "python",
},
"fetch_text_content_from_url.py": {
"function": "fetch_text_content_from_url",
"inputs": {"url": {"type": ["string"]}},
"source": "fetch_text_content_from_url.py",
"type": "python",
},
"prepare_examples.py": {
"function": "prepare_examples",
"source": "prepare_examples.py",
"type": "python",
},
"summarize_text_content.jinja2": {
"inputs": {"text": {"type": ["string"]}},
"source": "summarize_text_content.jinja2",
"type": "llm",
},
}
def test_flow_validate_generate_flow_tools_json(self, pf) -> None:
source = f"{FLOWS_DIR}/web_classification_with_additional_include"
flow_tools_path = Path(source) / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
flow_tools_path.unlink(missing_ok=True)
validation_result = pf.flows.validate(flow=source)
assert validation_result.passed
assert flow_tools_path.is_file()
# package in flow.tools.json is not determined by the flow, so we don't check it here
assert load_yaml(flow_tools_path)["code"] == {
"classify_with_llm.jinja2": {
"inputs": {
"examples": {"type": ["string"]},
"text_content": {"type": ["string"]},
"url": {"type": ["string"]},
},
"source": "classify_with_llm.jinja2",
"type": "llm",
},
"convert_to_dict.py": {
"function": "convert_to_dict",
"inputs": {"input_str": {"type": ["string"]}},
"source": os.path.join("..", "external_files", "convert_to_dict.py"),
"type": "python",
},
"fetch_text_content_from_url.py": {
"function": "fetch_text_content_from_url",
"inputs": {"url": {"type": ["string"]}},
"source": os.path.join("..", "external_files", "fetch_text_content_from_url.py"),
"type": "python",
},
"prepare_examples.py": {
"function": "prepare_examples",
"source": "prepare_examples.py",
"type": "python",
},
"summarize_text_content.jinja2": {
"inputs": {"text": {"type": ["string"]}},
"source": os.path.join("..", "external_files", "summarize_text_content.jinja2"),
"type": "llm",
},
"summarize_text_content__variant_1.jinja2": {
"inputs": {"text": {"type": ["string"]}},
"source": "summarize_text_content__variant_1.jinja2",
"type": "llm",
},
}
def test_flow_validation_failed(self, pf) -> None:
source = f"{FLOWS_DIR}/web_classification_invalid"
flow_tools_path = Path(source) / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
flow_tools_path.unlink(missing_ok=True)
validation_result = pf.flows.validate(flow=source)
error_messages = copy.deepcopy(validation_result.error_messages)
assert "Failed to load python module from file" in error_messages.pop("nodes.2.source.path", "")
for yaml_path in [
"node_variants.summarize_text_content.variants.variant_0.node.source.path",
"nodes.1.source.path",
]:
assert re.search(r"Meta file '.*' can not be found.", error_messages.pop(yaml_path, ""))
assert error_messages == {
"inputs.url.type": "Missing data for required field.",
"outputs.category.type": "Missing data for required field.",
}
assert "line 22" in repr(validation_result)
assert flow_tools_path.is_file()
flow_tools = load_yaml(flow_tools_path)
assert "code" in flow_tools
assert flow_tools["code"] == {
"classify_with_llm.jinja2": {
"inputs": {
"examples": {"type": ["string"]},
"text_content": {"type": ["string"]},
"url": {"type": ["string"]},
},
"source": "classify_with_llm.jinja2",
"type": "prompt",
},
"./classify_with_llm.jinja2": {
"inputs": {
"examples": {"type": ["string"]},
"text_content": {"type": ["string"]},
"url": {"type": ["string"]},
},
"source": "./classify_with_llm.jinja2",
"type": "llm",
},
"convert_to_dict.py": {
"function": "convert_to_dict",
"inputs": {"input_str": {"type": ["string"]}},
"source": "convert_to_dict.py",
"type": "python",
},
"fetch_text_content_from_url.py": {
"function": "fetch_text_content_from_url",
"inputs": {"url": {"type": ["string"]}},
"source": os.path.join("..", "external_files", "fetch_text_content_from_url.py"),
"type": "python",
},
"summarize_text_content__variant_1.jinja2": {
"inputs": {"text": {"type": ["string"]}},
"source": "summarize_text_content__variant_1.jinja2",
"type": "llm",
},
}
def test_flow_generate_tools_meta(self, pf) -> None:
source = f"{FLOWS_DIR}/web_classification_invalid"
tools_meta, tools_error = pf.flows._generate_tools_meta(source)
assert tools_meta["code"] == {
"classify_with_llm.jinja2": {
"inputs": {
"examples": {"type": ["string"]},
"text_content": {"type": ["string"]},
"url": {"type": ["string"]},
},
"source": "classify_with_llm.jinja2",
"type": "prompt",
},
"./classify_with_llm.jinja2": {
"inputs": {
"examples": {"type": ["string"]},
"text_content": {"type": ["string"]},
"url": {"type": ["string"]},
},
"source": "./classify_with_llm.jinja2",
"type": "llm",
},
"convert_to_dict.py": {
"function": "convert_to_dict",
"inputs": {"input_str": {"type": ["string"]}},
"source": "convert_to_dict.py",
"type": "python",
},
"fetch_text_content_from_url.py": {
"function": "fetch_text_content_from_url",
"inputs": {"url": {"type": ["string"]}},
"source": os.path.join("..", "external_files", "fetch_text_content_from_url.py"),
"type": "python",
},
"summarize_text_content__variant_1.jinja2": {
"inputs": {"text": {"type": ["string"]}},
"source": "summarize_text_content__variant_1.jinja2",
"type": "llm",
},
}
# promptflow-tools is not installed in ci
# assert list(tools_meta["package"]) == ["promptflow.tools.azure_translator.get_translation"]
assert "Failed to load python module from file" in tools_error.pop("prepare_examples.py", "")
assert re.search(r"Meta file '.*' can not be found.", tools_error.pop("summarize_text_content.jinja2", ""))
assert tools_error == {}
tools_meta, tools_error = pf.flows._generate_tools_meta(source, source_name="summarize_text_content.jinja2")
assert tools_meta == {"code": {}, "package": {}}
assert re.search(r"Meta file '.*' can not be found.", tools_error.pop("summarize_text_content.jinja2", ""))
assert tools_error == {}
tools_meta, tools_error = pf.flows._generate_tools_meta(source, source_name="fetch_text_content_from_url.py")
assert tools_meta == {
"code": {
"fetch_text_content_from_url.py": {
"function": "fetch_text_content_from_url",
"inputs": {"url": {"type": ["string"]}},
"source": os.path.join("..", "external_files", "fetch_text_content_from_url.py"),
"type": "python",
},
},
"package": {},
}
assert tools_error == {}
@pytest.mark.skip(reason="It will fail in CI for some reasons. Still need to investigate.")
def test_flow_generate_tools_meta_timeout(self, pf) -> None:
source = f"{FLOWS_DIR}/web_classification_invalid"
for tools_meta, tools_error in [
pf.flows._generate_tools_meta(source, timeout=1),
# There is no built-in method to forcefully stop a running thread in Python
# because abruptly stopping a thread can cause issues like resource leaks,
# deadlocks, or inconsistent states.
# Caller (VSCode extension) will handle the timeout error.
# pf.flows._generate_tools_meta(source, source_name="convert_to_dict.py", timeout=1),
]:
assert tools_meta == {"code": {}, "package": {}}
assert tools_error
for error in tools_error.values():
assert "timeout" in error
def test_flow_generate_tools_meta_with_pkg_tool_with_custom_strong_type_connection(self, pf) -> None:
source = f"{FLOWS_DIR}/flow_with_package_tool_with_custom_strong_type_connection"
tools_meta, tools_error = pf.flows._generate_tools_meta(source)
assert tools_error == {}
assert tools_meta["code"] == {}
assert tools_meta["package"] == {
"my_tool_package.tools.my_tool_1.my_tool": {
"function": "my_tool",
"inputs": {
"connection": {
"type": ["CustomConnection"],
"custom_type": ["MyFirstConnection", "MySecondConnection"],
},
"input_text": {"type": ["string"]},
},
"module": "my_tool_package.tools.my_tool_1",
"name": "My First Tool",
"description": "This is my first tool",
"type": "python",
"package": "test-custom-tools",
"package_version": "0.0.2",
},
"my_tool_package.tools.my_tool_2.MyTool.my_tool": {
"class_name": "MyTool",
"function": "my_tool",
"inputs": {
"connection": {"type": ["CustomConnection"], "custom_type": ["MySecondConnection"]},
"input_text": {"type": ["string"]},
},
"module": "my_tool_package.tools.my_tool_2",
"name": "My Second Tool",
"description": "This is my second tool",
"type": "python",
"package": "test-custom-tools",
"package_version": "0.0.2",
},
}
def test_flow_generate_tools_meta_with_script_tool_with_custom_strong_type_connection(self, pf) -> None:
source = f"{FLOWS_DIR}/flow_with_script_tool_with_custom_strong_type_connection"
tools_meta, tools_error = pf.flows._generate_tools_meta(source)
assert tools_error == {}
assert tools_meta["package"] == {}
assert tools_meta["code"] == {
"my_script_tool.py": {
"function": "my_tool",
"inputs": {"connection": {"type": ["CustomConnection"]}, "input_param": {"type": ["string"]}},
"source": "my_script_tool.py",
"type": "python",
}
}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_experiment.py | from pathlib import Path
import pytest
from ruamel.yaml import YAML
from promptflow import PFClient
from promptflow._sdk._constants import ExperimentStatus, RunStatus
from promptflow._sdk._load_functions import load_common
from promptflow._sdk.entities._experiment import (
CommandNode,
Experiment,
ExperimentData,
ExperimentInput,
ExperimentTemplate,
FlowNode,
)
TEST_ROOT = Path(__file__).parent.parent.parent
EXP_ROOT = TEST_ROOT / "test_configs/experiments"
FLOW_ROOT = TEST_ROOT / "test_configs/flows"
yaml = YAML(typ="safe")
@pytest.mark.e2etest
@pytest.mark.usefixtures("setup_experiment_table")
class TestExperiment:
def test_experiment_from_template(self):
template_path = EXP_ROOT / "basic-no-script-template" / "basic.exp.yaml"
# Load template and create experiment
template = load_common(ExperimentTemplate, source=template_path)
experiment = Experiment.from_template(template)
# Assert experiment parts are resolved
assert len(experiment.nodes) == 2
assert all(isinstance(n, FlowNode) for n in experiment.nodes)
assert len(experiment.data) == 1
assert isinstance(experiment.data[0], ExperimentData)
assert len(experiment.inputs) == 1
assert isinstance(experiment.inputs[0], ExperimentInput)
# Assert type is resolved
assert experiment.inputs[0].default == 1
# Pop schema and resolve path
expected = dict(yaml.load(open(template_path, "r", encoding="utf-8").read()))
expected.pop("$schema")
expected["data"][0]["path"] = (FLOW_ROOT / "web_classification" / "data.jsonl").absolute().as_posix()
expected["nodes"][0]["path"] = (experiment._output_dir / "snapshots" / "main").absolute().as_posix()
expected["nodes"][1]["path"] = (experiment._output_dir / "snapshots" / "eval").absolute().as_posix()
experiment_dict = experiment._to_dict()
assert experiment_dict["data"][0].items() == expected["data"][0].items()
assert experiment_dict["nodes"][0].items() == expected["nodes"][0].items()
assert experiment_dict["nodes"][1].items() == expected["nodes"][1].items()
assert experiment_dict.items() >= expected.items()
def test_experiment_from_template_with_script_node(self):
template_path = EXP_ROOT / "basic-script-template" / "basic-script.exp.yaml"
# Load template and create experiment
template = load_common(ExperimentTemplate, source=template_path)
experiment = Experiment.from_template(template)
# Assert command node load correctly
assert len(experiment.nodes) == 4
expected = dict(yaml.load(open(template_path, "r", encoding="utf-8").read()))
experiment_dict = experiment._to_dict()
assert isinstance(experiment.nodes[0], CommandNode)
assert isinstance(experiment.nodes[1], FlowNode)
assert isinstance(experiment.nodes[2], FlowNode)
assert isinstance(experiment.nodes[3], CommandNode)
gen_data_snapshot_path = experiment._output_dir / "snapshots" / "gen_data"
echo_snapshot_path = experiment._output_dir / "snapshots" / "echo"
expected["nodes"][0]["code"] = gen_data_snapshot_path.absolute().as_posix()
expected["nodes"][3]["code"] = echo_snapshot_path.absolute().as_posix()
expected["nodes"][3]["environment_variables"] = {}
assert experiment_dict["nodes"][0].items() == expected["nodes"][0].items()
assert experiment_dict["nodes"][3].items() == expected["nodes"][3].items()
# Assert snapshots
assert gen_data_snapshot_path.exists()
file_count = len(list(gen_data_snapshot_path.rglob("*")))
assert file_count == 1
assert (gen_data_snapshot_path / "generate_data.py").exists()
# Assert no file exists in echo path
assert echo_snapshot_path.exists()
file_count = len(list(echo_snapshot_path.rglob("*")))
assert file_count == 0
def test_experiment_create_and_get(self):
template_path = EXP_ROOT / "basic-no-script-template" / "basic.exp.yaml"
# Load template and create experiment
template = load_common(ExperimentTemplate, source=template_path)
experiment = Experiment.from_template(template)
client = PFClient()
exp = client._experiments.create_or_update(experiment)
assert len(client._experiments.list()) > 0
exp_get = client._experiments.get(name=exp.name)
assert exp_get._to_dict() == exp._to_dict()
@pytest.mark.usefixtures("use_secrets_config_file", "recording_injection", "setup_local_connection")
def test_experiment_start(self):
template_path = EXP_ROOT / "basic-no-script-template" / "basic.exp.yaml"
# Load template and create experiment
template = load_common(ExperimentTemplate, source=template_path)
experiment = Experiment.from_template(template)
client = PFClient()
exp = client._experiments.create_or_update(experiment)
exp = client._experiments.start(exp.name)
assert exp.status == ExperimentStatus.TERMINATED
# Assert main run
assert len(exp.node_runs["main"]) > 0
main_run = client.runs.get(name=exp.node_runs["main"][0]["name"])
assert main_run.status == RunStatus.COMPLETED
assert main_run.variant == "${summarize_text_content.variant_0}"
assert main_run.display_name == "main"
assert len(exp.node_runs["eval"]) > 0
# Assert eval run and metrics
eval_run = client.runs.get(name=exp.node_runs["eval"][0]["name"])
assert eval_run.status == RunStatus.COMPLETED
assert eval_run.display_name == "eval"
metrics = client.runs.get_metrics(name=eval_run.name)
assert "accuracy" in metrics
@pytest.mark.usefixtures("use_secrets_config_file", "recording_injection", "setup_local_connection")
def test_experiment_with_script_start(self):
template_path = EXP_ROOT / "basic-script-template" / "basic-script.exp.yaml"
# Load template and create experiment
template = load_common(ExperimentTemplate, source=template_path)
experiment = Experiment.from_template(template)
client = PFClient()
exp = client._experiments.create_or_update(experiment)
exp = client._experiments.start(exp.name)
assert exp.status == ExperimentStatus.TERMINATED
assert len(exp.node_runs) == 4
for key, val in exp.node_runs.items():
assert val[0]["status"] == RunStatus.COMPLETED, f"Node {key} run failed"
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_cli_perf.py | import contextlib
import io
import multiprocessing
import os
import sys
import tempfile
import timeit
import uuid
from pathlib import Path
from unittest import mock
import pytest
from promptflow._cli._user_agent import USER_AGENT as CLI_USER_AGENT # noqa: E402
from promptflow._sdk._telemetry import log_activity
from promptflow._sdk._utils import ClientUserAgentUtil
FLOWS_DIR = "./tests/test_configs/flows"
CONNECTIONS_DIR = "./tests/test_configs/connections"
DATAS_DIR = "./tests/test_configs/datas"
def mock_log_activity(*args, **kwargs):
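    # Wrap log_activity so that every telemetry record emitted during these perf tests
    # carries a "custom_message" pointing at the current GitHub Actions run, whether
    # custom_dimensions is passed positionally (4th argument) or as a keyword.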
custom_message = "github run: https://github.com/microsoft/promptflow/actions/runs/{0}".format(
os.environ.get("GITHUB_RUN_ID")
)
if len(args) == 4:
if args[3] is not None:
args[3]["custom_message"] = custom_message
else:
args = list(args)
args[3] = {"custom_message": custom_message}
elif "custom_dimensions" in kwargs and kwargs["custom_dimensions"] is not None:
kwargs["custom_dimensions"]["custom_message"] = custom_message
else:
kwargs["custom_dimensions"] = {"custom_message": custom_message}
return log_activity(*args, **kwargs)
def run_cli_command(cmd, time_limit=3600, result_queue=None):
from promptflow._cli._pf.entry import main
sys.argv = list(cmd)
output = io.StringIO()
st = timeit.default_timer()
with contextlib.redirect_stdout(output), mock.patch.object(
ClientUserAgentUtil, "get_user_agent"
) as get_user_agent_fun, mock.patch(
"promptflow._sdk._telemetry.activity.log_activity", side_effect=mock_log_activity
), mock.patch(
"promptflow._cli._pf.entry.log_activity", side_effect=mock_log_activity
):
        # Client side will modify the user agent only through ClientUserAgentUtil to avoid impacting the executor/runtime.
get_user_agent_fun.return_value = f"{CLI_USER_AGENT} perf_monitor/1.0"
user_agent = ClientUserAgentUtil.get_user_agent()
assert user_agent == f"{CLI_USER_AGENT} perf_monitor/1.0"
main()
ed = timeit.default_timer()
print(f"{cmd}, \n Total time: {ed - st}s")
assert ed - st < time_limit, f"The time limit is {time_limit}s, but it took {ed - st}s."
res_value = output.getvalue()
if result_queue:
result_queue.put(res_value)
return res_value
def subprocess_run_cli_command(cmd, time_limit=3600):
result_queue = multiprocessing.Queue()
process = multiprocessing.Process(
target=run_cli_command, args=(cmd,), kwargs={"time_limit": time_limit, "result_queue": result_queue}
)
process.start()
process.join()
assert process.exitcode == 0
return result_queue.get_nowait()
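# A minimal usage sketch of the helper above (illustrative only; not collected by
# pytest since the name has no "test_" prefix): run "pf connection list" in a child
# process and return its captured stdout.
def _example_run_connection_list():
    return subprocess_run_cli_command(cmd=("pf", "connection", "list"), time_limit=60)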
@pytest.mark.usefixtures("use_secrets_config_file", "setup_local_connection")
@pytest.mark.perf_monitor_test
class TestCliPerf:
def test_pf_run_create(self, time_limit=20) -> None:
res = subprocess_run_cli_command(
cmd=(
"pf",
"run",
"create",
"--flow",
f"{FLOWS_DIR}/print_input_flow",
"--data",
f"{DATAS_DIR}/print_input_flow.jsonl",
),
time_limit=time_limit,
)
assert "Completed" in res
def test_pf_run_update(self, time_limit=10) -> None:
run_name = str(uuid.uuid4())
run_cli_command(
cmd=(
"pf",
"run",
"create",
"--flow",
f"{FLOWS_DIR}/print_input_flow",
"--data",
f"{DATAS_DIR}/print_input_flow.jsonl",
"--name",
run_name,
)
)
res = subprocess_run_cli_command(
cmd=("pf", "run", "update", "--name", run_name, "--set", "description=test pf run update"),
time_limit=time_limit,
)
assert "Completed" in res
def test_pf_flow_test(self, time_limit=10):
subprocess_run_cli_command(
cmd=(
"pf",
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/print_input_flow",
"--inputs",
"text=https://www.youtube.com/watch?v=o5ZQyXaAv1g",
),
time_limit=time_limit,
)
output_path = Path(FLOWS_DIR) / "print_input_flow" / ".promptflow" / "flow.output.json"
assert output_path.exists()
def test_pf_flow_build(self, time_limit=20):
with tempfile.TemporaryDirectory() as temp_dir:
subprocess_run_cli_command(
cmd=(
"pf",
"flow",
"build",
"--source",
f"{FLOWS_DIR}/print_input_flow/flow.dag.yaml",
"--output",
temp_dir,
"--format",
"docker",
),
time_limit=time_limit,
)
def test_pf_connection_create(self, time_limit=10):
name = f"Connection_{str(uuid.uuid4())[:4]}"
res = subprocess_run_cli_command(
cmd=(
"pf",
"connection",
"create",
"--file",
f"{CONNECTIONS_DIR}/azure_openai_connection.yaml",
"--name",
f"{name}",
),
time_limit=time_limit,
)
assert "api_type" in res
def test_pf_connection_list(self, time_limit=10):
name = "connection_list"
res = run_cli_command(
cmd=(
"pf",
"connection",
"create",
"--file",
f"{CONNECTIONS_DIR}/azure_openai_connection.yaml",
"--name",
f"{name}",
)
)
assert "api_type" in res
res = subprocess_run_cli_command(cmd=("pf", "connection", "list"), time_limit=time_limit)
assert "api_type" in res
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_tool.py | import importlib.util
import json
import sys
from pathlib import Path
from unittest.mock import patch
import pytest
from promptflow import ToolProvider, tool
from promptflow._core.tool_meta_generator import ToolValidationError
from promptflow._sdk._pf_client import PFClient
from promptflow.entities import DynamicList, InputSetting
from promptflow.exceptions import UserErrorException
PROMOTFLOW_ROOT = Path(__file__) / "../../../.."
TEST_ROOT = Path(__file__).parent.parent.parent
TOOL_ROOT = TEST_ROOT / "test_configs/tools"
_client = PFClient()
@pytest.mark.e2etest
class TestTool:
def get_tool_meta(self, tool_path):
module_name = f"test_tool.{Path(tool_path).stem}"
# Load the module from the file path
spec = importlib.util.spec_from_file_location(module_name, tool_path)
module = importlib.util.module_from_spec(spec)
# Load the module's code
spec.loader.exec_module(module)
tools_meta, _ = _client.tools._generate_tool_meta(module)
return tools_meta
def test_python_tool_meta(self):
tool_path = TOOL_ROOT / "python_tool.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.python_tool.PythonTool.python_tool": {
"class_name": "PythonTool",
"function": "python_tool",
"inputs": {"connection": {"type": ["AzureOpenAIConnection"]}, "input1": {"type": ["string"]}},
"module": "test_tool.python_tool",
"name": "PythonTool.python_tool",
"type": "python",
},
"test_tool.python_tool.my_python_tool": {
"function": "my_python_tool",
"inputs": {"input1": {"type": ["string"]}},
"module": "test_tool.python_tool",
"name": "python_tool",
"type": "python",
},
"test_tool.python_tool.my_python_tool_without_name": {
"function": "my_python_tool_without_name",
"inputs": {"input1": {"type": ["string"]}},
"module": "test_tool.python_tool",
"name": "my_python_tool_without_name",
"type": "python",
},
}
assert tool_meta == expect_tool_meta
def test_llm_tool_meta(self):
tool_path = TOOL_ROOT / "custom_llm_tool.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.custom_llm_tool.my_tool": {
"name": "My Custom LLM Tool",
"type": "custom_llm",
"inputs": {"connection": {"type": ["CustomConnection"]}},
"description": "This is a tool to demonstrate the custom_llm tool type",
"module": "test_tool.custom_llm_tool",
"function": "my_tool",
"enable_kwargs": True,
},
"test_tool.custom_llm_tool.TestCustomLLMTool.tool_func": {
"name": "My Custom LLM Tool",
"type": "custom_llm",
"inputs": {"connection": {"type": ["AzureOpenAIConnection"]}, "api": {"type": ["string"]}},
"description": "This is a tool to demonstrate the custom_llm tool type",
"module": "test_tool.custom_llm_tool",
"class_name": "TestCustomLLMTool",
"function": "tool_func",
"enable_kwargs": True,
},
}
assert tool_meta == expect_tool_meta
def test_invalid_tool_type(self):
with pytest.raises(UserErrorException) as exception:
@tool(name="invalid_tool_type", type="invalid_type")
def invalid_tool_type():
pass
assert exception.value.message == "Tool type invalid_type is not supported yet."
def test_tool_with_custom_connection(self):
tool_path = TOOL_ROOT / "tool_with_custom_connection.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.tool_with_custom_connection.MyTool.my_tool": {
"name": "My Second Tool",
"type": "python",
"inputs": {"connection": {"type": ["CustomConnection"]}, "input_text": {"type": ["string"]}},
"description": "This is my second tool",
"module": "test_tool.tool_with_custom_connection",
"class_name": "MyTool",
"function": "my_tool",
}
}
assert tool_meta == expect_tool_meta
tool_path = TOOL_ROOT / "tool_with_custom_strong_type_connection.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.tool_with_custom_strong_type_connection.my_tool": {
"name": "Tool With Custom Strong Type Connection",
"type": "python",
"inputs": {
"connection": {"type": ["CustomConnection"], "custom_type": ["MyCustomConnection"]},
"input_text": {"type": ["string"]},
},
"description": "This is my tool with custom strong type connection.",
"module": "test_tool.tool_with_custom_strong_type_connection",
"function": "my_tool",
}
}
assert tool_meta == expect_tool_meta
def test_tool_with_input_settings(self):
tool_path = TOOL_ROOT / "tool_with_dynamic_list_input.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.tool_with_dynamic_list_input.my_tool": {
"description": "This is my tool with dynamic list input",
"function": "my_tool",
"inputs": {
"endpoint_name": {
"dynamic_list": {
"func_kwargs": [
{
"default": "",
"name": "prefix",
"optional": True,
"reference": "${inputs.input_prefix}",
"type": ["string"],
}
],
"func_path": "test_tool.tool_with_dynamic_list_input.list_endpoint_names",
},
"type": ["string"],
},
"input_prefix": {"type": ["string"]},
"input_text": {
"allow_manual_entry": True,
"dynamic_list": {
"func_kwargs": [
{
"default": "",
"name": "prefix",
"optional": True,
"reference": "${inputs.input_prefix}",
"type": ["string"],
},
{"default": 10, "name": "size", "optional": True, "type": ["int"]},
],
"func_path": "test_tool.tool_with_dynamic_list_input.my_list_func",
},
"is_multi_select": True,
"type": ["list"],
},
},
"module": "test_tool.tool_with_dynamic_list_input",
"name": "My Tool with Dynamic List Input",
"type": "python",
}
}
assert tool_meta == expect_tool_meta
tool_path = TOOL_ROOT / "tool_with_enabled_by_value.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.tool_with_enabled_by_value.my_tool": {
"name": "My Tool with Enabled By Value",
"type": "python",
"inputs": {
"user_type": {"type": ["string"], "enum": ["student", "teacher"]},
"student_id": {"type": ["string"], "enabled_by": "user_type", "enabled_by_value": ["student"]},
"teacher_id": {"type": ["string"], "enabled_by": "user_type", "enabled_by_value": ["teacher"]},
},
"description": "This is my tool with enabled by value",
"module": "test_tool.tool_with_enabled_by_value",
"function": "my_tool",
}
}
assert tool_meta == expect_tool_meta
def test_dynamic_list_with_invalid_reference(self):
def my_list_func(prefix: str, size: int = 10):
pass
# value in reference doesn't exist in tool inputs
invalid_dynamic_list_setting = DynamicList(function=my_list_func, input_mapping={"prefix": "invalid_input"})
input_settings = {
"input_text": InputSetting(
dynamic_list=invalid_dynamic_list_setting, allow_manual_entry=True, is_multi_select=True
)
}
@tool(
name="My Tool with Dynamic List Input",
description="This is my tool with dynamic list input",
input_settings=input_settings,
)
def my_tool(input_text: list, input_prefix: str) -> str:
return f"Hello {input_prefix} {','.join(input_text)}"
with pytest.raises(ToolValidationError) as exception:
_client.tools.validate(my_tool, raise_error=True)
assert "Cannot find invalid_input in the tool inputs." in exception.value.message
# invalid dynamic func input
invalid_dynamic_list_setting = DynamicList(
function=my_list_func, input_mapping={"invalid_input": "input_prefix"}
)
input_settings = {
"input_text": InputSetting(
dynamic_list=invalid_dynamic_list_setting, allow_manual_entry=True, is_multi_select=True
)
}
@tool(
name="My Tool with Dynamic List Input",
description="This is my tool with dynamic list input",
input_settings=input_settings,
)
def my_tool(input_text: list, input_prefix: str) -> str:
return f"Hello {input_prefix} {','.join(input_text)}"
with pytest.raises(ToolValidationError) as exception:
_client.tools.validate(my_tool, raise_error=True)
assert "Cannot find invalid_input in the inputs of dynamic_list func" in exception.value.message
# check required inputs of dynamic list func
invalid_dynamic_list_setting = DynamicList(function=my_list_func, input_mapping={"size": "input_prefix"})
input_settings = {
"input_text": InputSetting(
dynamic_list=invalid_dynamic_list_setting,
)
}
@tool(
name="My Tool with Dynamic List Input",
description="This is my tool with dynamic list input",
input_settings=input_settings,
)
def my_tool(input_text: list, input_prefix: str) -> str:
return f"Hello {input_prefix} {','.join(input_text)}"
with pytest.raises(ToolValidationError) as exception:
_client.tools.validate(my_tool, raise_error=True)
assert "Missing required input(s) of dynamic_list function: ['prefix']" in exception.value.message
def test_enabled_by_with_invalid_input(self):
        # the input referenced by enabled_by doesn't exist in the tool inputs
input1_settings = InputSetting(enabled_by="invalid_input")
@tool(name="enabled_by_with_invalid_input", input_settings={"input1": input1_settings})
def enabled_by_with_invalid_input(input1: str, input2: str):
pass
with pytest.raises(ToolValidationError) as exception:
_client.tools.validate(enabled_by_with_invalid_input, raise_error=True)
assert 'Cannot find the input \\"invalid_input\\"' in exception.value.message
def test_tool_with_file_path_input(self):
tool_path = TOOL_ROOT / "tool_with_file_path_input.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.tool_with_file_path_input.my_tool": {
"name": "Tool with FilePath Input",
"type": "python",
"inputs": {"input_file": {"type": ["file_path"]}, "input_text": {"type": ["string"]}},
"description": "This is a tool to demonstrate the usage of FilePath input",
"module": "test_tool.tool_with_file_path_input",
"function": "my_tool",
}
}
assert expect_tool_meta == tool_meta
def test_tool_with_generated_by_input(self):
tool_path = TOOL_ROOT / "tool_with_generated_by_input.py"
tool_meta = self.get_tool_meta(tool_path)
with open(TOOL_ROOT / "expected_generated_by_meta.json", "r") as f:
expect_tool_meta = json.load(f)
assert expect_tool_meta == tool_meta
def test_validate_tool_script(self):
tool_script_path = TOOL_ROOT / "custom_llm_tool.py"
result = _client.tools.validate(tool_script_path)
assert result.passed
tool_script_path = TOOL_ROOT / "tool_with_dynamic_list_input.py"
result = _client.tools.validate(tool_script_path)
assert result.passed
tool_script_path = TOOL_ROOT / "invalid_tool.py"
result = _client.tools.validate(tool_script_path)
assert len(result._errors) == 4
assert "1 is not of type 'string'" in result.error_messages["invalid_schema_type"]
assert (
"Cannot provide both `icon` and `icon_light` or `icon_dark`." in result.error_messages["invalid_tool_icon"]
)
assert (
'Cannot find the input "invalid_input" for the enabled_by of teacher_id.'
in result.error_messages["invalid_input_settings"]
)
assert (
'Cannot find the input "invalid_input" for the enabled_by of student_id.'
in result.error_messages["invalid_input_settings"]
)
assert all(str(tool_script_path) == item.location for item in result._errors)
with pytest.raises(ToolValidationError):
_client.tools.validate(TOOL_ROOT / "invalid_tool.py", raise_error=True)
def test_validate_tool_func(self):
def load_module_by_path(source):
module_name = Path(source).stem
spec = importlib.util.spec_from_file_location(module_name, source)
module = importlib.util.module_from_spec(spec)
# Load the module's code
spec.loader.exec_module(module)
return module
tool_script_path = TOOL_ROOT / "custom_llm_tool.py"
module = load_module_by_path(tool_script_path)
tool_func = getattr(module, "my_tool")
result = _client.tools.validate(tool_func)
assert result.passed
tool_script_path = TOOL_ROOT / "invalid_tool.py"
module = load_module_by_path(tool_script_path)
tool_func = getattr(module, "invalid_schema_type")
result = _client.tools.validate(tool_func)
assert "invalid_schema_type" in result.error_messages
assert "1 is not of type 'string'" in result.error_messages["invalid_schema_type"]
assert "invalid_schema_type" == result._errors[0].function_name
assert str(tool_script_path) == result._errors[0].location
with pytest.raises(ToolValidationError):
_client.tools.validate(tool_func, raise_error=True)
def test_validate_package_tool(self):
package_tool_path = TOOL_ROOT / "tool_package"
sys.path.append(str(package_tool_path.resolve()))
import tool_package
with patch("promptflow._sdk.operations._tool_operations.ToolOperations._is_package_tool", return_value=True):
result = _client.tools.validate(tool_package)
assert len(result._errors) == 4
assert "1 is not of type 'string'" in result.error_messages["invalid_schema_type"]
assert (
"Cannot provide both `icon` and `icon_light` or `icon_dark`." in result.error_messages["invalid_tool_icon"]
)
assert (
'Cannot find the input "invalid_input" for the enabled_by of teacher_id.'
in result.error_messages["invalid_input_settings"]
)
assert (
'Cannot find the input "invalid_input" for the enabled_by of student_id.'
in result.error_messages["invalid_input_settings"]
)
def test_validate_tool_class(self):
from promptflow.tools.serpapi import SerpAPI
result = _client.tools.validate(SerpAPI)
assert result.passed
class InvalidToolClass(ToolProvider):
def __init__(self):
super().__init__()
@tool(name="My Custom Tool")
def tool_func(self, api: str):
pass
@tool(name=1)
def invalid_tool_func(self, api: str):
pass
result = _client.tools.validate(InvalidToolClass)
assert not result.passed
assert result._kwargs["total_count"] == 2
assert result._kwargs["invalid_count"] == 1
assert len(result._errors) == 1
assert "1 is not of type 'string'" in result._errors[0].message
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_executable.py | import subprocess
import sys
import tempfile
from pathlib import Path
import mock
import pytest
from .test_cli import run_pf_command
FLOWS_DIR = "./tests/test_configs/flows"
RUNS_DIR = "./tests/test_configs/runs"
CONNECTIONS_DIR = "./tests/test_configs/connections"
DATAS_DIR = "./tests/test_configs/datas"
@pytest.mark.usefixtures("use_secrets_config_file", "setup_local_connection", "install_custom_tool_pkg")
@pytest.mark.cli_test
@pytest.mark.e2etest
class TestExecutable:
@pytest.mark.skipif(
sys.platform == "win32" or sys.platform == "darwin",
reason="Raise Exception: Process terminated with exit code 4294967295",
)
def test_flow_build_executable(self):
source = f"{FLOWS_DIR}/web_classification/flow.dag.yaml"
target = "promptflow._sdk.operations._flow_operations.FlowOperations._run_pyinstaller"
with mock.patch(target) as mocked:
mocked.return_value = None
with tempfile.TemporaryDirectory() as temp_dir:
run_pf_command(
"flow",
"build",
"--source",
source,
"--output",
temp_dir,
"--format",
"executable",
)
# Start the Python script as a subprocess
app_file = Path(temp_dir, "app.py").as_posix()
process = subprocess.Popen(["python", app_file], stderr=subprocess.PIPE)
try:
# Wait for a specified time (in seconds)
wait_time = 5
process.wait(timeout=wait_time)
if process.returncode == 0:
pass
else:
raise Exception(
f"Process terminated with exit code {process.returncode}, "
f"{process.stderr.read().decode('utf-8')}"
)
except (subprocess.TimeoutExpired, KeyboardInterrupt):
pass
finally:
# Kill the process
process.terminate()
process.wait() # Ensure the process is fully terminated
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_as_func.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import shutil
from pathlib import Path
from tempfile import TemporaryDirectory
from types import GeneratorType
import pytest
from promptflow import load_flow
from promptflow._sdk._errors import ConnectionNotFoundError, InvalidFlowError
from promptflow._sdk.entities import CustomConnection
from promptflow._sdk.operations._flow_context_resolver import FlowContextResolver
from promptflow._utils.flow_utils import dump_flow_dag, load_flow_dag
from promptflow.entities import FlowContext
from promptflow.exceptions import UserErrorException
FLOWS_DIR = "./tests/test_configs/flows"
RUNS_DIR = "./tests/test_configs/runs"
DATAS_DIR = "./tests/test_configs/datas"
@pytest.mark.usefixtures(
"use_secrets_config_file", "recording_injection", "setup_local_connection", "install_custom_tool_pkg"
)
@pytest.mark.sdk_test
@pytest.mark.e2etest
class TestFlowAsFunc:
def test_flow_as_a_func(self):
f = load_flow(f"{FLOWS_DIR}/print_env_var")
result = f(key="unknown")
assert result["output"] is None
assert "line_number" not in result
def test_flow_as_a_func_with_connection_overwrite(self):
from promptflow._sdk._errors import ConnectionNotFoundError
f = load_flow(f"{FLOWS_DIR}/web_classification")
f.context.connections = {"classify_with_llm": {"connection": "not_exist"}}
with pytest.raises(ConnectionNotFoundError) as e:
f(url="https://www.youtube.com/watch?v=o5ZQyXaAv1g")
assert "Connection 'not_exist' is not found" in str(e.value)
def test_flow_as_a_func_with_connection_obj(self):
f = load_flow(f"{FLOWS_DIR}/flow_with_custom_connection")
f.context.connections = {"hello_node": {"connection": CustomConnection(secrets={"k": "v"})}}
result = f(text="hello")
assert result["output"]["secrets"] == {"k": "v"}
def test_overrides(self):
f = load_flow(f"{FLOWS_DIR}/print_env_var")
f.context = FlowContext(
# node print_env will take "provided_key" instead of flow input
overrides={"nodes.print_env.inputs.key": "provided_key"},
)
# the key="unknown" will not take effect
result = f(key="unknown")
assert result["output"] is None
@pytest.mark.skip(reason="This experience has not finalized yet.")
def test_flow_as_a_func_with_token_based_connection(self):
class MyCustomConnection(CustomConnection):
def get_token(self):
return "fake_token"
f = load_flow(f"{FLOWS_DIR}/flow_with_custom_connection")
f.context.connections = {"hello_node": {"connection": MyCustomConnection(secrets={"k": "v"})}}
result = f(text="hello")
assert result == {}
def test_exception_handle(self):
f = load_flow(f"{FLOWS_DIR}/flow_with_invalid_import")
with pytest.raises(UserErrorException) as e:
f(text="hello")
assert "Failed to load python module " in str(e.value)
f = load_flow(f"{FLOWS_DIR}/print_env_var")
with pytest.raises(UserErrorException) as e:
f()
assert "Required input fields ['key'] are missing" in str(e.value)
def test_stream_output(self):
f = load_flow(f"{FLOWS_DIR}/chat_flow_with_python_node_streaming_output")
f.context.streaming = True
result = f(
chat_history=[
{"inputs": {"chat_input": "Hi"}, "outputs": {"chat_output": "Hello! How can I assist you today?"}}
],
question="How are you?",
)
assert isinstance(result["answer"], GeneratorType)
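        # With streaming enabled, the answer comes back lazily as a generator; a caller would typically
        # consume it chunk by chunk, e.g. (illustrative sketch, not asserted here):
        #   streamed_answer = "".join(chunk for chunk in result["answer"])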
@pytest.mark.skip(reason="This experience has not finalized yet.")
def test_environment_variables(self):
f = load_flow(f"{FLOWS_DIR}/print_env_var")
f.context.environment_variables = {"key": "value"}
result = f(key="key")
assert result["output"] == "value"
def test_flow_as_a_func_with_variant(self):
flow_path = Path(f"{FLOWS_DIR}/flow_with_dict_input_with_variant").absolute()
f = load_flow(
flow_path,
)
f.context.variant = "${print_val.variant1}"
# variant1 will use a mock_custom_connection
with pytest.raises(ConnectionNotFoundError) as e:
f(key="a")
assert "Connection 'mock_custom_connection' is not found." in str(e.value)
        # non-existent variant
f.context.variant = "${print_val.variant_2}"
with pytest.raises(InvalidFlowError) as e:
f(key="a")
assert "Variant variant_2 not found for node print_val" in str(e.value)
def test_non_scrubbed_connection(self):
f = load_flow(f"{FLOWS_DIR}/flow_with_custom_connection")
f.context.connections = {"hello_node": {"connection": CustomConnection(secrets={"k": "*****"})}}
with pytest.raises(UserErrorException) as e:
f(text="hello")
assert "please make sure connection has decrypted secrets to use in flow execution." in str(e)
def test_local_connection_object(self, pf, azure_open_ai_connection):
f = load_flow(f"{FLOWS_DIR}/flow_with_custom_connection")
        # a local connection without secrets will lead to an error
connection = pf.connections.get("azure_open_ai_connection", with_secrets=False)
f.context.connections = {"hello_node": {"connection": connection}}
with pytest.raises(UserErrorException) as e:
f(text="hello")
assert "please make sure connection has decrypted secrets to use in flow execution." in str(e)
def test_non_secret_connection(self):
f = load_flow(f"{FLOWS_DIR}/flow_with_custom_connection")
        # executing with a connection that has no secrets won't raise an error since there are no scrubbed secrets
        # we only raise an error when the connection contains scrubbed secrets
f.context.connections = {"hello_node": {"connection": CustomConnection(secrets={})}}
f(text="hello")
def test_flow_context_cache(self):
        # the same flow context has the same hash
assert hash(FlowContext()) == hash(FlowContext())
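        # Assumption: FlowContextResolver caches resolved executors keyed by the (flow, context) hash,
        # which is why equal flow/context pairs below resolve to the very same executor instance.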
        # getting the executor for the same flow will hit the cache
flow1 = load_flow(f"{FLOWS_DIR}/print_env_var")
flow2 = load_flow(f"{FLOWS_DIR}/print_env_var")
flow_executor1 = FlowContextResolver.resolve(
flow=flow1,
)
flow_executor2 = FlowContextResolver.resolve(
flow=flow2,
)
assert flow_executor1 is flow_executor2
        # getting the executor for the same flow + context will hit the cache
flow1 = load_flow(f"{FLOWS_DIR}/flow_with_custom_connection")
flow1.context = FlowContext(connections={"hello_node": {"connection": CustomConnection(secrets={"k": "v"})}})
flow2 = load_flow(f"{FLOWS_DIR}/flow_with_custom_connection")
flow2.context = FlowContext(connections={"hello_node": {"connection": CustomConnection(secrets={"k": "v"})}})
flow_executor1 = FlowContextResolver.resolve(
flow=flow1,
)
flow_executor2 = FlowContextResolver.resolve(
flow=flow2,
)
assert flow_executor1 is flow_executor2
flow1 = load_flow(f"{FLOWS_DIR}/flow_with_dict_input_with_variant")
flow1.context = FlowContext(
variant="${print_val.variant1}",
connections={"print_val": {"conn": CustomConnection(secrets={"k": "v"})}},
overrides={"nodes.print_val.inputs.key": "a"},
)
flow2 = load_flow(f"{FLOWS_DIR}/flow_with_dict_input_with_variant")
flow2.context = FlowContext(
variant="${print_val.variant1}",
connections={"print_val": {"conn": CustomConnection(secrets={"k": "v"})}},
overrides={"nodes.print_val.inputs.key": "a"},
)
flow_executor1 = FlowContextResolver.resolve(flow=flow1)
flow_executor2 = FlowContextResolver.resolve(flow=flow2)
assert flow_executor1 is flow_executor2
def test_flow_cache_not_hit(self):
with TemporaryDirectory() as tmp_dir:
shutil.copytree(f"{FLOWS_DIR}/print_env_var", f"{tmp_dir}/print_env_var")
flow_path = Path(f"{tmp_dir}/print_env_var")
            # loading the same file with different content will not hit the cache
flow1 = load_flow(flow_path)
# update content
_, flow_dag = load_flow_dag(flow_path)
flow_dag["inputs"] = {"key": {"type": "string", "default": "key1"}}
dump_flow_dag(flow_dag, flow_path)
flow2 = load_flow(f"{tmp_dir}/print_env_var")
flow_executor1 = FlowContextResolver.resolve(
flow=flow1,
)
flow_executor2 = FlowContextResolver.resolve(
flow=flow2,
)
assert flow_executor1 is not flow_executor2
def test_flow_context_cache_not_hit(self):
flow1 = load_flow(f"{FLOWS_DIR}/flow_with_custom_connection")
flow1.context = FlowContext(connections={"hello_node": {"connection": CustomConnection(secrets={"k": "v"})}})
flow2 = load_flow(f"{FLOWS_DIR}/flow_with_custom_connection")
flow2.context = FlowContext(connections={"hello_node": {"connection": CustomConnection(secrets={"k2": "v"})}})
flow_executor1 = FlowContextResolver.resolve(
flow=flow1,
)
flow_executor2 = FlowContextResolver.resolve(
flow=flow2,
)
assert flow_executor1 is not flow_executor2
flow1 = load_flow(f"{FLOWS_DIR}/flow_with_dict_input_with_variant")
flow1.context = FlowContext(
variant="${print_val.variant1}",
connections={"print_val": {"conn": CustomConnection(secrets={"k": "v"})}},
overrides={"nodes.print_val.inputs.key": "a"},
)
flow2 = load_flow(f"{FLOWS_DIR}/flow_with_dict_input_with_variant")
flow2.context = FlowContext(
variant="${print_val.variant1}",
connections={"print_val": {"conn": CustomConnection(secrets={"k": "v"})}},
overrides={"nodes.print_val.inputs.key": "b"},
)
flow_executor1 = FlowContextResolver.resolve(flow=flow1)
flow_executor2 = FlowContextResolver.resolve(flow=flow2)
assert flow_executor1 is not flow_executor2
@pytest.mark.timeout(10)
def test_flow_as_func_perf_test(self):
# this test should not take long due to caching logic
f = load_flow(f"{FLOWS_DIR}/print_env_var")
for i in range(100):
f(key="key")
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_cli.py | import importlib
import importlib.util
import json
import logging
import multiprocessing
import os
import os.path
import shutil
import sys
import tempfile
import uuid
from pathlib import Path
from tempfile import mkdtemp
from typing import Dict, List
from unittest.mock import patch
import mock
import pytest
from promptflow._cli._pf.entry import main
from promptflow._constants import PF_USER_AGENT
from promptflow._core.operation_context import OperationContext
from promptflow._sdk._constants import LOGGER_NAME, SCRUBBED_VALUE, ExperimentStatus
from promptflow._sdk._errors import RunNotFoundError
from promptflow._sdk._utils import ClientUserAgentUtil, setup_user_agent_to_operation_context
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
from promptflow._sdk.operations._run_operations import RunOperations
from promptflow._utils.context_utils import _change_working_dir
from promptflow._utils.utils import environment_variable_overwrite, parse_ua_to_dict
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow.exceptions import UserErrorException
FLOWS_DIR = "./tests/test_configs/flows"
EXPERIMENT_DIR = "./tests/test_configs/experiments"
RUNS_DIR = "./tests/test_configs/runs"
CONNECTIONS_DIR = "./tests/test_configs/connections"
DATAS_DIR = "./tests/test_configs/datas"
TOOL_ROOT = "./tests/test_configs/tools"
TARGET_URL = "https://www.youtube.com/watch?v=o5ZQyXaAv1g"
# TODO: move this to a shared utility module
def run_pf_command(*args, cwd=None):
"""Run a pf command with the given arguments and working directory.
    There have been some unknown issues when using subprocess on CI, so we use this function instead, which also
    provides a better debugging experience.
"""
origin_argv, origin_cwd = sys.argv, os.path.abspath(os.curdir)
try:
sys.argv = ["pf"] + list(args)
if cwd:
os.chdir(cwd)
main()
finally:
sys.argv = origin_argv
os.chdir(origin_cwd)
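# Illustrative usage (sketch): the tests below drive the CLI in-process, e.g.
#   run_pf_command("flow", "test", "--flow", f"{FLOWS_DIR}/web_classification")
# which is equivalent to running `pf flow test --flow <flow>` in a shell.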
def run_batch(local_client, line_timeout_seconds, timeout_index=None):
os.environ["PF_LINE_TIMEOUT_SEC"] = line_timeout_seconds
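    # Assumption: PF_LINE_TIMEOUT_SEC controls the per-line execution timeout enforced by the executor,
    # which is what the LineExecutionTimeoutError assertions below rely on.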
run_id = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/simple_flow_with_ten_inputs",
"--data",
f"{FLOWS_DIR}/simple_flow_with_ten_inputs/data.jsonl",
"--name",
run_id,
)
run = local_client.runs.get(name=run_id)
local_storage = LocalStorageOperations(run)
detail = local_storage.load_detail()
flow_runs_list = detail["flow_runs"]
for i, flow_run in enumerate(flow_runs_list):
if i == timeout_index:
assert flow_run["status"] == "Failed"
            assert flow_run["error"]["message"] == (
                f"Line {i} execution timeout for exceeding {line_timeout_seconds} seconds"
            )
assert flow_run["error"]["code"] == "UserError"
assert flow_run["error"]["innerError"]["code"] == "LineExecutionTimeoutError"
else:
assert flow_run["status"] == "Completed"
os.environ.pop("PF_LINE_TIMEOUT_SEC")
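# Note: run_batch is executed in a separate process (see test_batch_run_timeout below), presumably so that
# the PF_LINE_TIMEOUT_SEC environment variable does not leak into other tests. Illustrative sketch:
#   p = multiprocessing.Process(target=run_batch, args=(local_client, "600"))
#   p.start(); p.join(); assert p.exitcode == 0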
@pytest.mark.usefixtures(
"use_secrets_config_file", "recording_injection", "setup_local_connection", "install_custom_tool_pkg"
)
@pytest.mark.cli_test
@pytest.mark.e2etest
class TestCli:
def test_pf_version(self, capfd):
run_pf_command("--version")
out, _ = capfd.readouterr()
assert "0.0.1\n" in out
def test_basic_flow_run(self, capfd) -> None:
# fetch std out
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--data",
f"{DATAS_DIR}/webClassification3.jsonl",
"--name",
str(uuid.uuid4()),
)
out, _ = capfd.readouterr()
assert "Completed" in out
def test_basic_flow_run_batch_and_eval(self, capfd) -> None:
run_id = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--data",
f"{DATAS_DIR}/webClassification3.jsonl",
"--name",
run_id,
)
out, _ = capfd.readouterr()
assert "Completed" in out
        # Check the CLI works correctly when the parameter is surrounded by quotation marks, as shown below:
        # --param "key=value" key="value"
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/classification_accuracy_evaluation",
"--column-mapping",
"'groundtruth=${data.answer}'",
"prediction='${run.outputs.category}'",
"variant_id=${data.variant_id}",
"--data",
f"{DATAS_DIR}/webClassification3.jsonl",
"--run",
run_id,
)
out, _ = capfd.readouterr()
assert "Completed" in out
def test_submit_run_with_yaml(self, capfd):
run_id = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--file",
"./sample_bulk_run.yaml",
"--name",
run_id,
cwd=f"{RUNS_DIR}",
)
out, _ = capfd.readouterr()
assert "Completed" in out
run_pf_command(
"run",
"create",
"--file",
"./sample_eval_run.yaml",
"--run",
run_id,
cwd=f"{RUNS_DIR}",
)
out, _ = capfd.readouterr()
assert "Completed" in out
def test_submit_batch_variant(self, local_client):
run_id = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--data",
f"{DATAS_DIR}/webClassification3.jsonl",
"--name",
run_id,
"--variant",
"${summarize_text_content.variant_0}",
)
run = local_client.runs.get(name=run_id)
local_storage = LocalStorageOperations(run)
detail = local_storage.load_detail()
tuning_node = next((x for x in detail["node_runs"] if x["node"] == "summarize_text_content"), None)
        # uses the variant_0 config; the default variant is variant_1
assert tuning_node["inputs"]["temperature"] == 0.2
def test_environment_variable_overwrite(self, local_client, local_aoai_connection):
run_id = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--name",
run_id,
"--flow",
f"{FLOWS_DIR}/print_env_var",
"--data",
f"{DATAS_DIR}/env_var_names.jsonl",
"--environment-variables",
"API_BASE=${azure_open_ai_connection.api_base}",
)
outputs = local_client.runs._get_outputs(run=run_id)
assert outputs["output"][0] == local_aoai_connection.api_base
def test_connection_overwrite(self, local_alt_aoai_connection, capfd):
        # CLI command will fail with SystemExit
with pytest.raises(SystemExit):
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--data",
f"{DATAS_DIR}/webClassification3.jsonl",
"--connection",
"classify_with_llm.connection=not_exist",
)
out, _ = capfd.readouterr()
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--data",
f"{DATAS_DIR}/webClassification3.jsonl",
"--connection",
"classify_with_llm.connection=new_ai_connection",
)
out, _ = capfd.readouterr()
assert "Completed" in out
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--data",
f"{DATAS_DIR}/webClassification3.jsonl",
"--connection",
"classify_with_llm.model=new_model",
)
out, _ = capfd.readouterr()
assert "Completed" in out
def test_create_with_set(self, local_client):
run_id = str(uuid.uuid4())
display_name = "test_run"
description = "test description"
run_pf_command(
"run",
"create",
"--name",
run_id,
"--flow",
f"{FLOWS_DIR}/print_env_var",
"--data",
f"{DATAS_DIR}/env_var_names.jsonl",
"--environment-variables",
"API_BASE=${azure_open_ai_connection.api_base}",
"--set",
f"display_name={display_name}",
"tags.key=val",
f"description={description}",
)
run = local_client.runs.get(run_id)
assert display_name in run.display_name
assert run.tags == {"key": "val"}
assert run.description == description
run_id = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--file",
"./sample_bulk_run.yaml",
"--name",
run_id,
"--set",
f"display_name={display_name}",
"tags.key=val",
f"description={description}",
cwd=f"{RUNS_DIR}",
)
        run = local_client.runs.get(run_id)
        assert display_name in run.display_name
assert run.tags == {"key": "val"}
assert run.description == description
def test_pf_flow_test(self):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--inputs",
"url=https://www.youtube.com/watch?v=o5ZQyXaAv1g",
"answer=Channel",
"evidence=Url",
)
output_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / "flow.output.json"
assert output_path.exists()
log_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / "flow.log"
with open(log_path, "r") as f:
previous_log_content = f.read()
# Test without input
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
)
output_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / "flow.output.json"
assert output_path.exists()
log_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / "flow.log"
with open(log_path, "r") as f:
log_content = f.read()
assert previous_log_content not in log_content
def test_pf_flow_test_with_non_english_input_output(self, capsys):
question = "什么是 chat gpt"
run_pf_command("flow", "test", "--flow", f"{FLOWS_DIR}/chat_flow", "--inputs", f'question="{question}"')
stdout, _ = capsys.readouterr()
output_path = Path(FLOWS_DIR) / "chat_flow" / ".promptflow" / "flow.output.json"
assert output_path.exists()
with open(output_path, "r", encoding="utf-8") as f:
outputs = json.load(f)
assert outputs["answer"] in json.loads(stdout)["answer"]
detail_path = Path(FLOWS_DIR) / "chat_flow" / ".promptflow" / "flow.detail.json"
assert detail_path.exists()
with open(detail_path, "r", encoding="utf-8") as f:
detail = json.load(f)
assert detail["flow_runs"][0]["inputs"]["question"] == question
assert detail["flow_runs"][0]["output"]["answer"] == outputs["answer"]
def test_pf_flow_with_variant(self, capsys):
with tempfile.TemporaryDirectory() as temp_dir:
shutil.copytree((Path(FLOWS_DIR) / "web_classification").resolve().as_posix(), temp_dir, dirs_exist_ok=True)
dag = Path(temp_dir) / "flow.dag.yaml"
flow_dict = load_yaml(dag)
node_name = "summarize_text_content"
node = next(filter(lambda item: item["name"] == node_name, flow_dict["nodes"]))
flow_dict["nodes"].remove(node)
flow_dict["nodes"].append({"name": node_name, "use_variants": True})
with open(Path(temp_dir) / "flow.dag.yaml", "w") as f:
dump_yaml(flow_dict, f)
run_pf_command(
"flow",
"test",
"--flow",
temp_dir,
"--inputs",
"url=https://www.youtube.com/watch?v=o5ZQyXaAv1g",
"answer=Channel",
"evidence=Url",
)
output_path = Path(temp_dir) / ".promptflow" / "flow.output.json"
assert output_path.exists()
run_pf_command(
"flow",
"test",
"--flow",
temp_dir,
"--inputs",
"url=https://www.youtube.com/watch?v=o5ZQyXaAv1g",
"answer=Channel",
"evidence=Url",
"--variant",
"'${summarize_text_content.variant_1}'",
)
output_path = Path(temp_dir) / ".promptflow" / "flow-summarize_text_content-variant_1.output.json"
assert output_path.exists()
# Test flow dag with invalid format
node_name = flow_dict["nodes"][0]["name"]
flow_dict["nodes"][0]["use_variants"] = True
flow_dict["node_variants"][node_name] = {
"default_variant_id": "invalid_variant",
"variants": [{"variant_0": {}}],
}
with open(Path(temp_dir) / "flow.dag.yaml", "w") as f:
dump_yaml(flow_dict, f)
with pytest.raises(SystemExit):
run_pf_command(
"flow",
"test",
"--flow",
temp_dir,
"--inputs",
"url=https://www.youtube.com/watch?v=o5ZQyXaAv1g",
"answer=Channel",
"evidence=Url",
"--variant",
"${summarize_text_content.variant_1}",
)
outerr = capsys.readouterr()
assert f"Cannot find the variant invalid_variant for {node_name}." in outerr.out
def test_pf_flow_test_single_node(self):
node_name = "fetch_text_content_from_url"
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--inputs",
"inputs.url="
"https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"--node",
node_name,
)
output_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / f"flow-{node_name}.node.detail.json"
assert output_path.exists()
node_name = "fetch_text_content_from_url"
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--inputs",
"url="
"https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"--node",
node_name,
)
output_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / f"flow-{node_name}.node.detail.json"
assert output_path.exists()
# Test node with node reference input
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--inputs",
'input_str={"category": "App", "evidence": "URL"}',
"--node",
"convert_to_dict",
)
output_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / "flow-convert_to_dict.node.detail.json"
assert output_path.exists()
# Test without input
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--node",
node_name,
)
output_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / f"flow-{node_name}.node.detail.json"
assert output_path.exists()
# Test with input file
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--node",
node_name,
"--input",
f"{FLOWS_DIR}/web_classification/{node_name}_input.jsonl",
)
output_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / f"flow-{node_name}.node.detail.json"
assert output_path.exists()
        # Test with input file passed via --inputs
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--node",
node_name,
"--inputs",
f"{FLOWS_DIR}/web_classification/{node_name}_input.jsonl",
)
output_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / f"flow-{node_name}.node.detail.json"
assert output_path.exists()
def test_pf_flow_test_debug_single_node(self):
node_name = "fetch_text_content_from_url"
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--inputs",
"inputs.url="
"https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"--node",
node_name,
"--debug",
)
# Debug node with node reference input
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--inputs",
'classify_with_llm.output={"category": "App", "evidence": "URL"}',
"--node",
"convert_to_dict",
"--debug",
)
def test_pf_flow_test_with_additional_includes(self):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification_with_additional_include",
"--inputs",
"url=https://www.youtube.com/watch?v=o5ZQyXaAv1g",
"answer=Channel",
"evidence=Url",
)
output_path = (
Path(FLOWS_DIR) / "web_classification_with_additional_include" / ".promptflow" / "flow.output.json"
)
assert output_path.exists()
node_name = "fetch_text_content_from_url"
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification_with_additional_include",
"--inputs",
"inputs.url="
"https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"--node",
node_name,
)
def test_pf_flow_test_with_symbolic(self, prepare_symbolic_flow):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification_with_symbolic",
"--inputs",
"url=https://www.youtube.com/watch?v=o5ZQyXaAv1g",
"answer=Channel",
"evidence=Url",
)
output_path = Path(FLOWS_DIR) / "web_classification_with_symbolic" / ".promptflow" / "flow.output.json"
assert output_path.exists()
node_name = "fetch_text_content_from_url"
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification_with_symbolic",
"--inputs",
"inputs.url="
"https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"--node",
node_name,
)
@pytest.mark.parametrize(
"flow_folder_name, env_key, except_value",
[
pytest.param(
"print_env_var",
"API_BASE",
"${azure_open_ai_connection.api_base}",
id="TestFlowWithEnvironmentVariables",
),
pytest.param(
"flow_with_environment_variables",
"env1",
"2",
id="LoadEnvVariablesWithoutOverridesInYaml",
),
],
)
def test_flow_test_with_environment_variable(self, flow_folder_name, env_key, except_value, local_client):
from promptflow._sdk._submitter.utils import SubmitterHelper
def validate_stdout(detail_path):
with open(detail_path, "r") as f:
details = json.load(f)
assert details["node_runs"][0]["logs"]["stdout"]
env = {env_key: except_value}
SubmitterHelper.resolve_environment_variables(env, local_client)
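        # Assumption: this resolves ${connection.key} style references in `env` in place using the local client,
        # so env[env_key] below compares against the resolved value rather than the raw reference string.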
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/{flow_folder_name}",
"--inputs",
f"key={env_key}",
"--environment-variables",
"API_BASE=${azure_open_ai_connection.api_base}",
)
with open(Path(FLOWS_DIR) / flow_folder_name / ".promptflow" / "flow.output.json", "r") as f:
outputs = json.load(f)
assert outputs["output"] == env[env_key]
validate_stdout(Path(FLOWS_DIR) / flow_folder_name / ".promptflow" / "flow.detail.json")
# Test log contains user printed outputs
log_path = Path(FLOWS_DIR) / flow_folder_name / ".promptflow" / "flow.log"
with open(log_path, "r") as f:
log_content = f.read()
assert env[env_key] in log_content
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/{flow_folder_name}",
"--inputs",
f"inputs.key={env_key}",
"--environment-variables",
"API_BASE=${azure_open_ai_connection.api_base}",
"--node",
"print_env",
)
with open(Path(FLOWS_DIR) / flow_folder_name / ".promptflow" / "flow-print_env.node.output.json", "r") as f:
outputs = json.load(f)
assert outputs["value"] == env[env_key]
validate_stdout(Path(FLOWS_DIR) / flow_folder_name / ".promptflow" / "flow-print_env.node.detail.json")
def _validate_requirement(self, flow_path):
with open(flow_path) as f:
flow_dict = load_yaml(f)
assert flow_dict.get("environment", {}).get("python_requirements_txt", None)
assert (flow_path.parent / flow_dict["environment"]["python_requirements_txt"]).exists()
def test_flow_with_exception(self, capsys):
with pytest.raises(SystemExit):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification_with_exception",
)
captured = capsys.readouterr()
assert "Execution failure in 'convert_to_dict': (Exception) mock exception" in captured.out
output_path = Path(FLOWS_DIR) / "web_classification_with_exception" / ".promptflow" / "flow.detail.json"
assert output_path.exists()
with pytest.raises(SystemExit):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification_with_exception",
"--inputs",
'classify_with_llm.output={"category": "App", "evidence": "URL"}',
"--node",
"convert_to_dict",
)
captured = capsys.readouterr()
assert "convert_to_dict.py" in captured.out
assert "mock exception" in captured.out
output_path = (
Path(FLOWS_DIR)
/ "web_classification_with_exception"
/ ".promptflow"
/ "flow-convert_to_dict.node.detail.json"
)
assert output_path.exists()
def test_init_eval_flow(self):
temp_dir = mkdtemp()
with _change_working_dir(temp_dir):
flow_name = "eval_flow"
# Init standard flow
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--type",
"evaluation",
)
ignore_file_path = Path(temp_dir) / flow_name / ".gitignore"
assert ignore_file_path.exists()
ignore_file_path.unlink()
# TODO remove variant_id & line_number in evaluate template
run_pf_command("flow", "test", "--flow", flow_name, "--inputs", "groundtruth=App", "prediction=App")
self._validate_requirement(Path(temp_dir) / flow_name / "flow.dag.yaml")
def test_init_chat_flow(self):
temp_dir = mkdtemp()
with _change_working_dir(temp_dir):
flow_name = "chat_flow"
# Init standard flow
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--type",
"chat",
)
ignore_file_path = Path(temp_dir) / flow_name / ".gitignore"
assert ignore_file_path.exists()
ignore_file_path.unlink()
            # Only the Azure OpenAI connection exists in the test env
with open(Path(temp_dir) / flow_name / "flow.dag.yaml", "r") as f:
flow_dict = load_yaml(f)
flow_dict["nodes"][0]["provider"] = "AzureOpenAI"
flow_dict["nodes"][0]["connection"] = "azure_open_ai_connection"
with open(Path(temp_dir) / flow_name / "flow.dag.yaml", "w") as f:
dump_yaml(flow_dict, f)
run_pf_command("flow", "test", "--flow", flow_name, "--inputs", "question=hi")
self._validate_requirement(Path(temp_dir) / flow_name / "flow.dag.yaml")
def test_flow_init(self, capsys):
temp_dir = mkdtemp()
with _change_working_dir(temp_dir):
flow_name = "standard_flow"
# Init standard flow
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--type",
"standard",
)
self._validate_requirement(Path(temp_dir) / flow_name / "flow.dag.yaml")
ignore_file_path = Path(temp_dir) / flow_name / ".gitignore"
requirements_file_path = Path(temp_dir) / flow_name / "requirements.txt"
assert ignore_file_path.exists()
assert requirements_file_path.exists()
ignore_file_path.unlink()
run_pf_command("flow", "test", "--flow", flow_name, "--inputs", "text=value")
jinja_name = "input1"
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--entry",
"hello.py",
"--function",
"my_python_tool",
"--prompt-template",
f"{jinja_name}=hello.jinja2",
)
self._validate_requirement(Path(temp_dir) / flow_name / "flow.dag.yaml")
assert ignore_file_path.exists()
assert requirements_file_path.exists()
with open(Path(temp_dir) / flow_name / ".promptflow" / "flow.tools.json", "r") as f:
tools_dict = json.load(f)["code"]
assert jinja_name in tools_dict
assert len(tools_dict[jinja_name]["inputs"]) == 1
assert tools_dict[jinja_name]["inputs"]["text"]["type"] == ["string"]
assert tools_dict[jinja_name]["source"] == "hello.jinja2"
            # Test when the prompt-template file doesn't exist
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--entry",
"hello.py",
"--function",
"my_python_tool",
"--prompt-template",
f"{jinja_name}={jinja_name}.jinja2",
)
self._validate_requirement(Path(temp_dir) / flow_name / "flow.dag.yaml")
assert (Path(temp_dir) / flow_name / f"{jinja_name}.jinja2").exists()
            # Test when the template name doesn't exist in the python function arguments
jinja_name = "mock_jinja"
with pytest.raises(UserErrorException) as ex:
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--entry",
"hello.py",
"--function",
"my_python_tool",
"--prompt-template",
f"{jinja_name}={jinja_name}.jinja2",
)
assert f"Template parameter {jinja_name} doesn't find in python function arguments." in str(ex.value)
with pytest.raises(SystemExit):
run_pf_command("flow", "init")
_, err = capsys.readouterr()
assert "pf flow init: error: the following arguments are required: --flow" in err
def test_flow_init_intent_copilot(self):
flow_path = os.path.join(FLOWS_DIR, "intent-copilot")
run_pf_command(
"flow",
"init",
"--flow",
flow_path,
"--entry",
"intent.py",
"--function",
"extract_intent",
"--prompt-template",
"chat_prompt=user_intent_zero_shot.jinja2",
)
with open(Path(flow_path) / "flow.dag.yaml", "r") as f:
flow_dict = load_yaml(f)
assert "chat_history" in flow_dict["inputs"]
assert "customer_info" in flow_dict["inputs"]
chat_prompt_node = next(filter(lambda item: item["name"] == "chat_prompt", flow_dict["nodes"]))
assert "chat_history" in chat_prompt_node["inputs"]
assert "customer_info" in chat_prompt_node["inputs"]
def test_flow_init_with_connection_and_deployment(self):
def check_connection_and_deployment(flow_folder, connection, deployment):
with open(Path(flow_folder) / "flow.dag.yaml", "r") as f:
flow_dict = load_yaml(f)
assert flow_dict["nodes"][0]["inputs"]["deployment_name"] == deployment
assert flow_dict["nodes"][0]["connection"] == connection
temp_dir = mkdtemp()
with _change_working_dir(temp_dir):
flow_name = "chat_flow"
flow_folder = Path(temp_dir) / flow_name
            # When the local connection provider is configured, init a chat flow without connection and deployment.
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--type",
"chat",
)
# Assert connection files created
assert (flow_folder / "azure_openai.yaml").exists()
assert (flow_folder / "openai.yaml").exists()
            # When the local connection provider is configured, init a chat flow with connection and deployment.
connection = "connection_name"
deployment = "deployment_name"
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--type",
"chat",
"--connection",
connection,
"--deployment",
deployment,
"--yes",
)
# Assert connection files created and the connection/deployment is set in flow.dag.yaml
check_connection_and_deployment(flow_folder, connection=connection, deployment=deployment)
connection_files = [flow_folder / "azure_openai.yaml", flow_folder / "openai.yaml"]
for file in connection_files:
assert file.exists()
with open(file, "r") as f:
connection_dict = load_yaml(f)
assert connection_dict["name"] == connection
shutil.rmtree(flow_folder)
target = "promptflow._sdk._pf_client.Configuration.get_connection_provider"
with mock.patch(target) as mocked:
mocked.return_value = "azureml:xx"
                # When the azure connection provider is configured, init a chat flow without connection and deployment.
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--type",
"chat",
"--yes",
)
# Assert connection files not created.
assert not (flow_folder / "azure_openai.yaml").exists()
assert not (flow_folder / "openai.yaml").exists()
                # When the azure connection provider is configured, init a chat flow with connection and deployment.
connection = "connection_name"
deployment = "deployment_name"
run_pf_command(
"flow",
"init",
"--flow",
flow_name,
"--type",
"chat",
"--connection",
connection,
"--deployment",
deployment,
"--yes",
)
# Assert connection files not created and the connection/deployment is set in flow.dag.yaml
check_connection_and_deployment(flow_folder, connection=connection, deployment=deployment)
assert not (flow_folder / "azure_openai.yaml").exists()
assert not (flow_folder / "openai.yaml").exists()
def test_flow_chat(self, monkeypatch, capsys):
chat_list = ["hi", "what is chat gpt?"]
def mock_input(*args, **kwargs):
if chat_list:
return chat_list.pop()
else:
raise KeyboardInterrupt()
monkeypatch.setattr("builtins.input", mock_input)
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow",
"--interactive",
)
output_path = Path(FLOWS_DIR) / "chat_flow" / ".promptflow" / "chat.output.json"
assert output_path.exists()
detail_path = Path(FLOWS_DIR) / "chat_flow" / ".promptflow" / "chat.detail.json"
assert detail_path.exists()
# Test streaming output
chat_list = ["hi", "what is chat gpt?"]
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_with_stream_output",
"--interactive",
)
output_path = Path(FLOWS_DIR) / "chat_flow_with_stream_output" / ".promptflow" / "chat.output.json"
assert output_path.exists()
detail_path = Path(FLOWS_DIR) / "chat_flow_with_stream_output" / ".promptflow" / "chat.detail.json"
assert detail_path.exists()
chat_list = ["hi", "what is chat gpt?"]
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_with_python_node_streaming_output",
"--interactive",
)
        output_path = (
            Path(FLOWS_DIR) / "chat_flow_with_python_node_streaming_output" / ".promptflow" / "chat.output.json"
        )
        assert output_path.exists()
        detail_path = (
            Path(FLOWS_DIR) / "chat_flow_with_python_node_streaming_output" / ".promptflow" / "chat.detail.json"
        )
assert detail_path.exists()
# Validate terminal output
chat_list = ["hi", "what is chat gpt?"]
run_pf_command("flow", "test", "--flow", f"{FLOWS_DIR}/chat_flow", "--interactive", "--verbose")
outerr = capsys.readouterr()
# Check node output
assert "chat_node:" in outerr.out
assert "show_answer:" in outerr.out
assert "[show_answer]: print:" in outerr.out
chat_list = ["hi", "what is chat gpt?"]
with pytest.raises(SystemExit):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_with_exception",
"--interactive",
)
outerr = capsys.readouterr()
assert "Execution failure in 'show_answer': (Exception) mock exception" in outerr.out
output_path = Path(FLOWS_DIR) / "chat_flow" / ".promptflow" / "chat.output.json"
assert output_path.exists()
detail_path = Path(FLOWS_DIR) / "chat_flow" / ".promptflow" / "chat.detail.json"
assert detail_path.exists()
with pytest.raises(SystemExit):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_with_multi_output_invalid",
"--interactive",
)
outerr = capsys.readouterr()
assert "chat flow does not support multiple chat outputs" in outerr.out
def test_flow_test_with_default_chat_history(self):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_with_default_history",
)
output_path = Path(FLOWS_DIR) / "chat_flow_with_default_history" / ".promptflow" / "flow.output.json"
assert output_path.exists()
detail_path = Path(FLOWS_DIR) / "chat_flow_with_default_history" / ".promptflow" / "flow.detail.json"
assert detail_path.exists()
with open(detail_path, "r") as f:
details = json.load(f)
expect_chat_history = [
{"inputs": {"question": "hi"}, "outputs": {"answer": "hi"}},
{"inputs": {"question": "who are you"}, "outputs": {"answer": "who are you"}},
]
assert details["flow_runs"][0]["inputs"]["chat_history"] == expect_chat_history
def test_flow_test_with_user_defined_chat_history(self, monkeypatch, capsys):
chat_list = ["hi", "what is chat gpt?"]
def mock_input(*args, **kwargs):
if chat_list:
return chat_list.pop()
else:
raise KeyboardInterrupt()
monkeypatch.setattr("builtins.input", mock_input)
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_with_defined_chat_history",
"--interactive",
)
output_path = Path(FLOWS_DIR) / "chat_flow_with_defined_chat_history" / ".promptflow" / "chat.output.json"
assert output_path.exists()
detail_path = Path(FLOWS_DIR) / "chat_flow_with_defined_chat_history" / ".promptflow" / "chat.detail.json"
assert detail_path.exists()
        # Test when is_chat_history is set to False
with pytest.raises(SystemExit):
chat_list = ["hi", "what is chat gpt?"]
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/chat_flow_without_defined_chat_history",
"--interactive",
)
outerr = capsys.readouterr()
assert "chat_history is required in the inputs of chat flow" in outerr.out
@pytest.mark.parametrize(
"extra_args,expected_err",
[
pytest.param(
[],
"Required input(s) ['key'] are missing for \"flow\".",
id="missing_required_flow_inputs",
),
pytest.param(
["--node", "print_env"],
"Required input(s) ['key'] are missing for \"print_env\".",
id="missing_required_node_inputs",
),
],
)
def test_flow_test_inputs_missing(self, capsys, caplog, extra_args: List[str], expected_err: str):
with pytest.raises(SystemExit):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/print_env_var",
"--environment-variables",
"API_BASE=${azure_open_ai_connection.api_base}",
*extra_args,
)
stdout, _ = capsys.readouterr()
assert expected_err in stdout
@pytest.mark.parametrize(
"extra_args,expected_inputs,expected_log_prefixes",
[
pytest.param(
[
"--inputs",
f"url={TARGET_URL}",
"answer=Channel",
"evidence=Url",
],
[
{"answer": "Channel", "evidence": "Url"},
{"url": TARGET_URL, "answer": "Channel", "evidence": "Url"},
],
[
"Unknown input(s) of flow: ",
"flow input(s): ",
],
id="unknown_flow_inputs",
),
pytest.param(
[
"--inputs",
f"inputs.url={TARGET_URL}",
"unknown_input=unknown_val",
"--node",
"fetch_text_content_from_url",
],
[
{"unknown_input": "unknown_val"},
{"fetch_url": TARGET_URL, "unknown_input": "unknown_val"},
],
[
"Unknown input(s) of fetch_text_content_from_url: ",
"fetch_text_content_from_url input(s): ",
],
id="unknown_inputs_node",
),
],
)
def test_flow_test_inputs_unknown(
self, caplog, extra_args: List[str], expected_inputs: List[Dict[str, str]], expected_log_prefixes: List[str]
):
logger = logging.getLogger(LOGGER_NAME)
logger.propagate = True
def validate_log(log_msg, prefix, expect_dict):
log_inputs = json.loads(log_msg[len(prefix) :].replace("'", '"'))
assert prefix in log_msg
assert expect_dict == log_inputs
with caplog.at_level(level=logging.INFO, logger=LOGGER_NAME):
run_pf_command("flow", "test", "--flow", f"{FLOWS_DIR}/web_classification", *extra_args)
for log, expected_input, expected_log_prefix in zip(caplog.records, expected_inputs, expected_log_prefixes):
validate_log(
prefix=expected_log_prefix,
log_msg=log.message,
expect_dict=expected_input,
)
def test_flow_build(self):
source = f"{FLOWS_DIR}/web_classification_with_additional_include/flow.dag.yaml"
output_path = "dist"
def get_node_settings(_flow_dag_path: Path):
flow_dag = load_yaml(_flow_dag_path)
target_node = next(filter(lambda x: x["name"] == "summarize_text_content", flow_dag["nodes"]))
target_node.pop("name")
return target_node
try:
run_pf_command(
"flow",
"build",
"--source",
source,
"--output",
output_path,
"--format",
"docker",
"--variant",
"${summarize_text_content.variant_0}",
)
new_flow_dag_path = Path(output_path, "flow", "flow.dag.yaml")
flow_dag = load_yaml(Path(source))
assert (
get_node_settings(new_flow_dag_path)
== flow_dag["node_variants"]["summarize_text_content"]["variants"]["variant_0"]["node"]
)
assert get_node_settings(Path(source)) != get_node_settings(new_flow_dag_path)
connection_path = Path(output_path, "connections", "azure_open_ai_connection.yaml")
assert connection_path.exists()
finally:
shutil.rmtree(output_path, ignore_errors=True)
def test_flow_build_with_ua(self):
with pytest.raises(UserErrorException) as e:
run_pf_command(
"flow",
"build",
"--source",
"not_exist",
"--output",
"dist",
"--format",
"docker",
"--user-agent",
"test/1.0.0",
)
assert "not exist" in str(e.value)
@pytest.mark.parametrize(
"file_name, expected, update_item",
[
(
"azure_openai_connection.yaml",
{
"module": "promptflow.connections",
"type": "azure_open_ai",
"api_type": "azure",
"api_version": "2023-07-01-preview",
"api_key": SCRUBBED_VALUE,
"api_base": "aoai-api-endpoint",
},
("api_base", "new_value"),
),
(
"custom_connection.yaml",
{
"module": "promptflow.connections",
"type": "custom",
"configs": {"key1": "test1"},
"secrets": {"key2": SCRUBBED_VALUE},
},
("configs.key1", "new_value"),
),
(
"custom_strong_type_connection.yaml",
{
"module": "promptflow.connections",
"type": "custom",
"configs": {
"api_base": "This is my first connection.",
"promptflow.connection.custom_type": "MyFirstConnection",
"promptflow.connection.module": "my_tool_package.connections",
"promptflow.connection.package": "test-custom-tools",
"promptflow.connection.package_version": "0.0.2",
},
"secrets": {"api_key": SCRUBBED_VALUE},
},
("configs.api_base", "new_value"),
),
],
)
def test_connection_create_update(
self, install_custom_tool_pkg, file_name, expected, update_item, capfd, local_client
):
name = f"Connection_{str(uuid.uuid4())[:4]}"
run_pf_command("connection", "create", "--file", f"{CONNECTIONS_DIR}/{file_name}", "--name", f"{name}")
out, err = capfd.readouterr()
        # Assert subset containment to skip some datetime fields
assert expected.items() <= json.loads(out).items()
# Update with --set
update_key, update_value = update_item
run_pf_command("connection", "update", "--set", f"{update_key}={update_value}", "--name", f"{name}")
out, _ = capfd.readouterr()
assert update_value in out, f"expected updated value {update_value} not in {out}"
connection = local_client.connections.get(name)
# Assert secrets are not scrubbed
assert not any(v == SCRUBBED_VALUE for v in connection._secrets.values())
def test_input_with_dict_val(self, pf):
run_id = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--file",
"./input_with_dict_val.yaml",
"--name",
run_id,
cwd=f"{RUNS_DIR}",
)
outputs = pf.runs._get_outputs(run=run_id)
assert "dict" in outputs["output"][0]
def test_visualize_ignore_space(self) -> None:
names = ["a,b,c,d", "a, b, c, d", "a, b , c, d"]
groundtruth = ["a", "b", "c", "d"]
def mocked_visualize(*args, **kwargs):
runs = args[0]
assert runs == groundtruth
with patch.object(RunOperations, "visualize") as mock_visualize:
mock_visualize.side_effect = mocked_visualize
for name in names:
run_pf_command(
"run",
"visualize",
"--names",
name,
)
def test_pf_run_with_stream_log(self, capfd):
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/flow_with_user_output",
"--data",
f"{DATAS_DIR}/webClassification3.jsonl",
"--column-mapping",
"key=value",
"extra=${data.url}",
"--stream",
)
out, _ = capfd.readouterr()
        # For a batch run, the executor uses the bulk logger to print logs, and only prints the error logs of the nodes.
existing_keywords = ["execution", "execution.bulk", "WARNING", "error log"]
non_existing_keywords = ["execution.flow", "user log"]
for keyword in existing_keywords:
assert keyword in out
for keyword in non_existing_keywords:
assert keyword not in out
def test_pf_run_no_stream_log(self, capfd):
# without --stream, logs will be in the run's log file
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/flow_with_user_output",
"--data",
f"{DATAS_DIR}/webClassification3.jsonl",
"--column-mapping",
"key=value",
"extra=${data.url}",
)
out, _ = capfd.readouterr()
assert "user log" not in out
assert "error log" not in out
# flow logs won't stream
assert "Executing node print_val. node run id:" not in out
# executor logs won't stream
assert "Node print_val completes." not in out
def test_format_cli_exception(self, capsys):
from promptflow._sdk.operations._connection_operations import ConnectionOperations
with patch.dict(os.environ, {"PROMPTFLOW_STRUCTURE_EXCEPTION_OUTPUT": "true"}):
with pytest.raises(SystemExit):
run_pf_command(
"connection",
"show",
"--name",
"invalid_connection_name",
)
outerr = capsys.readouterr()
assert outerr.err
error_msg = json.loads(outerr.err)
assert error_msg["code"] == "UserError"
assert error_msg["innerError"]["innerError"]["code"] == "ConnectionNotFoundError"
def mocked_connection_get(*args, **kwargs):
raise Exception("mock exception")
with patch.object(ConnectionOperations, "get") as mock_connection_get:
mock_connection_get.side_effect = mocked_connection_get
with pytest.raises(Exception):
run_pf_command(
"connection",
"show",
"--name",
"invalid_connection_name",
)
outerr = capsys.readouterr()
assert outerr.err
error_msg = json.loads(outerr.err)
assert error_msg["code"] == "SystemError"
with pytest.raises(SystemExit):
run_pf_command(
"connection",
"show",
"--name",
"invalid_connection_name",
)
outerr = capsys.readouterr()
assert not outerr.err
def test_tool_init(self, capsys):
with tempfile.TemporaryDirectory() as temp_dir:
package_name = "package_name"
func_name = "func_name"
run_pf_command("tool", "init", "--package", package_name, "--tool", func_name, cwd=temp_dir)
package_folder = Path(temp_dir) / package_name
sys.path.append(str(package_folder.absolute()))
assert (package_folder / package_name / f"{func_name}.py").exists()
assert (package_folder / package_name / "utils.py").exists()
assert (package_folder / package_name / "__init__.py").exists()
assert (package_folder / "setup.py").exists()
assert (package_folder / "README.md").exists()
spec = importlib.util.spec_from_file_location(
f"{package_name}.utils", package_folder / package_name / "utils.py"
)
utils = importlib.util.module_from_spec(spec)
spec.loader.exec_module(utils)
assert hasattr(utils, "list_package_tools")
tools_meta = utils.list_package_tools()
assert f"{package_name}.{func_name}.{func_name}" in tools_meta
meta = tools_meta[f"{package_name}.{func_name}.{func_name}"]
assert meta["function"] == func_name
assert meta["module"] == f"{package_name}.{func_name}"
assert meta["name"] == func_name
assert meta["description"] == f"This is {func_name} tool"
assert meta["type"] == "python"
# Invalid package/tool name
invalid_package_name = "123-package-name"
invalid_tool_name = "123_tool_name"
with pytest.raises(SystemExit):
run_pf_command("tool", "init", "--package", invalid_package_name, "--tool", func_name, cwd=temp_dir)
outerr = capsys.readouterr()
assert f"The package name {invalid_package_name} is a invalid identifier." in outerr.out
with pytest.raises(SystemExit):
run_pf_command("tool", "init", "--package", package_name, "--tool", invalid_tool_name, cwd=temp_dir)
outerr = capsys.readouterr()
assert f"The tool name {invalid_tool_name} is a invalid identifier." in outerr.out
with pytest.raises(SystemExit):
run_pf_command("tool", "init", "--tool", invalid_tool_name, cwd=temp_dir)
outerr = capsys.readouterr()
assert f"The tool name {invalid_tool_name} is a invalid identifier." in outerr.out
# Test init package tool with extra info
package_name = "tool_with_extra_info"
package_folder = Path(temp_dir) / package_name
package_folder.mkdir(exist_ok=True, parents=True)
manifest_file = package_folder / "MANIFEST.in"
mock_manifest_content = "include mock/path"
with open(manifest_file, "w") as f:
f.write(mock_manifest_content)
icon_path = Path(DATAS_DIR) / "logo.jpg"
category = "test_category"
tags = {"tag1": "value1", "tag2": "value2"}
run_pf_command(
"tool",
"init",
"--package",
package_name,
"--tool",
func_name,
"--set",
f"icon={icon_path.absolute()}",
f"category={category}",
f"tags={tags}",
cwd=temp_dir,
)
with open(manifest_file, "r") as f:
content = f.read()
assert mock_manifest_content in content
assert f"include {package_name}/icons" in content
            # Add a tool script with an icon
tool_script_name = "tool_func_with_icon"
run_pf_command(
"tool",
"init",
"--tool",
tool_script_name,
"--set",
f"icon={icon_path.absolute()}",
f"category={category}",
f"tags={tags}",
cwd=Path(temp_dir) / package_name / package_name,
)
sys.path.append(str(package_folder.absolute()))
spec = importlib.util.spec_from_file_location(
f"{package_name}.utils", package_folder / package_name / "utils.py"
)
utils = importlib.util.module_from_spec(spec)
spec.loader.exec_module(utils)
assert hasattr(utils, "list_package_tools")
tools_meta = utils.list_package_tools()
meta = tools_meta[f"{package_name}.{func_name}.{func_name}"]
assert meta["category"] == category
assert meta["tags"] == tags
assert meta["icon"].startswith("data:image")
assert tools_meta[f"{package_name}.{tool_script_name}.{tool_script_name}"]["icon"].startswith("data:image")
# icon doesn't exist
with pytest.raises(SystemExit):
run_pf_command(
"tool",
"init",
"--package",
package_name,
"--tool",
func_name,
"--set",
"icon=invalid_icon_path",
cwd=temp_dir,
)
outerr = capsys.readouterr()
assert "Cannot find the icon path" in outerr.out
def test_tool_list(self, capsys):
# List package tools in environment
run_pf_command("tool", "list")
outerr = capsys.readouterr()
tools_dict = json.loads(outerr.out)
package_tool_name = "promptflow.tools.embedding.embedding"
assert package_tool_name in tools_dict["package"]
# List flow tools and package tools
run_pf_command("tool", "list", "--flow", f"{FLOWS_DIR}/chat_flow")
outerr = capsys.readouterr()
tools_dict = json.loads(outerr.out)
expect_flow_tools = {
"chat.jinja2": {
"type": "llm",
"inputs": {"chat_history": {"type": ["string"]}, "question": {"type": ["string"]}},
"source": "chat.jinja2",
},
"show_answer.py": {
"type": "python",
"inputs": {"chat_answer": {"type": ["string"]}},
"source": "show_answer.py",
"function": "show_answer",
},
}
assert tools_dict["code"] == expect_flow_tools
assert package_tool_name in tools_dict["package"]
# Invalid flow parameter
with pytest.raises(SystemExit):
run_pf_command("tool", "list", "--flow", "invalid_flow_folder")
outerr = capsys.readouterr()
assert "invalid_flow_folder does not exist" in outerr.out
def test_tool_validate(self):
# Test validate tool script
tool_script_path = Path(TOOL_ROOT) / "custom_llm_tool.py"
run_pf_command("tool", "validate", "--source", str(tool_script_path))
invalid_tool_script_path = Path(TOOL_ROOT) / "invalid_tool.py"
with pytest.raises(SystemExit):
run_pf_command("tool", "validate", "--source", str(invalid_tool_script_path))
# Test validate package tool
tool_script_path = Path(TOOL_ROOT) / "tool_package"
sys.path.append(str(tool_script_path.resolve()))
with patch("promptflow._sdk.operations._tool_operations.ToolOperations._is_package_tool", return_value=True):
with pytest.raises(SystemExit):
run_pf_command("tool", "validate", "--source", "tool_package")
# Test validate tool in package
with pytest.raises(SystemExit):
run_pf_command("tool", "validate", "--source", "tool_package.invalid_tool.invalid_input_settings")
def test_flow_test_with_image_input_and_output(self):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/python_tool_with_simple_image",
)
output_path = Path(FLOWS_DIR) / "python_tool_with_simple_image" / ".promptflow" / "output"
assert output_path.exists()
image_path = Path(FLOWS_DIR) / "python_tool_with_simple_image" / ".promptflow" / "intermediate"
assert image_path.exists()
def test_flow_test_with_composite_image(self):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/python_tool_with_composite_image",
)
output_path = Path(FLOWS_DIR) / "python_tool_with_composite_image" / ".promptflow" / "output"
assert output_path.exists()
image_path = Path(FLOWS_DIR) / "python_tool_with_composite_image" / ".promptflow" / "intermediate"
assert image_path.exists()
def test_run_file_with_set(self, pf) -> None:
name = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--file",
f"{RUNS_DIR}/run_with_env.yaml",
"--set",
f"name={name}",
)
# run exists
pf.runs.get(name=name)
def test_run_file_with_set_priority(self, pf) -> None:
# --name has higher priority than --set
name1 = str(uuid.uuid4())
name2 = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--file",
f"{RUNS_DIR}/run_with_env.yaml",
"--set",
f"name={name1}",
"--name",
name2,
)
# run exists
try:
pf.runs.get(name=name1)
except RunNotFoundError:
pass
pf.runs.get(name=name2)
def test_data_scrubbing(self):
# Prepare connection
run_pf_command(
"connection", "create", "--file", f"{CONNECTIONS_DIR}/custom_connection.yaml", "--name", "custom_connection"
)
# Test flow run
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/print_secret_flow",
)
output_path = Path(FLOWS_DIR) / "print_secret_flow" / ".promptflow" / "flow.output.json"
assert output_path.exists()
log_path = Path(FLOWS_DIR) / "print_secret_flow" / ".promptflow" / "flow.log"
with open(log_path, "r") as f:
log_content = f.read()
assert "**data_scrubbed**" in log_content
# Test node run
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/print_secret_flow",
"--node",
"print_secret",
"--inputs",
"conn=custom_connection",
"inputs.topic=atom",
)
output_path = Path(FLOWS_DIR) / "print_secret_flow" / ".promptflow" / "flow-print_secret.node.detail.json"
assert output_path.exists()
log_path = Path(FLOWS_DIR) / "print_secret_flow" / ".promptflow" / "print_secret.node.log"
with open(log_path, "r") as f:
log_content = f.read()
assert "**data_scrubbed**" in log_content
def test_cli_ua(self, pf):
# clear user agent before test
context = OperationContext().get_instance()
context.user_agent = ""
with environment_variable_overwrite(PF_USER_AGENT, ""):
with pytest.raises(SystemExit):
run_pf_command(
"run",
"show",
"--name",
"not_exist",
)
user_agent = ClientUserAgentUtil.get_user_agent()
ua_dict = parse_ua_to_dict(user_agent)
assert ua_dict.keys() == {"promptflow-sdk", "promptflow-cli"}
def test_config_set_pure_flow_directory_macro(self, capfd: pytest.CaptureFixture) -> None:
run_pf_command(
"config",
"set",
"run.output_path='${flow_directory}'",
)
out, _ = capfd.readouterr()
expected_error_message = (
"Invalid config value '${flow_directory}' for 'run.output_path': "
"Cannot specify flow directory as run output path; "
"if you want to specify run output path under flow directory, "
"please use its child folder, e.g. '${flow_directory}/.runs'."
)
assert expected_error_message in out
from promptflow._sdk._configuration import Configuration
config = Configuration.get_instance()
assert config.get_run_output_path() is None
def test_user_agent_in_cli(self):
context = OperationContext().get_instance()
context.user_agent = ""
with pytest.raises(SystemExit):
run_pf_command(
"run",
"show",
"--name",
"not_exist",
"--user-agent",
"a/1.0.0 b/2.0",
)
user_agent = ClientUserAgentUtil.get_user_agent()
ua_dict = parse_ua_to_dict(user_agent)
assert ua_dict.keys() == {"promptflow-sdk", "promptflow-cli", "a", "b"}
context.user_agent = ""
def test_node_run_telemetry(self, local_client):
from promptflow._sdk._telemetry.logging_handler import PromptFlowSDKLogHandler
def assert_node_run(*args, **kwargs):
record = args[0]
assert record.msg.startswith("pf.flow.node_test") or record.msg.startswith("pf.flows.node_test")
assert record.custom_dimensions["activity_name"] in ["pf.flow.node_test", "pf.flows.node_test"]
def assert_flow_test(*args, **kwargs):
record = args[0]
assert record.msg.startswith("pf.flow.test") or record.msg.startswith("pf.flows.test")
assert record.custom_dimensions["activity_name"] in ["pf.flow.test", "pf.flows.test"]
with tempfile.TemporaryDirectory() as temp_dir:
shutil.copytree((Path(FLOWS_DIR) / "print_env_var").resolve().as_posix(), temp_dir, dirs_exist_ok=True)
with patch.object(PromptFlowSDKLogHandler, "emit") as mock_logger:
mock_logger.side_effect = assert_node_run
run_pf_command(
"flow",
"test",
"--flow",
temp_dir,
"--inputs",
"key=API_BASE",
"--node",
"print_env",
)
with patch.object(PromptFlowSDKLogHandler, "emit") as mock_logger:
mock_logger.side_effect = assert_flow_test
run_pf_command(
"flow",
"test",
"--flow",
temp_dir,
"--inputs",
"key=API_BASE",
)
def test_run_create_with_existing_run_folder(self):
run_name = "web_classification_variant_0_20231205_120253_104100"
        # clean up the run if it exists
from promptflow import PFClient
from promptflow._cli._utils import _try_delete_existing_run_record
pf = PFClient()
_try_delete_existing_run_record(run_name)
# assert the run doesn't exist
with pytest.raises(RunNotFoundError):
pf.runs.get(run_name)
uuid_str = str(uuid.uuid4())
run_folder = Path(RUNS_DIR) / run_name
run_pf_command(
"run",
"create",
"--source",
Path(run_folder).resolve().as_posix(),
"--set",
f"display_name={uuid_str}",
f"description={uuid_str}",
f"tags.tag1={uuid_str}",
)
# check run results
run = pf.runs.get(run_name)
assert run.display_name == uuid_str
assert run.description == uuid_str
assert run.tags["tag1"] == uuid_str
def test_cli_command_no_sub_command(self, capfd):
try:
run_pf_command(
"run",
)
            # argparse will raise SystemExit after running --help
except SystemExit:
pass
# will run pf run -h
out, _ = capfd.readouterr()
assert "A CLI tool to manage runs for prompt flow." in out
try:
run_pf_command("run", "-h")
            # argparse will raise SystemExit after running --help
except SystemExit:
pass
# will run pf run -h
out, _ = capfd.readouterr()
assert "A CLI tool to manage runs for prompt flow." in out
def test_unknown_command(self, capfd):
try:
run_pf_command(
"unknown",
)
            # argparse will raise SystemExit for an unknown command
except SystemExit:
pass
_, err = capfd.readouterr()
assert "invalid choice" in err
def test_config_set_user_agent(self) -> None:
run_pf_command(
"config",
"set",
"user_agent=test/1.0.0",
)
user_agent = setup_user_agent_to_operation_context(None)
ua_dict = parse_ua_to_dict(user_agent)
assert ua_dict.keys() == {"promptflow-sdk", "promptflow-cli", "PFCustomer_test"}
# clear user agent
run_pf_command(
"config",
"set",
"user_agent=",
)
context = OperationContext().get_instance()
context.user_agent = ""
def test_basic_flow_run_delete(self, monkeypatch, local_client, capfd) -> None:
input_list = ["y"]
def mock_input(*args, **kwargs):
if input_list:
return input_list.pop()
else:
raise KeyboardInterrupt()
monkeypatch.setattr("builtins.input", mock_input)
run_id = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--name",
run_id,
"--flow",
f"{FLOWS_DIR}/print_env_var",
"--data",
f"{DATAS_DIR}/env_var_names.jsonl",
)
out, _ = capfd.readouterr()
assert "Completed" in out
run_a = local_client.runs.get(name=run_id)
local_storage = LocalStorageOperations(run_a)
path_a = local_storage.path
assert os.path.exists(path_a)
# delete the run
run_pf_command(
"run",
"delete",
"--name",
f"{run_id}",
)
        # the run is deleted and its folder is removed
assert not os.path.exists(path_a)
def test_basic_flow_run_delete_no_confirm(self, monkeypatch, local_client, capfd) -> None:
run_id = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--name",
run_id,
"--flow",
f"{FLOWS_DIR}/print_env_var",
"--data",
f"{DATAS_DIR}/env_var_names.jsonl",
)
out, _ = capfd.readouterr()
assert "Completed" in out
run_a = local_client.runs.get(name=run_id)
local_storage = LocalStorageOperations(run_a)
path_a = local_storage.path
assert os.path.exists(path_a)
# delete the run
run_pf_command("run", "delete", "--name", f"{run_id}", "-y")
        # the run is deleted and its folder is removed
assert not os.path.exists(path_a)
def test_basic_flow_run_delete_error(self, monkeypatch) -> None:
input_list = ["y"]
def mock_input(*args, **kwargs):
if input_list:
return input_list.pop()
else:
raise KeyboardInterrupt()
monkeypatch.setattr("builtins.input", mock_input)
run_id = str(uuid.uuid4())
# delete the run
with pytest.raises(SystemExit):
run_pf_command(
"run",
"delete",
"--name",
f"{run_id}",
)
def test_experiment_hide_by_default(self, monkeypatch, capfd):
        # experiment commands are hidden if the config is not set
with pytest.raises(SystemExit):
run_pf_command(
"experiment",
"create",
"--template",
f"{EXPERIMENT_DIR}/basic-no-script-template/basic.exp.yaml",
)
@pytest.mark.usefixtures("setup_experiment_table")
def test_experiment_start(self, monkeypatch, capfd, local_client):
with mock.patch("promptflow._sdk._configuration.Configuration.is_internal_features_enabled") as mock_func:
mock_func.return_value = True
exp_name = str(uuid.uuid4())
run_pf_command(
"experiment",
"create",
"--template",
f"{EXPERIMENT_DIR}/basic-script-template/basic-script.exp.yaml",
"--name",
exp_name,
)
out, _ = capfd.readouterr()
assert exp_name in out
assert ExperimentStatus.NOT_STARTED in out
run_pf_command(
"experiment",
"start",
"--name",
exp_name,
)
out, _ = capfd.readouterr()
assert ExperimentStatus.TERMINATED in out
exp = local_client._experiments.get(name=exp_name)
assert len(exp.node_runs) == 4
assert all(len(exp.node_runs[node_name]) > 0 for node_name in exp.node_runs)
metrics = local_client.runs.get_metrics(name=exp.node_runs["eval"][0]["name"])
assert "accuracy" in metrics
def test_batch_run_timeout(self, local_client):
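        # run_batch (presumably a module-level helper defined earlier in this file) is executed in a
        # separate process so a hung batch run cannot block the test session; exit code 0 means the
        # child process' own assertions passed.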
line_timeout_seconds = "54"
        timeout_index = 9
p = multiprocessing.Process(
target=run_batch,
            args=(local_client, line_timeout_seconds, timeout_index),
)
p.start()
p.join()
assert p.exitcode == 0
def test_batch_run_completed_within_the_required_time(self, local_client):
line_timeout_seconds = "600"
p = multiprocessing.Process(
target=run_batch,
args=(
local_client,
line_timeout_seconds,
),
)
p.start()
p.join()
assert p.exitcode == 0
def test_run_list(self, local_client):
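        # `pf run list` should tolerate runs that fail to serialize: Run._to_dict is mocked to raise,
        # and the command is still expected to finish without surfacing the exception.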
from promptflow._sdk.entities import Run
with patch.object(Run, "_to_dict") as mock_to_dict:
mock_to_dict.side_effect = RuntimeError("mock exception")
run_pf_command(
"run",
"list",
)
def test_pf_flow_test_with_detail(self, tmpdir):
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--inputs",
"url=https://www.youtube.com/watch?v=o5ZQyXaAv1g",
"answer=Channel",
"evidence=Url",
"--detail",
Path(tmpdir).as_posix(),
)
        # when the `--detail` parameter is specified, the detail, output and log files are saved in both
        # the specified folder and ".promptflow" under the flow folder
for parent_folder in [
Path(FLOWS_DIR) / "web_classification" / ".promptflow",
Path(tmpdir),
]:
for filename in ["flow.detail.json", "flow.output.json", "flow.log"]:
path = parent_folder / filename
assert path.is_file()
def test_pf_flow_test_single_node_with_detail(self, tmpdir):
node_name = "fetch_text_content_from_url"
run_pf_command(
"flow",
"test",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--inputs",
"inputs.url="
"https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"--node",
node_name,
"--detail",
Path(tmpdir).as_posix(),
)
output_path = Path(FLOWS_DIR) / "web_classification" / ".promptflow" / f"flow-{node_name}.node.detail.json"
assert output_path.exists()
        # when the `--detail` parameter is specified, the node detail, output and log files are saved in
        # both the specified folder and ".promptflow" under the flow folder
for parent_folder in [
Path(FLOWS_DIR) / "web_classification" / ".promptflow",
Path(tmpdir),
]:
for filename in [
f"flow-{node_name}.node.detail.json",
f"flow-{node_name}.node.output.json",
f"{node_name}.node.log",
]:
path = parent_folder / filename
assert path.is_file()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_flow.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
import pytest
from marshmallow import ValidationError
from promptflow import load_flow
from promptflow._sdk.entities._eager_flow import EagerFlow
from promptflow._sdk.entities._flow import ProtectedFlow
from promptflow.exceptions import UserErrorException
FLOWS_DIR = Path("./tests/test_configs/flows")
EAGER_FLOWS_DIR = Path("./tests/test_configs/eager_flows")
@pytest.mark.sdk_test
@pytest.mark.unittest
class TestRun:
@pytest.mark.parametrize(
"kwargs",
[
{"source": EAGER_FLOWS_DIR / "simple_with_yaml"},
{"source": EAGER_FLOWS_DIR / "simple_with_yaml" / "flow.dag.yaml"},
{"source": EAGER_FLOWS_DIR / "simple_without_yaml" / "entry.py", "entry": "my_flow"},
{"source": EAGER_FLOWS_DIR / "multiple_entries" / "entry1.py", "entry": "my_flow1"},
{"source": EAGER_FLOWS_DIR / "multiple_entries" / "entry1.py", "entry": "my_flow2"},
],
)
def test_eager_flow_load(self, kwargs):
flow = load_flow(**kwargs)
assert isinstance(flow, EagerFlow)
@pytest.mark.parametrize(
"kwargs",
[
{"source": FLOWS_DIR / "print_input_flow"},
{"source": FLOWS_DIR / "print_input_flow" / "flow.dag.yaml"},
],
)
def test_dag_flow_load(self, kwargs):
flow = load_flow(**kwargs)
assert isinstance(flow, ProtectedFlow)
def test_flow_load_advanced(self):
flow = load_flow(source=EAGER_FLOWS_DIR / "flow_with_environment")
assert isinstance(flow, EagerFlow)
assert flow._data["environment"] == {"python_requirements_txt": "requirements.txt"}
@pytest.mark.parametrize(
"kwargs, error_message, exception_type",
[
(
{
"source": EAGER_FLOWS_DIR / "multiple_entries" / "entry1.py",
},
"Entry function is not specified",
UserErrorException,
),
(
{
"source": EAGER_FLOWS_DIR / "multiple_entries" / "not_exist.py",
},
"does not exist",
UserErrorException,
),
(
{
"source": EAGER_FLOWS_DIR / "invalid_no_path",
},
"{'path': ['Missing data for required field.']}",
ValidationError,
),
(
{
"source": EAGER_FLOWS_DIR / "invalid_illegal_path",
},
"Can't find directory or file in resolved absolute path:",
ValidationError,
),
(
{"source": EAGER_FLOWS_DIR / "invalid_extra_fields_nodes"},
"{'nodes': ['Unknown field.']}",
ValidationError,
),
],
)
def test_flow_load_invalid(self, kwargs, error_message, exception_type):
with pytest.raises(exception_type) as e:
load_flow(**kwargs)
assert error_message in str(e.value)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_connection.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
from unittest.mock import patch
import pytest
from promptflow._cli._pf._connection import validate_and_interactive_get_secrets
from promptflow._sdk._constants import SCRUBBED_VALUE, CustomStrongTypeConnectionConfigs
from promptflow._sdk._load_functions import _load_env_to_connection
from promptflow._sdk.entities._connection import (
AzureContentSafetyConnection,
AzureOpenAIConnection,
CognitiveSearchConnection,
CustomConnection,
FormRecognizerConnection,
OpenAIConnection,
QdrantConnection,
SerpConnection,
WeaviateConnection,
_Connection,
)
from promptflow._utils.yaml_utils import load_yaml
from promptflow.exceptions import UserErrorException
TEST_ROOT = Path(__file__).parent.parent.parent
CONNECTION_ROOT = TEST_ROOT / "test_configs/connections"
@pytest.mark.unittest
class TestConnection:
@pytest.mark.parametrize(
"file_name, class_name, init_param, expected",
[
(
"azure_openai_connection.yaml",
AzureOpenAIConnection,
{
"name": "my_azure_open_ai_connection",
"api_type": "azure",
"api_version": "2023-07-01-preview",
"api_key": "<to-be-replaced>",
"api_base": "aoai-api-endpoint",
},
{
"module": "promptflow.connections",
"type": "azure_open_ai",
},
),
(
"openai_connection.yaml",
OpenAIConnection,
{
"name": "my_open_ai_connection",
"api_key": "<to-be-replaced>",
"organization": "org",
},
{
"module": "promptflow.connections",
"type": "open_ai",
},
),
(
"openai_connection_base_url.yaml",
OpenAIConnection,
{
"name": "my_open_ai_connection",
"api_key": "<to-be-replaced>",
"organization": "org",
"base_url": "custom_base_url",
},
{
"module": "promptflow.connections",
"type": "open_ai",
},
),
(
"custom_connection.yaml",
CustomConnection,
{
"name": "my_custom_connection",
"configs": {"key1": "test1"},
"secrets": {"key2": "test2"},
},
{
"module": "promptflow.connections",
"type": "custom",
},
),
(
"azure_content_safety_connection.yaml",
AzureContentSafetyConnection,
{
"name": "my_azure_content_safety_connection",
"api_key": "<to-be-replaced>",
"endpoint": "endpoint",
"api_version": "2023-04-30-preview",
"api_type": "Content Safety",
},
{
"module": "promptflow.connections",
"type": "azure_content_safety",
},
),
(
"cognitive_search_connection.yaml",
CognitiveSearchConnection,
{
"name": "my_cognitive_search_connection",
"api_key": "<to-be-replaced>",
"api_base": "endpoint",
"api_version": "2023-07-01-Preview",
},
{
"module": "promptflow.connections",
"type": "cognitive_search",
},
),
(
"serp_connection.yaml",
SerpConnection,
{
"name": "my_serp_connection",
"api_key": "<to-be-replaced>",
},
{
"module": "promptflow.connections",
"type": "serp",
},
),
(
"form_recognizer_connection.yaml",
FormRecognizerConnection,
{
"name": "my_form_recognizer_connection",
"api_key": "<to-be-replaced>",
"endpoint": "endpoint",
"api_version": "2023-07-31",
"api_type": "Form Recognizer",
},
{
"module": "promptflow.connections",
"type": "form_recognizer",
},
),
(
"qdrant_connection.yaml",
QdrantConnection,
{
"name": "my_qdrant_connection",
"api_key": "<to-be-replaced>",
"api_base": "endpoint",
},
{
"module": "promptflow_vectordb.connections",
"type": "qdrant",
},
),
(
"weaviate_connection.yaml",
WeaviateConnection,
{
"name": "my_weaviate_connection",
"api_key": "<to-be-replaced>",
"api_base": "endpoint",
},
{
"module": "promptflow_vectordb.connections",
"type": "weaviate",
},
),
],
)
def test_connection_load_dump(self, file_name, class_name, init_param, expected):
conn = _Connection._load(data=load_yaml(CONNECTION_ROOT / file_name))
expected = {**expected, **init_param}
assert dict(conn._to_dict()) == expected
assert class_name(**init_param)._to_dict() == expected
def test_connection_load_from_env(self):
connection = _load_env_to_connection(source=CONNECTION_ROOT / ".env", params_override=[{"name": "env_conn"}])
assert connection._to_dict() == {
"name": "env_conn",
"module": "promptflow.connections",
"type": "custom",
"configs": {},
"secrets": {"aaa": "bbb", "ccc": "ddd"},
}
assert (
connection.__str__()
== """name: env_conn
module: promptflow.connections
type: custom
configs: {}
secrets:
aaa: bbb
ccc: ddd
"""
)
def test_connection_load_from_env_file_bad_case(self):
# Test file not found
with pytest.raises(FileNotFoundError) as e:
_load_env_to_connection(source=CONNECTION_ROOT / "mock.env", params_override=[{"name": "env_conn"}])
assert "not found" in str(e.value)
# Test file empty
with pytest.raises(Exception) as e:
_load_env_to_connection(source=CONNECTION_ROOT / "empty.env", params_override=[{"name": "env_conn"}])
assert "Load nothing" in str(e.value)
def test_to_execution_connection_dict(self):
# Assert custom connection build
connection = CustomConnection(name="test_connection", configs={"a": "1"}, secrets={"b": "2"})
assert connection._to_execution_connection_dict() == {
"module": "promptflow.connections",
"secret_keys": ["b"],
"type": "CustomConnection",
"value": {"a": "1", "b": "2"},
}
# Assert strong type - AzureOpenAI
connection = AzureOpenAIConnection(
name="test_connection_1",
type="AzureOpenAI",
api_key="test_key",
api_base="test_base",
api_type="azure",
api_version="2023-07-01-preview",
)
assert connection._to_execution_connection_dict() == {
"module": "promptflow.connections",
"secret_keys": ["api_key"],
"type": "AzureOpenAIConnection",
"value": {
"api_base": "test_base",
"api_key": "test_key",
"api_type": "azure",
"api_version": "2023-07-01-preview",
},
}
# Assert strong type - OpenAI
connection = OpenAIConnection(
name="test_connection_1",
type="AzureOpenAI",
api_key="test_key",
organization="test_org",
)
assert connection._to_execution_connection_dict() == {
"module": "promptflow.connections",
"secret_keys": ["api_key"],
"type": "OpenAIConnection",
"value": {"api_key": "test_key", "organization": "test_org"},
}
def test_validate_and_interactive_get_secrets(self):
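        # On create, every scrubbed/placeholder secret value triggers an interactive prompt
        # (mocked here to return "test_value"); on update, only "<user-input>" is re-prompted.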
# Path 1: Create
connection = CustomConnection(
name="test_connection",
secrets={"key1": SCRUBBED_VALUE, "key2": "", "key3": "<no-change>", "key4": "<user-input>", "key5": "**"},
)
with patch("promptflow._cli._pf._connection.get_secret_input", new=lambda prompt: "test_value"):
validate_and_interactive_get_secrets(connection, is_update=False)
assert connection.secrets == {
"key1": "test_value",
"key2": "test_value",
"key3": "test_value",
"key4": "test_value",
"key5": "test_value",
}
# Path 2: Update
# Scrubbed value will be filled in _validate_and_encrypt_secrets for update, so no changes here.
connection = CustomConnection(
name="test_connection",
secrets={"key1": SCRUBBED_VALUE, "key2": "", "key3": "<no-change>", "key4": "<user-input>", "key5": "**"},
)
with patch("promptflow._cli._pf._connection.get_secret_input", new=lambda prompt: "test_value"):
validate_and_interactive_get_secrets(connection, is_update=True)
assert connection.secrets == {
"key1": SCRUBBED_VALUE,
"key2": "",
"key3": "<no-change>",
"key4": "test_value",
"key5": "**",
}
def test_validate_and_encrypt_secrets(self):
# Path 1: Create
connection = CustomConnection(
name="test_connection",
secrets={"key1": SCRUBBED_VALUE, "key2": "", "key3": "<no-change>", "key4": "<user-input>", "key5": "**"},
)
with pytest.raises(Exception) as e:
connection._validate_and_encrypt_secrets()
assert "secrets ['key1', 'key2', 'key3', 'key4', 'key5'] value invalid, please fill them" in str(e.value)
# Path 2: Update
connection._secrets = {"key1": "val1", "key2": "val2", "key4": "val4", "key5": "*"}
# raise error for key3 as original value missing.
# raise error for key5 as original value still scrubbed.
# raise error for key4 even if it was in _secrets, because it requires <user-input>.
with pytest.raises(Exception) as e:
connection._validate_and_encrypt_secrets()
assert "secrets ['key3', 'key4', 'key5'] value invalid, please fill them" in str(e.value)
def test_convert_to_custom_strong_type(self, install_custom_tool_pkg):
module_name = "my_tool_package.tools.my_tool_2"
custom_conn_type = "MyFirstConnection"
import importlib
module = importlib.import_module(module_name)
# Connection created by custom strong type connection template for package tool
connection = CustomConnection(
name="test_connection",
configs={
"a": "1",
CustomStrongTypeConnectionConfigs.PROMPTFLOW_MODULE_KEY: module_name,
CustomStrongTypeConnectionConfigs.PROMPTFLOW_TYPE_KEY: custom_conn_type,
},
secrets={"b": "2"},
)
res = connection._convert_to_custom_strong_type()
assert isinstance(res, module.MyFirstConnection)
assert res.secrets == {"b": "2"}
# Connection created by custom connection template for script tool
connection = CustomConnection(name="test_connection", configs={"a": "1"}, secrets={"b": "2"})
res = connection._convert_to_custom_strong_type(module=module, to_class=custom_conn_type)
assert isinstance(res, module.MyFirstConnection)
assert res.configs == {"a": "1"}
# Connection created with custom connection type in portal for package tool
connection._convert_to_custom_strong_type(module=module_name, to_class=custom_conn_type)
assert isinstance(res, module.MyFirstConnection)
assert res.configs == {"a": "1"}
# Invalid module
module_name = "not_existing_module"
with pytest.raises(ModuleNotFoundError, match=r".*No module named 'not_existing_module'*"):
connection._convert_to_custom_strong_type(module=module_name, to_class=custom_conn_type)
module_name = None
with pytest.raises(
UserErrorException,
match=r".*Failed to convert to custom strong type connection because of invalid module or class*",
):
connection._convert_to_custom_strong_type(module=module_name, to_class=custom_conn_type)
custom_conn_type = None
with pytest.raises(
UserErrorException,
match=r".*Failed to convert to custom strong type connection because of invalid module or class*",
):
connection._convert_to_custom_strong_type(module=module_name, to_class=custom_conn_type)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_orm.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import uuid
import pytest
from sqlalchemy import TEXT, Column, create_engine, inspect, text
from sqlalchemy.orm import declarative_base, sessionmaker
from promptflow._sdk._constants import HOME_PROMPT_FLOW_DIR
from promptflow._sdk._orm.session import create_or_update_table, support_transaction
TABLENAME = "orm_entity"
def random_string() -> str:
return str(uuid.uuid4())
def dump(obj, engine) -> None:
session_maker = sessionmaker(bind=engine)
with session_maker() as session:
session.add(obj)
session.commit()
class SchemaV1(declarative_base()):
__tablename__ = TABLENAME
column1 = Column(TEXT, primary_key=True)
column2 = Column(TEXT)
__pf_schema_version__ = "1"
@staticmethod
def generate(engine) -> None:
entity = SchemaV1(column1=random_string(), column2=random_string())
dump(entity, engine)
return
class SchemaV2(declarative_base()):
__tablename__ = TABLENAME
column1 = Column(TEXT, primary_key=True)
column2 = Column(TEXT)
column3 = Column(TEXT)
__pf_schema_version__ = "2"
@staticmethod
def generate(engine) -> None:
entity = SchemaV2(column1=random_string(), column2=random_string(), column3=random_string())
dump(entity, engine)
return
class SchemaV3(declarative_base()):
__tablename__ = TABLENAME
column1 = Column(TEXT, primary_key=True)
column2 = Column(TEXT)
column3 = Column(TEXT)
column4 = Column(TEXT)
__pf_schema_version__ = "3"
@staticmethod
def generate(engine) -> None:
entity = SchemaV3(
column1=random_string(), column2=random_string(), column3=random_string(), column4=random_string()
)
dump(entity, engine)
return
# exactly same schema as SchemaV3
class SchemaV4(declarative_base()):
__tablename__ = TABLENAME
column1 = Column(TEXT, primary_key=True)
column2 = Column(TEXT)
column3 = Column(TEXT)
column4 = Column(TEXT)
__pf_schema_version__ = "4"
@staticmethod
def generate(engine) -> None:
entity = SchemaV4(
column1=random_string(), column2=random_string(), column3=random_string(), column4=random_string()
)
dump(entity, engine)
return
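# Simulate a client that uses the table with a specific ORM schema version: ensure the table
# matches the schema via create_or_update_table, then insert `entity_num` rows.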
def mock_use(engine, orm_class, entity_num: int = 1) -> None:
create_or_update_table(engine, orm_class, TABLENAME)
for _ in range(entity_num):
orm_class.generate(engine)
def generate_engine():
db_path = (HOME_PROMPT_FLOW_DIR / ".test" / f"{uuid.uuid4()}.sqlite").resolve()
if not db_path.parent.is_dir():
db_path.parent.mkdir(parents=True, exist_ok=True)
return create_engine(f"sqlite:///{str(db_path)}", future=True)
@pytest.mark.sdk_test
@pytest.mark.unittest
class TestSchemaManagement:
def test_fixed_version(self) -> None:
engine = generate_engine()
mock_use(engine, SchemaV3)
mock_use(engine, SchemaV3, entity_num=2)
mock_use(engine, SchemaV3, entity_num=3)
# 1 table
assert inspect(engine).has_table(TABLENAME)
# 6 rows
entities = [entity for entity in sessionmaker(bind=engine)().query(SchemaV3).all()]
assert len(entities) == 6
def test_version_upgrade(self) -> None:
engine = generate_engine()
mock_use(engine, SchemaV1)
mock_use(engine, SchemaV2)
mock_use(engine, SchemaV3)
# 3 tables: 1 current and 2 legacy
assert inspect(engine).has_table(TABLENAME)
assert inspect(engine).has_table(f"{TABLENAME}_v1")
assert inspect(engine).has_table(f"{TABLENAME}_v2")
        # 3 rows in current table (data is carried over across upgrades)
entities = [entity for entity in sessionmaker(bind=engine)().query(SchemaV3).all()]
assert len(entities) == 3
def test_version_downgrade(self, capfd) -> None:
engine = generate_engine()
mock_use(engine, SchemaV3)
mock_use(engine, SchemaV2)
mock_use(engine, SchemaV1)
# 1 table
assert inspect(engine).has_table(TABLENAME)
        # 3 rows
entities = [entity for entity in sessionmaker(bind=engine)().query(SchemaV1).all()]
assert len(entities) == 3
# with warning message
out, _ = capfd.readouterr()
assert "While we will do our best to ensure compatibility, " in out
def test_version_mixing(self) -> None:
engine = generate_engine()
mock_use(engine, SchemaV2, entity_num=2)
mock_use(engine, SchemaV3, entity_num=3) # 1 upgrade
mock_use(engine, SchemaV2, entity_num=1)
mock_use(engine, SchemaV1, entity_num=4)
mock_use(engine, SchemaV3, entity_num=2)
# 2 tables: 1 current and 1 legacy
assert inspect(engine).has_table(TABLENAME)
assert inspect(engine).has_table(f"{TABLENAME}_v2")
# 12(all) rows in current table
entities = [entity for entity in sessionmaker(bind=engine)().query(SchemaV3).all()]
assert len(entities) == 12
def test_version_across_same_schema_version(self, capfd) -> None:
engine = generate_engine()
# when 3->4, no warning message
mock_use(engine, SchemaV3)
mock_use(engine, SchemaV4)
out, _ = capfd.readouterr()
assert "While we will do our best to ensure compatibility, " not in out
# same schema, no warning message
mock_use(engine, SchemaV4)
out, _ = capfd.readouterr()
assert "While we will do our best to ensure compatibility, " not in out
        # when 4->3, the downgrade warning message should be printed
mock_use(engine, SchemaV3)
out, _ = capfd.readouterr()
assert "While we will do our best to ensure compatibility, " in out
def test_db_without_schema_info(self) -> None:
engine = generate_engine()
# manually create a table to avoid creation of schema_info table
with engine.begin() as connection:
connection.execute(text(f"CREATE TABLE {TABLENAME} (column1 TEXT PRIMARY KEY);"))
connection.execute(
text(f"INSERT INTO {TABLENAME} (column1) VALUES (:column1);"),
{"column1": random_string()},
)
mock_use(engine, SchemaV3)
# 2 tables: 1 current and 1 legacy with name containing timestamp
assert inspect(engine).has_table(TABLENAME)
# 2 rows in current table
entities = [entity for entity in sessionmaker(bind=engine)().query(SchemaV3).all()]
assert len(entities) == 2
@pytest.mark.sdk_test
@pytest.mark.unittest
class TestTransaction:
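    # These tests exercise the engine wrapped by support_transaction: an explicit commit()
    # persists the DDL, while rollback() or an exception inside engine.begin() discards it.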
def test_commit(self) -> None:
engine = generate_engine()
engine = support_transaction(engine)
tablename = "transaction_test"
sql = f"CREATE TABLE {tablename} (id INTEGER PRIMARY KEY);"
with engine.begin() as connection:
connection.execute(text(sql))
connection.commit()
assert inspect(engine).has_table(tablename)
def test_rollback(self) -> None:
engine = generate_engine()
engine = support_transaction(engine)
tablename = "transaction_test"
sql = f"CREATE TABLE {tablename} (id INTEGER PRIMARY KEY);"
with engine.begin() as connection:
connection.execute(text(sql))
connection.rollback()
assert not inspect(engine).has_table(tablename)
def test_exception_during_transaction(self) -> None:
engine = generate_engine()
engine = support_transaction(engine)
tablename = "transaction_test"
sql = f"CREATE TABLE {tablename} (id INTEGER PRIMARY KEY);"
try:
with engine.begin() as connection:
connection.execute(text(sql))
                # raise an exception so that SQLAlchemy rolls back the transaction
raise Exception("test exception")
except Exception:
pass
assert not inspect(engine).has_table(tablename)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_flow_serve.py | from pathlib import Path
import pytest
from sdk_cli_test.conftest import MODEL_ROOT
from promptflow._cli._pf._flow import _resolve_python_flow_additional_includes
@pytest.mark.unittest
def test_flow_serve_resolve_additional_includes():
# Assert flow path not changed if no additional includes
flow_path = (Path(MODEL_ROOT) / "web_classification").resolve().absolute().as_posix()
resolved_flow_path = _resolve_python_flow_additional_includes(flow_path)
assert flow_path == resolved_flow_path
# Assert additional includes are resolved correctly
flow_path = (Path(MODEL_ROOT) / "web_classification_with_additional_include").resolve().absolute().as_posix()
resolved_flow_path = _resolve_python_flow_additional_includes(flow_path)
assert (Path(resolved_flow_path) / "convert_to_dict.py").exists()
assert (Path(resolved_flow_path) / "fetch_text_content_from_url.py").exists()
assert (Path(resolved_flow_path) / "summarize_text_content.jinja2").exists()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_flow_invoker.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
import pytest
from promptflow._sdk._serving._errors import UnexpectedConnectionProviderReturn, UnsupportedConnectionProvider
from promptflow._sdk._serving.flow_invoker import FlowInvoker
from promptflow.exceptions import UserErrorException
PROMPTFLOW_ROOT = Path(__file__).parent.parent.parent.parent
FLOWS_DIR = Path(PROMPTFLOW_ROOT / "tests/test_configs/flows")
EXAMPLE_FLOW = FLOWS_DIR / "web_classification"
@pytest.mark.sdk_test
@pytest.mark.unittest
class TestFlowInvoker:
# Note: e2e test of flow invoker has been covered by test_flow_serve.
def test_flow_invoker_unsupported_connection_provider(self):
with pytest.raises(UnsupportedConnectionProvider):
FlowInvoker(flow=EXAMPLE_FLOW, connection_provider=[])
with pytest.raises(UserErrorException):
FlowInvoker(flow=EXAMPLE_FLOW, connection_provider="unsupported")
def test_flow_invoker_custom_connection_provider(self):
# Return is not a list
with pytest.raises(UnexpectedConnectionProviderReturn) as e:
FlowInvoker(flow=EXAMPLE_FLOW, connection_provider=lambda: {})
assert "should return a list of connections" in str(e.value)
# Return is not connection type
with pytest.raises(UnexpectedConnectionProviderReturn) as e:
FlowInvoker(flow=EXAMPLE_FLOW, connection_provider=lambda: [1, 2])
assert "should be connection type" in str(e.value)
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_pf_client.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
from promptflow import PFClient
from promptflow._sdk._utils import ClientUserAgentUtil
@pytest.mark.sdk_test
@pytest.mark.e2etest
class TestPFClient:
def test_pf_client_user_agent(self):
PFClient()
assert "promptflow-sdk" in ClientUserAgentUtil.get_user_agent()
assert "promptflow/" not in ClientUserAgentUtil.get_user_agent()
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_tool.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import importlib.util
from pathlib import Path
import pytest
TOOL_DIR = Path("./tests/test_configs/tools")
@pytest.mark.unittest
class TestTool:
def get_tool_meta_by_path(self, client, tool_path, module_name):
# Load the module from the file path
spec = importlib.util.spec_from_file_location(module_name, tool_path)
tool_module = importlib.util.module_from_spec(spec)
# Load the module's code
spec.loader.exec_module(tool_module)
# List meta data of tools
tool_meta = client.tools._generate_tool_meta(tool_module)
return tool_meta
def test_python_tool_meta(self, pf):
tool_path = TOOL_DIR / "python_tool.py"
tools_meta, _ = self.get_tool_meta_by_path(pf, tool_path, "python_tool")
# Get python script tool meta
expect_tools_meta = {
"python_tool.my_python_tool": {
"name": "python_tool",
"type": "python",
"inputs": {"input1": {"type": ["string"]}},
"module": "python_tool",
"function": "my_python_tool",
},
"python_tool.my_python_tool_without_name": {
"name": "my_python_tool_without_name",
"type": "python",
"inputs": {"input1": {"type": ["string"]}},
"module": "python_tool",
"function": "my_python_tool_without_name",
},
"python_tool.PythonTool.python_tool": {
"name": "PythonTool.python_tool",
"type": "python",
"inputs": {"connection": {"type": ["AzureOpenAIConnection"]}, "input1": {"type": ["string"]}},
"module": "python_tool",
"class_name": "PythonTool",
"function": "python_tool",
},
}
assert tools_meta == expect_tools_meta
def test_custom_tool_meta(self, pf):
tool_path = TOOL_DIR / "custom_llm_tool.py"
tools_meta, _ = self.get_tool_meta_by_path(pf, tool_path, "custom_llm_tool")
expect_meta = {
"custom_llm_tool.TestCustomLLMTool.tool_func": {
"class_name": "TestCustomLLMTool",
"description": "This is a tool to demonstrate the custom_llm tool type",
"enable_kwargs": True,
"function": "tool_func",
"inputs": {"api": {"type": ["string"]}, "connection": {"type": ["AzureOpenAIConnection"]}},
"module": "custom_llm_tool",
"name": "My Custom LLM Tool",
"type": "custom_llm",
},
"custom_llm_tool.my_tool": {
"description": "This is a tool to demonstrate the custom_llm tool type",
"enable_kwargs": True,
"function": "my_tool",
"inputs": {"connection": {"type": ["CustomConnection"]}},
"module": "custom_llm_tool",
"name": "My Custom LLM Tool",
"type": "custom_llm",
},
}
assert tools_meta == expect_meta
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_exceptions.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
from azure.core.exceptions import HttpResponseError
from promptflow._sdk._orm import RunInfo
from promptflow.exceptions import _ErrorInfo, ErrorCategory, ErrorTarget, UserErrorException
from promptflow.executor import FlowValidator
from promptflow.executor._errors import InvalidNodeReference
FLOWS_DIR = "./tests/test_configs/flows/print_input_flow"
@pytest.mark.unittest
class TestExceptions:
    def test_error_category_with_unknown_error(self, pf):
ex = None
try:
pf.run("./exceptions/flows")
except Exception as e:
ex = e
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)
assert error_category == ErrorCategory.UNKNOWN
assert error_type == "FileNotFoundError"
assert error_target == ErrorTarget.UNKNOWN
assert error_message == ""
assert (
"module=promptflow._sdk._pf_client, "
'code=raise FileNotFoundError(f"flow path {flow} does not exist"), '
"lineno="
) in error_detail
def test_error_category_with_user_error(self, pf):
ex = None
try:
RunInfo.get("run_name")
except Exception as e:
ex = e
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)
assert error_category == ErrorCategory.USER_ERROR
assert error_type == "RunNotFoundError"
assert error_target == ErrorTarget.CONTROL_PLANE_SDK
assert error_message == ""
assert (
"module=promptflow._sdk._orm.run_info, "
'code=raise RunNotFoundError(f"Run name {name!r} cannot be found."), '
"lineno="
) in error_detail
def test_error_category_with_system_error(self):
ex = None
try:
FlowValidator._validate_aggregation_inputs({}, {"input1": "value1"})
except Exception as e:
ex = e
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)
assert error_category == ErrorCategory.SYSTEM_ERROR
assert error_type == "InvalidAggregationInput"
assert error_target == ErrorTarget.UNKNOWN
assert error_message == (
"The input for aggregation is incorrect. "
"The value for aggregated reference input '{input_key}' should be a list, "
"but received {value_type}. "
"Please adjust the input value to match the expected format."
)
assert (
"module=promptflow.executor.flow_validator, " "code=raise InvalidAggregationInput(, " "lineno="
) in error_detail
def test_error_category_with_http_error(self, subscription_id, resource_group_name, workspace_name):
try:
raise HttpResponseError(message="HttpResponseError")
except Exception as e:
ex = e
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)
assert error_category == ErrorCategory.UNKNOWN
assert error_type == "HttpResponseError"
assert error_target == ErrorTarget.UNKNOWN
assert error_message == ""
assert error_detail == ""
@pytest.mark.parametrize(
"status_code, expected_error_category",
[
(203, ErrorCategory.UNKNOWN),
(304, ErrorCategory.UNKNOWN),
(400, ErrorCategory.UNKNOWN),
(401, ErrorCategory.UNKNOWN),
(429, ErrorCategory.UNKNOWN),
(500, ErrorCategory.UNKNOWN),
],
)
def test_error_category_with_status_code(self, status_code, expected_error_category):
try:
raise Exception()
except Exception as e:
e.status_code = status_code
ex = e
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)
assert error_category == expected_error_category
assert error_type == "Exception"
assert error_target == ErrorTarget.UNKNOWN
assert error_message == ""
assert error_detail == ""
def test_error_category_with_executor_error(self):
try:
msg_format = (
"Invalid node definitions found in the flow graph. Non-aggregation node '{invalid_reference}' "
"cannot be referenced in the activate config of the aggregation node '{node_name}'. Please "
"review and rectify the node reference."
)
raise InvalidNodeReference(message_format=msg_format, invalid_reference=None, node_name="node_name")
except Exception as e:
ex = e
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)
assert error_category == ErrorCategory.USER_ERROR
assert error_type == "InvalidNodeReference"
assert error_target == ErrorTarget.EXECUTOR
assert error_message == (
"Invalid node definitions found in the flow graph. Non-aggregation node '{invalid_reference}' "
"cannot be referenced in the activate config of the aggregation node '{node_name}'. Please "
"review and rectify the node reference."
)
assert error_detail == ""
def test_error_category_with_cause_exception1(self):
"""cause exception is PromptflowException and e is PromptflowException, recording e."""
ex = None
try:
try:
FlowValidator._validate_aggregation_inputs({}, {"input1": "value1"})
except Exception as e:
raise UserErrorException("FlowValidator._validate_aggregation_inputs failed") from e
except Exception as e:
ex = e
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)
assert error_category == ErrorCategory.USER_ERROR
assert error_type == "InvalidAggregationInput"
assert error_target == ErrorTarget.UNKNOWN
assert error_message == ""
assert error_detail == ""
ex = None
try:
try:
FlowValidator._validate_aggregation_inputs({}, {"input1": "value1"})
except Exception as e:
raise UserErrorException(message=str(e), error=e)
except Exception as e:
ex = e
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)
assert error_category == ErrorCategory.USER_ERROR
assert error_type == "InvalidAggregationInput"
assert error_target == ErrorTarget.UNKNOWN
assert error_message == ""
assert error_detail == ""
def test_error_category_with_cause_exception2(self):
"""cause exception is PromptflowException and e is not PromptflowException, recording cause exception."""
ex = None
try:
try:
FlowValidator._validate_aggregation_inputs({}, {"input1": "value1"})
except Exception as e:
raise Exception("FlowValidator._validate_aggregation_inputs failed") from e
except Exception as e:
ex = e
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)
assert error_category == ErrorCategory.SYSTEM_ERROR
assert error_type == "InvalidAggregationInput"
assert error_target == ErrorTarget.UNKNOWN
assert error_message == (
"The input for aggregation is incorrect. The value for aggregated reference "
"input '{input_key}' should be a list, but received {value_type}. Please "
"adjust the input value to match the expected format."
)
assert (
"module=promptflow.executor.flow_validator, " "code=raise InvalidAggregationInput(, " "lineno="
) in error_detail
def test_error_category_with_cause_exception3(self, pf):
"""cause exception is not PromptflowException and e is not PromptflowException, recording e exception."""
ex = None
try:
try:
pf.run("./exceptions/flows")
except Exception as e:
raise Exception("pf run failed") from e
except Exception as e:
ex = e
error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)
assert error_category == ErrorCategory.UNKNOWN
assert error_type == "Exception"
assert error_target == ErrorTarget.UNKNOWN
assert error_message == ""
assert error_detail == ""
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_run.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import uuid
from pathlib import Path
from unittest.mock import patch
import pytest
from marshmallow import ValidationError
from promptflow._sdk._constants import BASE_PATH_CONTEXT_KEY, NODES
from promptflow._sdk._errors import InvalidFlowError
from promptflow._sdk._load_functions import load_run
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk._run_functions import create_yaml_run
from promptflow._sdk._submitter import RunSubmitter, overwrite_variant, variant_overwrite_context
from promptflow._sdk.entities import Run
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
from promptflow._utils.yaml_utils import load_yaml
PROMPTFLOW_ROOT = Path(__file__) / "../../../.."
FLOWS_DIR = Path("./tests/test_configs/flows")
RUNS_DIR = Path("./tests/test_configs/runs")
DATAS_DIR = Path("./tests/test_configs/datas")
@pytest.mark.sdk_test
@pytest.mark.unittest
class TestRun:
def test_overwrite_variant_context(self):
with variant_overwrite_context(
flow_path=FLOWS_DIR / "web_classification", tuning_node="summarize_text_content", variant="variant_0"
) as flow:
with open(flow.path) as f:
flow_dag = load_yaml(f)
node_name_2_node = {node["name"]: node for node in flow_dag[NODES]}
node = node_name_2_node["summarize_text_content"]
assert node["inputs"]["temperature"] == "0.2"
def test_overwrite_connections(self):
with variant_overwrite_context(
flow_path=FLOWS_DIR / "web_classification",
connections={"classify_with_llm": {"connection": "azure_open_ai", "deployment_name": "gpt-35-turbo"}},
) as flow:
with open(flow.path) as f:
flow_dag = load_yaml(f)
node_name_2_node = {node["name"]: node for node in flow_dag[NODES]}
node = node_name_2_node["classify_with_llm"]
assert node["connection"] == "azure_open_ai"
assert node["inputs"]["deployment_name"] == "gpt-35-turbo"
@pytest.mark.parametrize(
"connections, error_message",
[
(
{
"classify_with_llm": {
"connection": "azure_open_ai",
"deployment_name": "gpt-35-turbo",
"unsupported": 1,
}
},
"Unsupported llm connection overwrite keys",
),
("str", "Invalid connections overwrite format: str"),
({"not_exist": 1}, "Node not_exist not found in flow"),
({"classify_with_llm": 1}, "Invalid connection overwrite format: 1, only dict is supported."),
],
)
def test_overwrite_connections_invalid(self, connections, error_message):
with pytest.raises(InvalidFlowError) as e:
with variant_overwrite_context(
flow_path=FLOWS_DIR / "web_classification",
connections=connections,
):
pass
assert error_message in str(e.value)
def test_load_run(self):
input_dict = {
"data": (DATAS_DIR / "webClassification1.jsonl").resolve().as_posix(),
"column_mapping": {"context": "${data.context}"},
"flow": (FLOWS_DIR / "web_classification").resolve().as_posix(),
}
bulk_run = Run._load_from_dict(
data=input_dict, context={BASE_PATH_CONTEXT_KEY: FLOWS_DIR}, additional_message=""
)
assert isinstance(bulk_run, Run)
def test_dot_env_resolve(self):
run_id = str(uuid.uuid4())
source = f"{RUNS_DIR}/sample_bulk_run.yaml"
run = load_run(source=source, params_override=[{"name": run_id}])
assert run.environment_variables == {"FOO": "BAR"}
def test_run_invalid_flow_path(self):
run_id = str(uuid.uuid4())
source = f"{RUNS_DIR}/bulk_run_invalid_flow_path.yaml"
with pytest.raises(ValidationError) as e:
load_run(source=source, params_override=[{"name": run_id}])
assert "Can't find directory or file in resolved absolute path:" in str(e.value)
def test_run_invalid_remote_flow(self):
run_id = str(uuid.uuid4())
source = f"{RUNS_DIR}/bulk_run_invalid_remote_flow_str.yaml"
with pytest.raises(ValidationError) as e:
load_run(source=source, params_override=[{"name": run_id}])
assert "Invalid remote flow path. Currently only azureml:<flow-name> is supported" in str(e.value)
def test_data_not_exist_validation_error(self):
source = f"{RUNS_DIR}/sample_bulk_run.yaml"
with pytest.raises(ValidationError) as e:
load_run(source=source, params_override=[{"data": "not_exist"}])
assert "Can't find directory or file" in str(e.value)
assert "Invalid remote path." in str(e.value)
@pytest.mark.parametrize(
"source, error_msg",
[
(f"{RUNS_DIR}/illegal/non_exist_data.yaml", "Can't find directory or file"),
],
)
def test_invalid_yaml(self, source, error_msg):
with pytest.raises(ValidationError) as e:
create_yaml_run(source=source)
assert error_msg in str(e.value)
def test_run_bulk_invalid_params(self, pf):
# Test if function raises FileNotFoundError
with pytest.raises(FileNotFoundError):
pf.run(flow="invalid_path", data="fake_data")
with pytest.raises(FileNotFoundError):
pf.run(flow="invalid_path", data="fake_data", batch_run="fake_run")
def test_overwrite_variant(self):
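        # node1 opts into variants and its default variant is "variant1", so overwriting with an
        # explicit variant or with no arguments should both apply variant1's inputs.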
flow_dag = {
"nodes": [
{
"name": "node1",
"use_variants": True,
"variant_id": "default",
"inputs": {
"param1": "value1",
"param2": "value2",
},
},
],
"node_variants": {
"node1": {
"default_variant_id": "variant1",
"variants": {
"variant1": {
"node": {
"inputs": {
"param1": "value1_variant1",
"param2": "value2_variant1",
},
},
},
},
},
},
}
# Test if function raises InvalidFlowError
with pytest.raises(InvalidFlowError):
overwrite_variant(flow_dag, "node3", "variant1")
with pytest.raises(InvalidFlowError):
overwrite_variant(flow_dag, "node1", "variant3")
# Test if function overwrites variant correctly
dag = copy.deepcopy(flow_dag)
overwrite_variant(dag, "node1", "variant1")
assert dag["nodes"][0]["inputs"]["param1"] == "value1_variant1"
assert dag["nodes"][0]["inputs"]["param2"] == "value2_variant1"
# test overwrite default variant
dag = copy.deepcopy(flow_dag)
overwrite_variant(dag)
assert dag["nodes"][0]["inputs"]["param1"] == "value1_variant1"
assert dag["nodes"][0]["inputs"]["param2"] == "value2_variant1"
@patch("promptflow._sdk.operations._run_operations.RunOperations.update")
def test_submit(self, mock_update):
# Define input parameters
flow_path = f"{FLOWS_DIR}/web_classification"
client = PFClient()
run_submitter = RunSubmitter(client.runs)
run = Run(
name=str(uuid.uuid4()),
flow=Path(flow_path),
data=f"{DATAS_DIR}/webClassification3.jsonl",
)
# Submit run
run_submitter.submit(run)
# Check if Run.update method was called
mock_update.assert_called_once()
def test_flow_run_with_non_english_inputs(self, pf):
flow_path = f"{FLOWS_DIR}/flow_with_non_english_input"
data = f"{FLOWS_DIR}/flow_with_non_english_input/data.jsonl"
run = pf.run(flow=flow_path, data=data, column_mapping={"text": "${data.text}"})
local_storage = LocalStorageOperations(run=run)
# assert non english in output.jsonl
output_jsonl_path = local_storage._outputs_path
with open(output_jsonl_path, "r", encoding="utf-8") as f:
outputs_text = f.readlines()
assert outputs_text == [
'{"line_number": 0, "output": "Hello 123 日本語"}\n',
'{"line_number": 1, "output": "World 123 日本語"}\n',
]
# assert non english in memory
outputs = local_storage.load_outputs()
assert outputs == {"output": ["Hello 123 日本語", "World 123 日本語"]}
@pytest.mark.usefixtures("enable_logger_propagate")
def test_flow_run_with_unknown_field(self, caplog):
run_yaml = Path(RUNS_DIR) / "sample_bulk_run.yaml"
load_run(source=run_yaml, params_override=[{"unknown_field": "unknown_value"}])
assert "Unknown fields found" in caplog.text
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_local_storage_operations.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pandas as pd
import pytest
from promptflow._sdk._constants import LINE_NUMBER
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
@pytest.mark.unittest
class TestLocalStorageOperations:
def test_outputs_padding(self) -> None:
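        # _outputs_padding should insert empty rows for input line numbers that have no output,
        # so the padded dataframe aligns one-to-one with the given inputs_line_numbers.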
data = [
{LINE_NUMBER: 1, "col": "a"},
{LINE_NUMBER: 2, "col": "b"},
]
df = pd.DataFrame(data)
df_with_padding = LocalStorageOperations._outputs_padding(df, inputs_line_numbers=[0, 1, 2, 3, 4])
df_with_padding.fillna("", inplace=True)
assert len(df_with_padding) == 5
assert df_with_padding.iloc[0].to_dict() == {LINE_NUMBER: 0, "col": ""}
assert df_with_padding.iloc[1].to_dict() == {LINE_NUMBER: 1, "col": "a"}
assert df_with_padding.iloc[2].to_dict() == {LINE_NUMBER: 2, "col": "b"}
assert df_with_padding.iloc[3].to_dict() == {LINE_NUMBER: 3, "col": ""}
assert df_with_padding.iloc[4].to_dict() == {LINE_NUMBER: 4, "col": ""}
# in evaluation run, inputs may not have all line number
df_with_padding = LocalStorageOperations._outputs_padding(df, inputs_line_numbers=[1, 2, 4])
df_with_padding.fillna("", inplace=True)
assert len(df_with_padding) == 3
assert df_with_padding.iloc[0].to_dict() == {LINE_NUMBER: 1, "col": "a"}
assert df_with_padding.iloc[1].to_dict() == {LINE_NUMBER: 2, "col": "b"}
assert df_with_padding.iloc[2].to_dict() == {LINE_NUMBER: 4, "col": ""}
| 0 |
promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test | promptflow_repo/promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_utils.py | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import datetime
import importlib
import json
import os
import shutil
import sys
import tempfile
import threading
import time
from pathlib import Path
from unittest.mock import patch
import mock
import pandas as pd
import pytest
from requests import Response
from promptflow._cli._params import AppendToDictAction
from promptflow._cli._utils import (
_build_sorted_column_widths_tuple_list,
_calculate_column_widths,
list_of_dict_to_nested_dict,
)
from promptflow._constants import LAST_CHECK_TIME, PF_VERSION_CHECK
from promptflow._sdk._constants import HOME_PROMPT_FLOW_DIR, PROMPT_FLOW_HOME_DIR_ENV_VAR
from promptflow._sdk._errors import GenerateFlowToolsJsonError
from promptflow._sdk._telemetry.logging_handler import get_scrubbed_cloud_role
from promptflow._sdk._utils import (
_generate_connections_dir,
decrypt_secret_value,
encrypt_secret_value,
generate_flow_tools_json,
override_connection_config_with_environment_variable,
refresh_connections_dir,
resolve_connections_environment_variable_reference,
snake_to_camel,
)
from promptflow._utils.load_data import load_data
from promptflow._utils.retry_utils import http_retry_wrapper, retry
from promptflow._utils.version_hint_utils import check_latest_version
TEST_ROOT = Path(__file__).parent.parent.parent
CONNECTION_ROOT = TEST_ROOT / "test_configs/connections"
@pytest.mark.unittest
class TestUtils:
def test_encrypt_decrypt_value(self):
test_value = "test"
encrypted = encrypt_secret_value(test_value)
assert decrypt_secret_value("mock", encrypted) == test_value
def test_snake_to_camel(self):
assert snake_to_camel("test_snake_case") == "TestSnakeCase"
assert snake_to_camel("TestSnakeCase") == "TestSnakeCase"
def test_sqlite_retry(self, capfd) -> None:
from sqlalchemy.exc import OperationalError
from promptflow._sdk._orm.retry import sqlite_retry
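        # sqlite_retry is expected to retry the wrapped operation on OperationalError before
        # finally re-raising; the stdout count below implies three attempts in total.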
@sqlite_retry
def mock_sqlite_op() -> None:
print("sqlite op...")
raise OperationalError("statement", "params", "orig")
# it will finally raise an OperationalError
with pytest.raises(OperationalError):
mock_sqlite_op()
# assert function execution time from stdout
out, _ = capfd.readouterr()
assert out.count("sqlite op...") == 3
def test_resolve_connections_environment_variable_reference(self):
connections = {
"test_connection": {
"type": "AzureOpenAIConnection",
"value": {
"api_key": "${env:AZURE_OPENAI.API_KEY}",
"api_base": "${env:AZURE_OPENAI_API_BASE}",
},
},
"test_custom_connection": {
"type": "CustomConnection",
"value": {"key": "${env:CUSTOM_KEY}", "key2": "value2"},
},
}
with mock.patch.dict(
os.environ, {"AZURE_OPENAI.API_KEY": "KEY", "AZURE_OPENAI_API_BASE": "BASE", "CUSTOM_KEY": "CUSTOM_VALUE"}
):
resolve_connections_environment_variable_reference(connections)
assert connections["test_connection"]["value"]["api_key"] == "KEY"
assert connections["test_connection"]["value"]["api_base"] == "BASE"
assert connections["test_custom_connection"]["value"]["key"] == "CUSTOM_VALUE"
# test bad cases
connections = {
"test_connection": {
"type": "AzureOpenAIConnection",
"value": {"none_value": None, "integer_value": 1, "float_value": 1.0, "dict_value": {}},
},
}
resolve_connections_environment_variable_reference(connections)
assert connections["test_connection"]["value"] == {
"none_value": None,
"integer_value": 1,
"float_value": 1.0,
"dict_value": {},
}
def test_override_connection_config_with_environment_variable(self):
connections = {
"test_connection": {
"type": "AzureOpenAIConnection",
"value": {
"api_key": "KEY",
"api_base": "https://gpt-test-eus.openai.azure.com/",
},
},
"test_custom_connection": {
"type": "CustomConnection",
"value": {"key": "value1", "key2": "value2"},
},
}
with mock.patch.dict(
os.environ, {"TEST_CONNECTION_API_BASE": "BASE", "TEST_CUSTOM_CONNECTION_KEY": "CUSTOM_VALUE"}
):
override_connection_config_with_environment_variable(connections)
assert connections["test_connection"]["value"]["api_key"] == "KEY"
assert connections["test_connection"]["value"]["api_base"] == "BASE"
assert connections["test_custom_connection"]["value"]["key"] == "CUSTOM_VALUE"
assert connections["test_custom_connection"]["value"]["key2"] == "value2"
def test_generate_flow_tools_json(self) -> None:
# call twice to ensure system path won't be affected during generation
for _ in range(2):
flow_src_path = "./tests/test_configs/flows/flow_with_sys_inject"
with tempfile.TemporaryDirectory() as temp_dir:
flow_dst_path = os.path.join(temp_dir, "flow_with_sys_inject")
shutil.copytree(flow_src_path, flow_dst_path)
flow_tools_json = generate_flow_tools_json(flow_dst_path, dump=False)
groundtruth = {
"hello.py": {
"type": "python",
"inputs": {
"input1": {
"type": [
"string",
],
},
},
"source": "hello.py",
"function": "my_python_tool",
}
}
assert flow_tools_json["code"] == groundtruth
def test_generate_flow_tools_json_expecting_fail(self) -> None:
flow_path = "./tests/test_configs/flows/flow_with_invalid_import"
with pytest.raises(GenerateFlowToolsJsonError) as e:
generate_flow_tools_json(flow_path, dump=False)
assert "Generate meta failed, detail error(s):" in str(e.value)
# raise_error = False
flow_tools_json = generate_flow_tools_json(flow_path, dump=False, raise_error=False)
assert len(flow_tools_json["code"]) == 0
@pytest.mark.parametrize(
"python_path, env_hash",
[
("D:\\Tools\\Anaconda3\\envs\\pf\\python.exe", ("a9620c3cdb7ccf3ec9f4005e5b19c12d1e1fef80")),
("/Users/fake_user/anaconda3/envs/pf/bin/python3.10", ("e3f33eadd9be376014eb75a688930930ca83c056")),
],
)
def test_generate_connections_dir(self, python_path, env_hash):
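        # The connections dir is expected to be keyed by a hash derived from the Python interpreter
        # path (the parametrized values above look like SHA-1 digests), so different environments
        # get isolated connection folders under HOME_PROMPT_FLOW_DIR/envs.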
expected_result = (HOME_PROMPT_FLOW_DIR / "envs" / env_hash / "connections").resolve()
with patch.object(sys, "executable", python_path):
result = _generate_connections_dir()
assert result == expected_result
def test_refresh_connections_dir(self):
from promptflow._core.tools_manager import collect_package_tools_and_connections
tools, specs, templates = collect_package_tools_and_connections()
refresh_connections_dir(specs, templates)
conn_dir = _generate_connections_dir()
assert len(os.listdir(conn_dir)) > 0, "No files were generated"
@pytest.mark.parametrize("concurrent_count", [1, 2, 4, 8])
def test_concurrent_execution_of_refresh_connections_dir(self, concurrent_count):
threads = []
# Create and start threads
for _ in range(concurrent_count):
thread = threading.Thread(
target=lambda: refresh_connections_dir(connection_spec_files=[], connection_template_yamls=[])
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def test_concurrent_hint_for_update(self):
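        # datetime is mocked so the cached last-check time looks more than 7 days old, which should
        # trigger the background version check; the check itself is replaced with a slow mock so the
        # cache file is not refreshed before the assertion below runs.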
def mock_check_latest_version():
time.sleep(5)
check_latest_version()
with patch("promptflow._utils.version_hint_utils.datetime") as mock_datetime, patch(
"promptflow._utils.version_hint_utils.check_latest_version", side_effect=mock_check_latest_version
):
from promptflow._sdk._telemetry import monitor_operation
class HintForUpdate:
@monitor_operation(activity_name="pf.flows.test")
def hint_func(self):
return
current_time = datetime.datetime.now()
mock_datetime.datetime.now.return_value = current_time
mock_datetime.datetime.strptime.return_value = current_time - datetime.timedelta(days=8)
mock_datetime.timedelta.return_value = datetime.timedelta(days=7)
HintForUpdate().hint_func()
assert Path(HOME_PROMPT_FLOW_DIR / PF_VERSION_CHECK).exists()
with open(HOME_PROMPT_FLOW_DIR / PF_VERSION_CHECK, "r") as f:
cached_versions = json.load(f)
                # since mock_check_latest_version runs in a daemon thread, it exits when the main thread
                # completes, so LAST_CHECK_TIME won't be updated during its 5s sleep
assert LAST_CHECK_TIME not in cached_versions or cached_versions[LAST_CHECK_TIME] != str(current_time)
@pytest.mark.parametrize(
"data_path",
[
"./tests/test_configs/datas/load_data_cases/colors.csv",
"./tests/test_configs/datas/load_data_cases/colors.json",
"./tests/test_configs/datas/load_data_cases/colors.jsonl",
"./tests/test_configs/datas/load_data_cases/colors.tsv",
"./tests/test_configs/datas/load_data_cases/colors.parquet",
],
)
def test_load_data(self, data_path: str) -> None:
# for csv and tsv format, all columns will be string;
# for rest, integer will be int and float will be float
is_string = "csv" in data_path or "tsv" in data_path
df = load_data(data_path)
assert len(df) == 3
assert df[0]["name"] == "Red"
assert isinstance(df[0]["id_text"], str)
assert df[0]["id_text"] == "1.0"
if is_string:
assert isinstance(df[0]["id_int"], str)
assert df[0]["id_int"] == "1"
assert isinstance(df[0]["id_float"], str)
assert df[0]["id_float"] == "1.0"
else:
assert isinstance(df[0]["id_int"], int)
assert df[0]["id_int"] == 1
assert isinstance(df[0]["id_float"], float)
assert df[0]["id_float"] == 1.0
@pytest.mark.parametrize(
"data_path",
[
"./tests/test_configs/datas/load_data_cases/10k.jsonl",
"./tests/test_configs/datas/load_data_cases/10k",
],
)
def test_load_10k_data(self, data_path: str) -> None:
df = load_data(data_path)
assert len(df) == 10000
# specify max_rows_count
max_rows_count = 5000
head_rows = load_data(data_path, max_rows_count=max_rows_count)
assert len(head_rows) == max_rows_count
assert head_rows == df[:max_rows_count]
@pytest.mark.parametrize(
"script_name, expected_result",
[
("pfs", "pfs"),
("pfutil.py", "pfutil.py"),
("pf", "pf"),
("pfazure", "pfazure"),
("pf.exe", "pf.exe"),
("pfazure.exe", "pfazure.exe"),
("app.py", "app.py"),
("python -m unittest", "python -m unittest"),
("pytest", "pytest"),
("gunicorn", "gunicorn"),
("ipykernel_launcher.py", "ipykernel_launcher.py"),
("jupyter-notebook", "jupyter-notebook"),
("jupyter-lab", "jupyter-lab"),
("python", "python"),
("Unknown Application", "Unknown Application"),
("unknown_script.py", "***.py"),
("path/to/unknown_script.py", "***.py"),
(r"path\to\unknown_script.py", "***.py"),
('invalid_chars_\\/:*?"<>|', "***"),
],
)
def test_get_scrubbed_cloud_role(self, script_name, expected_result):
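# Known entry points (pf, pfazure, pytest, jupyter, ...) are reported as-is; unrecognized
# script names are scrubbed to "***"/"***.py", presumably so arbitrary user file names are
# not reported in telemetry.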
with mock.patch("sys.argv", [script_name]):
assert get_scrubbed_cloud_role() == expected_result
def test_configure_pf_home_dir(self, tmpdir) -> None:
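# _constants computes HOME_PROMPT_FLOW_DIR at import time, so the module is reloaded after
# patching the env var, and reloaded once more at the end to restore the default location.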
from promptflow._sdk import _constants
custom_pf_home_dir_path = Path(tmpdir / ".promptflow").resolve()
assert not custom_pf_home_dir_path.exists()
with patch.dict(os.environ, {PROMPT_FLOW_HOME_DIR_ENV_VAR: custom_pf_home_dir_path.as_posix()}):
importlib.reload(_constants)
assert _constants.HOME_PROMPT_FLOW_DIR.as_posix() == custom_pf_home_dir_path.as_posix()
assert _constants.HOME_PROMPT_FLOW_DIR.is_dir()
importlib.reload(_constants)
def test_configure_pf_home_dir_with_invalid_path(self) -> None:
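# An unusable PROMPT_FLOW_HOME_DIR value should make _constants fall back to ~/.promptflow.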
from promptflow._sdk import _constants
invalid_path = "/invalid:path"
with patch.dict(os.environ, {PROMPT_FLOW_HOME_DIR_ENV_VAR: invalid_path}):
assert os.getenv(PROMPT_FLOW_HOME_DIR_ENV_VAR) == invalid_path
importlib.reload(_constants)
assert _constants.HOME_PROMPT_FLOW_DIR.as_posix() == (Path.home() / ".promptflow").resolve().as_posix()
importlib.reload(_constants)
@pytest.mark.unittest
class TestCLIUtils:
def test_list_of_dict_to_nested_dict(self):
test_list = [{"node1.connection": "a"}, {"node2.deploy_name": "b"}]
result = list_of_dict_to_nested_dict(test_list)
assert result == {"node1": {"connection": "a"}, "node2": {"deploy_name": "b"}}
test_list = [{"node1.connection": "a"}, {"node1.deploy_name": "b"}]
result = list_of_dict_to_nested_dict(test_list)
assert result == {"node1": {"connection": "a", "deploy_name": "b"}}
def test_append_to_dict_action(self):
parser = argparse.ArgumentParser(prog="test_dict_action")
parser.add_argument("--dict", action=AppendToDictAction, nargs="+")
args = ["--dict", "key1=val1", "'key2=val2'", '"key3=val3"', "key4='val4'", "key5=\"val5'"]
args = parser.parse_args(args)
expect_dict = {
"key1": "val1",
"key2": "val2",
"key3": "val3",
"key4": "val4",
"key5": "\"val5'",
}
assert args.dict[0] == expect_dict
def test_build_sorted_column_widths_tuple_list(self) -> None:
columns = ["col1", "col2", "col3"]
values1 = {"col1": 1, "col2": 4, "col3": 3}
values2 = {"col1": 3, "col2": 3, "col3": 1}
margins = {"col1": 1, "col2": 2, "col3": 2}
# sort by (max(values1, values2) + margins)
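# e.g. col2: max(4, 3) + 2 = 6, col3: max(3, 1) + 2 = 5, col1: max(1, 3) + 1 = 4,
# returned in descending width order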
res = _build_sorted_column_widths_tuple_list(columns, values1, values2, margins)
assert res == [("col2", 6), ("col3", 5), ("col1", 4)]
def test_calculate_column_widths(self) -> None:
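# Widths are derived from the column contents and should keep the rendered table within
# the given terminal width (the expected widths sum to 85 <= 120).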
data = [
{
"inputs.url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g",
"inputs.answer": "Channel",
"inputs.evidence": "Url",
"outputs.category": "Channel",
"outputs.evidence": "URL",
},
{
"inputs.url": "https://arxiv.org/abs/2307.04767",
"inputs.answer": "Academic",
"inputs.evidence": "Text content",
"outputs.category": "Academic",
"outputs.evidence": "Text content",
},
{
"inputs.url": "https://play.google.com/store/apps/details?id=com.twitter.android",
"inputs.answer": "App",
"inputs.evidence": "Both",
"outputs.category": "App",
"outputs.evidence": "Both",
},
]
df = pd.DataFrame(data)
terminal_width = 120
res = _calculate_column_widths(df, terminal_width)
assert res == [4, 23, 13, 15, 15, 15]
def test_calculate_column_widths_edge_case(self) -> None:
nan = float("nan")
# test case comes from examples/flow/evaluation/eval-qna-non-rag
data = [
{
"inputs.groundtruth": "The Alpine Explorer Tent has the highest rainfly waterproof rating at 3000m",
"inputs.answer": "There are various tents available in the market that offer different levels of waterproofing. However, one tent that is often highly regarded for its waterproofing capabilities is the MSR Hubba Hubba NX tent. It features a durable rainfly and a bathtub-style floor construction, both of which contribute to its excellent water resistance. It is always recommended to read product specifications and customer reviews to ensure you find a tent that meets your specific waterproofing requirements.", # noqa: E501
"inputs.context": "{${data.context}}",
"inputs.question": "Which tent is the most waterproof?",
"inputs.metrics": "gpt_groundedness,f1_score",
"inputs.line_number": 0,
"inputs.ground_truth": "The Alpine Explorer Tent has the highest rainfly waterproof rating at 3000m",
"outputs.line_number": 0,
"outputs.ada_similarity": nan,
"outputs.f1_score": 0.049999999999999996,
"outputs.gpt_coherence": nan,
"outputs.gpt_fluency": nan,
"outputs.gpt_groundedness": 3.0,
"outputs.gpt_relevance": nan,
"outputs.gpt_similarity": nan,
},
{
"inputs.groundtruth": "The Adventure Dining Table has a higher weight capacity than all of the other camping tables mentioned", # noqa: E501
"inputs.answer": "There are various camping tables available that can hold different amounts of weight. Some heavy-duty camping tables can hold up to 300 pounds or more, while others may have lower weight capacities. It's important to check the specifications of each table before purchasing to ensure it can support the weight you require.", # noqa: E501
"inputs.context": "{${data.context}}",
"inputs.question": "Which tent is the most waterproof?",
"inputs.metrics": "gpt_groundedness,f1_score",
"inputs.ground_truth": "The Alpine Explorer Tent has the highest rainfly waterproof rating at 3000m",
"outputs.line_number": 1,
"outputs.ada_similarity": nan,
"outputs.f1_score": 0.0,
"outputs.gpt_coherence": nan,
"outputs.gpt_fluency": nan,
"outputs.gpt_groundedness": 3.0,
"outputs.gpt_relevance": nan,
"outputs.gpt_similarity": nan,
},
]
df = pd.DataFrame(data)
terminal_width = 74 # GitHub Actions scenario
res = _calculate_column_widths(df, terminal_width)
# each column width should be at least 1 to avoid a tabulate error
assert res == [4, 1, 13, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
@pytest.mark.unittest
class TestRetryUtils:
def test_retry(self):
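# The retry decorator should attempt the wrapped method `tries` times before letting the
# exception propagate; the counter confirms both attempts ran.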
counter = 0
class A:
def mock_f(self):
return 1
class B(A):
@retry(Exception, tries=2, delay=1, backoff=1)
def mock_f(self):
nonlocal counter
counter += 1
raise Exception("mock exception")
with pytest.raises(Exception):
B().mock_f()
assert counter == 2
def test_http_retry(self):
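# http_retry_wrapper treats 429 as retryable, so the mocked request is attempted `tries` times.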
counter = 0
def mock_http_request():
nonlocal counter
counter += 1
resp = Response()
resp.status_code = 429
return resp
http_retry_wrapper(mock_http_request, tries=2, delay=1, backoff=1)()
assert counter == 2