| text | id | metadata | __index_level_0__ |
|---|---|---|---|
| string (length 8 – 1.72M) | string (length 22 – 143) | dict | int64 (0 – 104) |
from promptflow import tool
@tool
def my_python_tool(input1: str) -> str:
return 'hello ' + input1
| promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/node_a.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/node_a.py",
"repo_id": "promptflow",
"token_count": 37
} | 60 |
import asyncio
import json
from openai import AsyncOpenAI
from openai.types.beta.threads import MessageContentImageFile, MessageContentText
from promptflow import tool, trace
from promptflow.connections import OpenAIConnection
from promptflow.contracts.multimedia import Image
from promptflow.contracts.types import AssistantDefinition
from promptflow.exceptions import SystemErrorException
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker
URL_PREFIX = "https://platform.openai.com/files/"
RUN_STATUS_POLLING_INTERVAL_IN_MILSEC = 1000
@tool
async def add_message_and_run(
conn: OpenAIConnection,
assistant_id: str,
thread_id: str,
message: list,
assistant_definition: AssistantDefinition,
download_images: bool,
):
cli = await get_openai_api_client(conn)
    invoker = await get_assistant_tool_invoker(assistant_definition)
# Check if assistant id is valid. If not, create a new assistant.
# Note: tool registration at run creation, rather than at assistant creation.
if not assistant_id:
assistant = await create_assistant(cli, assistant_definition)
assistant_id = assistant.id
await add_message(cli, message, thread_id)
run = await start_run(cli, assistant_id, thread_id, assistant_definition, invoker)
await wait_for_run_complete(cli, thread_id, invoker, run)
messages = await get_message(cli, thread_id)
file_id_references = await get_openai_file_references(messages.data[0].content, download_images, conn)
return {"content": to_pf_content(messages.data[0].content), "file_id_references": file_id_references}
@trace
async def get_openai_api_client(conn: OpenAIConnection):
cli = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
return cli
@trace
async def get_assistant_tool_invoker(assistant_definition: AssistantDefinition):
invoker = AssistantToolInvoker.init(assistant_definition.tools)
return invoker
@trace
async def create_assistant(cli: AsyncOpenAI, assistant_definition: AssistantDefinition):
assistant = await cli.beta.assistants.create(
instructions=assistant_definition.instructions, model=assistant_definition.model
)
print(f"Created assistant: {assistant.id}")
return assistant
@trace
async def add_message(cli: AsyncOpenAI, message: list, thread_id: str):
content = extract_text_from_message(message)
file_ids = await extract_file_ids_from_message(cli, message)
msg = await cli.beta.threads.messages.create(thread_id=thread_id, role="user", content=content, file_ids=file_ids)
print("Created message message_id: {msg.id}, assistant_id: {assistant_id}, thread_id: {thread_id}")
return msg
@trace
async def start_run(
cli: AsyncOpenAI,
assistant_id: str,
thread_id: str,
assistant_definition: AssistantDefinition,
invoker: AssistantToolInvoker,
):
tools = invoker.to_openai_tools()
run = await cli.beta.threads.runs.create(
assistant_id=assistant_id,
thread_id=thread_id,
model=assistant_definition.model,
instructions=assistant_definition.instructions,
tools=tools,
)
print(f"Assistant_id: {assistant_id}, thread_id: {thread_id}, run_id: {run.id}")
return run
async def wait_for_status_check():
await asyncio.sleep(RUN_STATUS_POLLING_INTERVAL_IN_MILSEC / 1000.0)
async def get_run_status(cli: AsyncOpenAI, thread_id: str, run_id: str):
run = await cli.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
print(f"Run status: {run.status}")
return run
@trace
async def get_tool_calls_outputs(invoker: AssistantToolInvoker, run):
tool_calls = run.required_action.submit_tool_outputs.tool_calls
tool_outputs = []
for tool_call in tool_calls:
tool_name = tool_call.function.name
tool_args = json.loads(tool_call.function.arguments)
print(f"Invoking tool: {tool_call.function.name} with args: {tool_args}")
output = invoker.invoke_tool(tool_name, tool_args)
tool_outputs.append(
{
"tool_call_id": tool_call.id,
"output": str(output),
}
)
print(f"Tool output: {str(output)}")
return tool_outputs
@trace
async def submit_tool_calls_outputs(cli: AsyncOpenAI, thread_id: str, run_id: str, tool_outputs: list):
await cli.beta.threads.runs.submit_tool_outputs(thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs)
print(f"Submitted all required resonses for run: {run_id}")
@trace
async def require_actions(cli: AsyncOpenAI, thread_id: str, run, invoker: AssistantToolInvoker):
tool_outputs = await get_tool_calls_outputs(invoker, run)
await submit_tool_calls_outputs(cli, thread_id, run.id, tool_outputs)
@trace
async def wait_for_run_complete(cli: AsyncOpenAI, thread_id: str, invoker: AssistantToolInvoker, run):
while run.status != "completed":
await wait_for_status_check()
run = await get_run_status(cli, thread_id, run.id)
if run.status == "requires_action":
await require_actions(cli, thread_id, run, invoker)
elif run.status == "in_progress" or run.status == "completed":
continue
else:
raise Exception(f"The assistant tool runs in '{run.status}' status. Message: {run.last_error.message}")
@trace
async def get_run_steps(cli: AsyncOpenAI, thread_id: str, run_id: str):
run_steps = await cli.beta.threads.runs.steps.list(thread_id=thread_id, run_id=run_id)
print("step details: \n")
for step_data in run_steps.data:
print(step_data.step_details)
@trace
async def get_message(cli: AsyncOpenAI, thread_id: str):
messages = await cli.beta.threads.messages.list(thread_id=thread_id)
return messages
def extract_text_from_message(message: list):
content = []
for m in message:
if isinstance(m, str):
content.append(m)
continue
message_type = m.get("type", "")
if message_type == "text" and "text" in m:
content.append(m["text"])
return "\n".join(content)
async def extract_file_ids_from_message(cli: AsyncOpenAI, message: list):
file_ids = []
for m in message:
if isinstance(m, str):
continue
message_type = m.get("type", "")
if message_type == "file_path" and "file_path" in m:
path = m["file_path"].get("path", "")
if path:
file = await cli.files.create(file=open(path, "rb"), purpose="assistants")
file_ids.append(file.id)
return file_ids
async def get_openai_file_references(content: list, download_image: bool, conn: OpenAIConnection):
file_id_references = {}
for item in content:
if isinstance(item, MessageContentImageFile):
file_id = item.image_file.file_id
if download_image:
file_id_references[file_id] = {
"content": await download_openai_image(file_id, conn),
"url": URL_PREFIX + file_id,
}
else:
file_id_references[file_id] = {"url": URL_PREFIX + file_id}
elif isinstance(item, MessageContentText):
for annotation in item.text.annotations:
if annotation.type == "file_path":
file_id = annotation.file_path.file_id
file_id_references[file_id] = {"url": URL_PREFIX + file_id}
elif annotation.type == "file_citation":
file_id = annotation.file_citation.file_id
file_id_references[file_id] = {"url": URL_PREFIX + file_id}
else:
raise Exception(f"Unsupported content type: '{type(item)}'.")
return file_id_references
def to_pf_content(content: list):
pf_content = []
for item in content:
if isinstance(item, MessageContentImageFile):
file_id = item.image_file.file_id
pf_content.append({"type": "image_file", "image_file": {"file_id": file_id}})
elif isinstance(item, MessageContentText):
text_dict = {"type": "text", "text": {"value": item.text.value, "annotations": []}}
for annotation in item.text.annotations:
annotation_dict = {
"type": "file_path",
"text": annotation.text,
"start_index": annotation.start_index,
"end_index": annotation.end_index,
}
if annotation.type == "file_path":
annotation_dict["file_path"] = {"file_id": annotation.file_path.file_id}
elif annotation.type == "file_citation":
annotation_dict["file_citation"] = {"file_id": annotation.file_citation.file_id}
text_dict["text"]["annotations"].append(annotation_dict)
pf_content.append(text_dict)
else:
raise SystemErrorException(f"Unsupported content type: {type(item)}")
return pf_content
async def download_openai_image(file_id: str, conn: OpenAIConnection):
cli = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
image_data = await cli.files.content(file_id)
return Image(image_data.read())
| promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/add_message_and_run.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/add_message_and_run.py",
"repo_id": "promptflow",
"token_count": 3874
} | 61 |
{# Please replace the template with your own prompt. #}
system:
Your task is to generate what I ask
user:
Write a simple {{text}} program that displays the greeting message when executed.
| promptflow/src/promptflow/tests/test_configs/flows/basic-with-connection/hello.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/basic-with-connection/hello.jinja2",
"repo_id": "promptflow",
"token_count": 45
} | 62 |
from promptflow import tool
from promptflow.contracts.multimedia import Image
@tool
def mock_chat(chat_history: list, question: list):
ensure_image_in_list(question, "question")
for item in chat_history:
ensure_image_in_list(item["inputs"]["question"], "inputs of chat history")
ensure_image_in_list(item["outputs"]["answer"], "outputs of chat history")
res = []
for item in question:
if isinstance(item, Image):
res.append(item)
res.append("text response")
return res
def ensure_image_in_list(value: list, name: str):
include_image = False
for item in value:
if isinstance(item, Image):
include_image = True
if not include_image:
raise Exception(f"No image found in {name}, you should include at least one image in your {name}.")
| promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_image/mock_chat.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_image/mock_chat.py",
"repo_id": "promptflow",
"token_count": 310
} | 63 |
from promptflow import tool
@tool
def grade(groundtruth: str, prediction: str):
groundtruth = groundtruth.lower().strip('"')
prediction = prediction.lower().strip('"')
return "Correct" if groundtruth == prediction else "Incorrect"
| promptflow/src/promptflow/tests/test_configs/flows/classification_accuracy_evaluation/grade.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/classification_accuracy_evaluation/grade.py",
"repo_id": "promptflow",
"token_count": 71
} | 64 |
from promptflow import tool
@tool
def kql_retriever(content: str) -> str:
return "KQL: " + content | promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/kql_tsg_retriever.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/kql_tsg_retriever.py",
"repo_id": "promptflow",
"token_count": 35
} | 65 |
inputs:
image:
type: image
default: logo.jpg
outputs:
output:
type: image
reference: ${python_node.output}
nodes:
- name: python_node
type: python
source:
type: code
path: pick_an_image.py
inputs:
image_1: ${inputs.image}
image_2:
data:image/png;path: logo_2.png
- name: aggregate
type: python
source:
type: code
path: merge_images.py
inputs:
image_1:
- data:image/jpg;path: logo.jpg
image_2: ${inputs.image}
image_3: ${python_node.output}
aggregation: true
| promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_simple_image/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_simple_image/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 231
} | 66 |
from promptflow import tool
from promptflow.connections import CustomConnection
@tool
def my_python_tool(text: str, connection: CustomConnection) -> dict:
return connection._to_dict()
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_custom_connection/hello.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_custom_connection/hello.py",
"repo_id": "promptflow",
"token_count": 51
} | 67 |
id: web_classification
inputs:
url:
default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
is_chat_input: false
type: string
nodes:
- inputs:
url: ${inputs.url}
name: fetch_text_content_from_url
reduce: false
source:
path: fetch_text_content_from_url.py
type: code
type: python
outputs:
text:
evaluation_only: false
is_chat_output: false
reference: ${fetch_text_content_from_url.output}
type: string
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_ignore_file/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_ignore_file/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 219
} | 68 |
{"text": "Hello World!"}
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_package_tool_with_custom_connection/data.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_package_tool_with_custom_connection/data.jsonl",
"repo_id": "promptflow",
"token_count": 9
} | 69 |
inputs:
user_id:
type: int
default: 1
outputs:
output:
type: string
reference: ${greetings.output.greeting}
nodes:
- name: greetings
type: python
source:
type: code
path: greetings.py
inputs:
user_id: ${inputs.user_id}
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_trace/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_trace/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 111
} | 70 |
inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${llm_tool_with_duplicated_inputs.output}
nodes:
- name: llm_tool_with_duplicated_inputs
type: llm
provider: AzureOpenAI
api: completion
module: promptflow.tools.aoai
connection: azure_open_ai_connection
source:
type: code
path: prompt_with_duplicated_inputs.jinja2
inputs:
deployment_name: text-ada-001
max_tokens: 16
text: ${inputs.text}
| promptflow/src/promptflow/tests/test_configs/flows/llm_tool_with_duplicated_inputs/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/llm_tool_with_duplicated_inputs/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 189
} | 71 |
from promptflow import tool
@tool
def stringify_num():
print("hello world")
| promptflow/src/promptflow/tests/test_configs/flows/no_inputs_outputs/say_hello.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/no_inputs_outputs/say_hello.py",
"repo_id": "promptflow",
"token_count": 26
} | 72 |
{"key": "no"}
{"key": "raise"}
{"key": "matter"}
| promptflow/src/promptflow/tests/test_configs/flows/partial_fail/data.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/partial_fail/data.jsonl",
"repo_id": "promptflow",
"token_count": 21
} | 73 |
{
"name": "summarize_text_content_prompt",
"type": "prompt",
"inputs": {
"text": {
"type": [
"string"
]
},
"image1": {
"type": [
"image"
]
},
"image2": {
"type": [
"image"
]
},
"image3": {
"type": [
"image"
]
},
"image4": {
"type": [
"image"
]
},
"video1": {
"type": [
"string"
]
}
},
"source": "summarize_text_content_prompt.jinja2"
} | promptflow/src/promptflow/tests/test_configs/flows/prompt_tools/summarize_text_content_prompt.meta.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/prompt_tools/summarize_text_content_prompt.meta.json",
"repo_id": "promptflow",
"token_count": 311
} | 74 |
inputs:
image:
type: image
default: logo.jpg
outputs:
output:
type: image
reference: ${python_node.output}
nodes:
- name: python_node
type: python
source:
type: code
path: passthrough.py
inputs:
image: ${inputs.image}
| promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_image_nested_api_calls/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_image_nested_api_calls/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 106
} | 75 |
[
{
"image_2": {
"data:image/png;path": "logo.jpg"
}
},
{
"image_2": {
"data:image/png;path": "logo_2.png"
}
}
] | promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image_with_default/inputs.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image_with_default/inputs.json",
"repo_id": "promptflow",
"token_count": 125
} | 76 |
from promptflow import tool
print(f"The script is {__file__}")
@tool
def my_python_tool(input1: str) -> str:
from pathlib import Path
assert Path(__file__).as_posix().endswith("folder/another-tool.py")
assert __name__ == "__pf_main__"
return f"Prompt: {input1} {__file__}"
| promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/folder/another-tool.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/folder/another-tool.py",
"repo_id": "promptflow",
"token_count": 114
} | 77 |
model: mock_model
instructions: mock_instructions
tools:
- type: function
tool_type: python
source:
type: code
path: echo.py
| promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/assistant_definition.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/assistant_definition.yaml",
"repo_id": "promptflow",
"token_count": 51
} | 78 |
Please summarize the following text in one paragraph. 100 words.
Do not add any information that is not in the text.
Text: {{text}}
Summary:
| promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow/lkg_sources/summarize_text_content.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow/lkg_sources/summarize_text_content.jinja2",
"repo_id": "promptflow",
"token_count": 35
} | 79 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/api-version=2023-06-01-preview
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}}'
headers:
cache-control:
- no-cache
content-length:
- '3519'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.019'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?api-version=2023-04-01-preview&count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.063'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/Connections/specs
response:
body:
string: '[{"connectionCategory": 15, "flowValueType": "AzureContentSafetyConnection",
"connectionType": "AzureContentSafety", "connectionTypeDisplayName": "Azure
content safety", "configSpecs": [{"name": "api_key", "displayName": "API key",
"configValueType": "Secret", "isOptional": false}, {"name": "endpoint", "displayName":
"Endpoint", "configValueType": "String", "isOptional": false}, {"name": "api_version",
"displayName": "API version", "configValueType": "String", "defaultValue":
"2023-04-30-preview", "isOptional": false}, {"name": "api_type", "displayName":
"API type", "configValueType": "String", "defaultValue": "Content Safety",
"isOptional": false}], "module": "promptflow.connections"}, {"connectionCategory":
13, "flowValueType": "AzureOpenAIConnection", "connectionType": "AzureOpenAI",
"connectionTypeDisplayName": "Azure OpenAI", "configSpecs": [{"name": "api_key",
"displayName": "API key", "configValueType": "Secret", "isOptional": false},
{"name": "api_base", "displayName": "API base", "configValueType": "String",
"isOptional": false}, {"name": "api_type", "displayName": "API type", "configValueType":
"String", "defaultValue": "azure", "isOptional": false}, {"name": "api_version",
"displayName": "API version", "configValueType": "String", "defaultValue":
"2023-07-01-preview", "isOptional": false}, {"name": "resource_id", "displayName":
"Resource id", "configValueType": "String", "isOptional": false}], "module":
"promptflow.connections"}, {"connectionCategory": 14, "flowValueType": "CognitiveSearchConnection",
"connectionType": "CognitiveSearch", "connectionTypeDisplayName": "Cognitive
search", "configSpecs": [{"name": "api_key", "displayName": "API key", "configValueType":
"Secret", "isOptional": false}, {"name": "api_base", "displayName": "API base",
"configValueType": "String", "isOptional": false}, {"name": "api_version",
"displayName": "API version", "configValueType": "String", "defaultValue":
"2023-07-01-Preview", "isOptional": false}], "module": "promptflow.connections"},
{"connectionCategory": 16, "flowValueType": "CustomConnection", "connectionType":
"Custom", "connectionTypeDisplayName": "Custom", "module": "promptflow.connections"},
{"connectionCategory": 16, "flowValueType": "OpenAIConnection", "connectionType":
"OpenAI", "connectionTypeDisplayName": "OpenAI", "configSpecs": [{"name":
"api_key", "displayName": "API key", "configValueType": "Secret", "isOptional":
false}, {"name": "organization", "displayName": "Organization", "configValueType":
"String", "isOptional": true}], "module": "promptflow.connections"}, {"connectionCategory":
16, "flowValueType": "QdrantConnection", "connectionType": "Qdrant", "connectionTypeDisplayName":
"Qdrant", "configSpecs": [{"name": "api_key", "displayName": "API key", "configValueType":
"Secret", "isOptional": false}, {"name": "api_base", "displayName": "API base",
"configValueType": "String", "isOptional": false}], "module": "promptflow_vectordb.connections.qdrant"},
{"connectionCategory": 16, "flowValueType": "SerpConnection", "connectionType":
"Serp", "connectionTypeDisplayName": "Serp", "configSpecs": [{"name": "api_key",
"displayName": "API key", "configValueType": "Secret", "isOptional": false}],
"module": "promptflow.connections"}, {"connectionCategory": 16, "flowValueType":
"WeaviateConnection", "connectionType": "Weaviate", "connectionTypeDisplayName":
"Weaviate", "configSpecs": [{"name": "api_key", "displayName": "API key",
"configValueType": "Secret", "isOptional": false}, {"name": "api_base", "displayName":
"API base", "configValueType": "String", "isOptional": false}], "module":
"promptflow_vectordb.connections.weaviate"}]'
headers:
connection:
- keep-alive
content-length:
- '3402'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.769'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_connection_operations_TestConnectionOperations_test_list_connection_spec.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_connection_operations_TestConnectionOperations_test_list_connection_spec.yaml",
"repo_id": "promptflow",
"token_count": 3527
} | 80 |
inputs:
text:
type: string
outputs:
output:
type: string
reference: ${summarize_text_content.output}
nodes:
- name: summarize_text_content
source:
type: code
path: summarize_text_content__variant_1.jinja2
inputs:
deployment_name: gpt-35-turbo
suffix: ''
max_tokens: '256'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${inputs.text}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion
module: promptflow.tools.aoai
| promptflow/src/promptflow/tests/test_configs/wrong_flows/tool_type_missing/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/tool_type_missing/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 272
} | 81 |
# Frequently asked questions (FAQ)
## Troubleshooting
### Token expired when running pfazure commands
If you hit the error "AADSTS700082: The refresh token has expired due to inactivity." when running a pfazure command, it is caused by an expired locally cached token. Clear the cached token under "%LOCALAPPDATA%/.IdentityService/msal.cache".
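For example, you could remove the cached token file from a shell as sketched below (this assumes the default cache location mentioned above; on Windows Command Prompt, `del "%LOCALAPPDATA%\.IdentityService\msal.cache"` would be the equivalent):
```sh
# Delete the locally cached token so a fresh one is acquired at the next login.
rm "$LOCALAPPDATA/.IdentityService/msal.cache"
```
Then run the command below to log in again: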
```sh
az login
``` | promptflow/docs/cloud/azureai/faq.md/0 | {
"file_path": "promptflow/docs/cloud/azureai/faq.md",
"repo_id": "promptflow",
"token_count": 108
} | 0 |
# Deploy a flow using development server
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../faq.md#stable-vs-experimental).
:::
Once you have created and thoroughly tested a flow, you can use it as an HTTP endpoint.
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
We are going to use the [web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification/) as
an example to show how to deploy a flow.
Please ensure you have [created the connection](../manage-connections.md#create-a-connection) required by the flow; if not, you can
refer to [Setup connection for web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification).
Note: In serving mode, we use the relevant environment variable ({connection_name}_{key_name}) to override connection configurations;
white space in the connection name is removed directly from the environment variable name. For instance,
if there is a custom connection named 'custom_connection' with a configuration key called 'chat_deployment_name', the
serving app will attempt to retrieve 'chat_deployment_name' from the environment variable
'CUSTOM_CONNECTION_CHAT_DEPLOYMENT_NAME' by default. If the environment variable is not set, it will use the original
value as a fallback.
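For instance, the sketch below shows how such an override could be set before starting the server; the deployment name value is only a placeholder:
```bash
# Override 'chat_deployment_name' of the custom connection 'custom_connection' for serving mode.
# Replace the value with your own deployment name.
export CUSTOM_CONNECTION_CHAT_DEPLOYMENT_NAME="my-deployment-name"
```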
The following CLI command allows you to serve a flow folder as an endpoint. Running it starts a [flask](https://flask.palletsprojects.com/en/) app in the environment where the command is executed, so please ensure all prerequisites required by the flow have been installed.
```bash
# Serve the flow at localhost:8080
pf flow serve --source <path-to-your-flow-folder> --port 8080 --host localhost
```
The expected result is as follows if the flow is served successfully; the process stays alive until it is killed manually.

:::
:::{tab-item} VS Code Extension
:sync: VSC
In the visual editor, choose:

then choose format:

then in yaml editor:

:::
::::
## Test endpoint
::::{tab-set}
:::{tab-item} Bash
You could open another terminal to test the endpoint with the following command:
```bash
curl http://localhost:8080/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
:::
:::{tab-item} PowerShell
You could open another terminal to test the endpoint with the following command:
```powershell
Invoke-WebRequest -URI http://localhost:8080/score -Body '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -Method POST -ContentType "application/json"
```
:::
:::{tab-item} Test Page
The development server has a built-in web page you can use to test the flow. Open 'http://localhost:8080' in your browser.

:::
::::
## Next steps
- Try the example [here](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification/).
- See how to [deploy a flow using docker](deploy-using-docker.md).
- See how to [deploy a flow using kubernetes](deploy-using-kubernetes.md).
| promptflow/docs/how-to-guides/deploy-a-flow/deploy-using-dev-server.md/0 | {
"file_path": "promptflow/docs/how-to-guides/deploy-a-flow/deploy-using-dev-server.md",
"repo_id": "promptflow",
"token_count": 999
} | 1 |
# Customizing an LLM Tool
In this document, we will guide you through the process of customizing an LLM tool, allowing users to seamlessly connect to a large language model and tune prompts using a `PromptTemplate`.
## Prerequisites
- Please ensure that your [Prompt flow for VS Code](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow) is updated to version 1.2.0 or later.
## How to customize an LLM tool
Here we use [an existing tool package](https://github.com/microsoft/promptflow/tree/main/examples/tools/tool-package-quickstart/my_tool_package) as an example. If you want to create your own tool, please refer to [create and use tool package](create-and-use-tool-package.md).
1. Develop the tool code as in [this example](https://github.com/microsoft/promptflow/blob/main/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_custom_llm_type.py).
- Add a `CustomConnection` input to the tool, which is used to authenticate and establish a connection to the large language model.
- Add a `PromptTemplate` input to the tool, which serves as an argument to be passed into the large language model.
```python
from jinja2 import Template
from promptflow import tool
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
@tool
def my_tool(connection: CustomConnection, prompt: PromptTemplate, **kwargs) -> str:
# Customize your own code to use the connection and prompt here.
rendered_prompt = Template(prompt, trim_blocks=True, keep_trailing_newline=True).render(**kwargs)
return rendered_prompt
```
2. Generate the custom LLM tool YAML.
Run the command below in your tool project directory to automatically generate your tool YAML, use _-t "custom_llm"_ or _--tool-type "custom_llm"_ to indicate this is a custom LLM tool:
```
python <promptflow github repo>\scripts\tool\generate_package_tool_meta.py -m <tool_module> -o <tool_yaml_path> -t "custom_llm"
```
Here we use [an existing tool](https://github.com/microsoft/promptflow/blob/main/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_custom_llm_type.yaml) as an example.
```
cd D:\proj\github\promptflow\examples\tools\tool-package-quickstart
python D:\proj\github\promptflow\scripts\tool\generate_package_tool_meta.py -m my_tool_package.tools.tool_with_custom_llm_type -o my_tool_package\yamls\tool_with_custom_llm_type.yaml -n "My Custom LLM Tool" -d "This is a tool to demonstrate how to customize an LLM tool with a PromptTemplate." -t "custom_llm"
```
This command will generate a YAML file as follows:
```yaml
my_tool_package.tools.tool_with_custom_llm_type.my_tool:
name: My Custom LLM Tool
description: This is a tool to demonstrate how to customize an LLM tool with a PromptTemplate.
# The type is custom_llm.
type: custom_llm
module: my_tool_package.tools.tool_with_custom_llm_type
function: my_tool
inputs:
connection:
type:
- CustomConnection
```
## Use the tool in VS Code
Follow the steps to [build and install your tool package](create-and-use-tool-package.md#build-and-share-the-tool-package) and [use your tool from VS Code extension](create-and-use-tool-package.md#use-your-tool-from-vscode-extension).
Here we use an existing flow to demonstrate the experience, open [this flow](https://github.com/microsoft/promptflow/blob/main/examples/tools/use-cases/custom_llm_tool_showcase/flow.dag.yaml) in VS Code extension.
- There is a node named "my_custom_llm_tool" with a prompt template file. You can either use an existing file or create a new one as the prompt template file.

| promptflow/docs/how-to-guides/develop-a-tool/customize_an_llm_tool.md/0 | {
"file_path": "promptflow/docs/how-to-guides/develop-a-tool/customize_an_llm_tool.md",
"repo_id": "promptflow",
"token_count": 1277
} | 2 |
---
myst:
html_meta:
"description lang=en": "Prompt flow Doc"
"google-site-verification": "rEZN-2h5TVqEco07aaMpqNcDx4bjr2czx1Hwfoxydrg"
html_theme.sidebar_secondary.remove: true
---
# Prompt flow
[**Prompt flow**](https://github.com/microsoft/promptflow) is a suite of development tools designed to streamline the end-to-end development cycle of LLM-based AI applications, from ideation, prototyping, testing, evaluation to production deployment and monitoring. It makes prompt engineering much easier and enables you to build LLM apps with production quality.
With prompt flow, you will be able to:
- **Create [flows](./concepts/concept-flows.md)** that link [LLMs](./reference/tools-reference/llm-tool.md), [prompts](./reference/tools-reference/prompt-tool.md), [Python](./reference/tools-reference/python-tool.md) code and other [tools](./concepts/concept-tools.md) together in an executable workflow.
- **Debug and iterate your flows**, especially the interaction with LLMs with ease.
- **Evaluate your flows**, calculate quality and performance metrics with larger datasets.
- **Integrate the testing and evaluation into your CI/CD system** to ensure quality of your flow.
- **Deploy your flows** to the serving platform you choose or integrate into your app's code base easily.
- (Optional but highly recommended) **Collaborate with your team** by leveraging the cloud version of [Prompt flow in Azure AI](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/overview-what-is-prompt-flow?view=azureml-api-2).
> Welcome to join us to make prompt flow better by
> participating [discussions](https://github.com/microsoft/promptflow/discussions),
> opening [issues](https://github.com/microsoft/promptflow/issues/new/choose),
> submitting [PRs](https://github.com/microsoft/promptflow/pulls).
This documentation site contains guides for prompt flow [sdk, cli](https://pypi.org/project/promptflow/) and [vscode extension](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow) users.
```{gallery-grid}
:grid-columns: 1 2 2 2
- header: "🚀 Quick Start"
content: "
Quick start and end-to-end tutorials.<br/><br/>
- [Getting started with prompt flow](how-to-guides/quick-start.md)<br/>
- [E2E development tutorial: chat with PDF](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md)<br/>
- Find more: [tutorials & samples](tutorials/index.md)<br/>
"
- header: "📒 How-to Guides"
content: "
Articles guide user to complete a specific task in prompt flow.<br/><br/>
- [Develop a flow](how-to-guides/develop-a-flow/index.md)<br/>
- [Initialize and test a flow](how-to-guides/init-and-test-a-flow.md)<br/>
- [Run and evaluate a flow](how-to-guides/run-and-evaluate-a-flow/index.md)<br/>
- [Tune prompts using variants](how-to-guides/tune-prompts-with-variants.md)<br/>
- [Develop custom tool](how-to-guides/develop-a-tool/create-and-use-tool-package.md)<br/>
- [Deploy a flow](how-to-guides/deploy-a-flow/index.md)<br/>
- [Process image in flow](how-to-guides/process-image-in-flow.md)
"
```
```{gallery-grid}
:grid-columns: 1 2 2 2
- header: "📑 Concepts"
content: "
Introduction of key concepts of prompt flow.<br/><br/>
- [Flows](concepts/concept-flows.md)<br/>
- [Tools](concepts/concept-tools.md)<br/>
- [Connections](concepts/concept-connections.md)<br/>
- [Design principles](concepts/design-principles.md)<br/>
"
- header: "🔍 Reference"
content: "
Reference provides technical information about prompt flow API.<br/><br/>
- Command line Interface reference: [pf](reference/pf-command-reference.md)<br/>
- Python library reference: [promptflow](reference/python-library-reference/promptflow.md)<br/>
- Tool reference: [LLM Tool](reference/tools-reference/llm-tool.md), [Python Tool](reference/tools-reference/python-tool.md), [Prompt Tool](reference/tools-reference/prompt-tool.md)<br/>
"
```
```{toctree}
:hidden:
:maxdepth: 1
how-to-guides/quick-start
```
```{toctree}
:hidden:
:maxdepth: 1
how-to-guides/index
```
```{toctree}
:hidden:
:maxdepth: 1
tutorials/index
```
```{toctree}
:hidden:
:maxdepth: 2
concepts/index
```
```{toctree}
:hidden:
:maxdepth: 1
reference/index
```
```{toctree}
:hidden:
:maxdepth: 1
cloud/index
```
```{toctree}
:hidden:
:maxdepth: 1
integrations/index
``` | promptflow/docs/index.md/0 | {
"file_path": "promptflow/docs/index.md",
"repo_id": "promptflow",
"token_count": 1541
} | 3 |
# OpenAI GPT-4V
## Introduction
The OpenAI GPT-4V tool enables you to leverage OpenAI's GPT-4 with vision, also referred to as GPT-4V or gpt-4-vision-preview in the API, to take images as input and answer questions about them.
## Prerequisites
- Create OpenAI resources
Sign up for an account on the [OpenAI website](https://openai.com/)
Log in and [find your personal API key](https://platform.openai.com/account/api-keys)
- Get access to the GPT-4 API
To use GPT-4 with vision, you need access to the GPT-4 API. Learn more about [how to get access to the GPT-4 API](https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4)
## Connection
Set up connections to provisioned resources in prompt flow.
| Type | Name | API KEY |
|-------------|----------|----------|
| OpenAI | Required | Required |
## Inputs
| Name | Type | Description | Required |
|------------------------|-------------|------------------------------------------------------------------------------------------------|----------|
| connection | OpenAI | the OpenAI connection to be used in the tool | Yes |
| model                  | string      | the language model to use; currently only gpt-4-vision-preview is supported                     | Yes      |
| prompt                 | string      | The text prompt that the language model will use to generate its response.                      | Yes      |
| max\_tokens | integer | the maximum number of tokens to generate in the response. Default is 512. | No |
| temperature | float | the randomness of the generated text. Default is 1. | No |
| stop | list | the stopping sequence for the generated text. Default is null. | No |
| top_p | float | the probability of using the top choice from the generated tokens. Default is 1. | No |
| presence\_penalty | float | value that controls the model's behavior with regards to repeating phrases. Default is 0. | No |
| frequency\_penalty | float | value that controls the model's behavior with regards to generating rare phrases. Default is 0. | No |
## Outputs
| Return Type | Description |
|-------------|------------------------------------------|
| string | The text of one response of conversation |
| promptflow/docs/reference/tools-reference/openai-gpt-4v-tool.md/0 | {
"file_path": "promptflow/docs/reference/tools-reference/openai-gpt-4v-tool.md",
"repo_id": "promptflow",
"token_count": 1118
} | 4 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
name: open_ai_connection
type: open_ai
api_key: "<user-input>"
organization: "" # optional
| promptflow/examples/connections/openai.yml/0 | {
"file_path": "promptflow/examples/connections/openai.yml",
"repo_id": "promptflow",
"token_count": 68
} | 5 |
from promptflow import tool
import json
import re
# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signature per need
@tool
def my_python_tool(input1: str) -> str:
input1 = re.sub(r'[$\\!]', '', input1)
try:
json_answer = json.loads(input1)
answer = json_answer['answer']
except Exception:
answer = input1
return answer
| promptflow/examples/flows/chat/chat-math-variant/extract_result.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-math-variant/extract_result.py",
"repo_id": "promptflow",
"token_count": 172
} | 6 |
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
| promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/__init__.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/__init__.py",
"repo_id": "promptflow",
"token_count": 33
} | 7 |
from typing import Tuple, Union, Optional, Type
import functools
import time
import random
def retry_and_handle_exceptions(
exception_to_check: Union[Type[Exception], Tuple[Type[Exception], ...]],
max_retries: int = 3,
initial_delay: float = 1,
exponential_base: float = 2,
jitter: bool = False,
extract_delay_from_error_message: Optional[any] = None,
):
def deco_retry(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
delay = initial_delay
for i in range(max_retries):
try:
return func(*args, **kwargs)
except exception_to_check as e:
if i == max_retries - 1:
raise Exception(
"Func execution failed after {0} retries: {1}".format(
max_retries, e
)
)
delay *= exponential_base * (1 + jitter * random.random())
delay_from_error_message = None
if extract_delay_from_error_message is not None:
delay_from_error_message = extract_delay_from_error_message(
str(e)
)
final_delay = (
delay_from_error_message if delay_from_error_message else delay
)
print(
"Func execution failed. Retrying in {0} seconds: {1}".format(
final_delay, e
)
)
time.sleep(final_delay)
return wrapper
return deco_retry
def retry_and_handle_exceptions_for_generator(
exception_to_check: Union[Type[Exception], Tuple[Type[Exception], ...]],
max_retries: int = 3,
initial_delay: float = 1,
exponential_base: float = 2,
jitter: bool = False,
extract_delay_from_error_message: Optional[any] = None,
):
def deco_retry(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
delay = initial_delay
for i in range(max_retries):
try:
for value in func(*args, **kwargs):
yield value
break
except exception_to_check as e:
if i == max_retries - 1:
raise Exception(
"Func execution failed after {0} retries: {1}".format(
max_retries, e
)
)
delay *= exponential_base * (1 + jitter * random.random())
delay_from_error_message = None
if extract_delay_from_error_message is not None:
delay_from_error_message = extract_delay_from_error_message(
str(e)
)
final_delay = (
delay_from_error_message if delay_from_error_message else delay
)
print(
"Func execution failed. Retrying in {0} seconds: {1}".format(
final_delay, e
)
)
time.sleep(final_delay)
return wrapper
return deco_retry
| promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/utils/retry.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/utils/retry.py",
"repo_id": "promptflow",
"token_count": 1963
} | 8 |
import os
from typing import Union
from promptflow import tool
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from chat_with_pdf.utils.lock import acquire_lock
BASE_DIR = os.path.dirname(os.path.abspath(__file__)) + "/chat_with_pdf/"
@tool
def setup_env(connection: Union[AzureOpenAIConnection, OpenAIConnection], config: dict):
if not connection or not config:
return
if isinstance(connection, AzureOpenAIConnection):
os.environ["OPENAI_API_TYPE"] = "azure"
os.environ["OPENAI_API_BASE"] = connection.api_base
os.environ["OPENAI_API_KEY"] = connection.api_key
os.environ["OPENAI_API_VERSION"] = connection.api_version
if isinstance(connection, OpenAIConnection):
os.environ["OPENAI_API_KEY"] = connection.api_key
if connection.organization is not None:
os.environ["OPENAI_ORG_ID"] = connection.organization
for key in config:
os.environ[key] = str(config[key])
with acquire_lock(BASE_DIR + "create_folder.lock"):
if not os.path.exists(BASE_DIR + ".pdfs"):
os.mkdir(BASE_DIR + ".pdfs")
if not os.path.exists(BASE_DIR + ".index/.pdfs"):
os.makedirs(BASE_DIR + ".index/.pdfs")
return "Ready"
| promptflow/examples/flows/chat/chat-with-pdf/setup_env.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/setup_env.py",
"repo_id": "promptflow",
"token_count": 531
} | 9 |
inputs:
groundtruth:
type: string
default: "10"
is_chat_input: false
prediction:
type: string
default: "10"
is_chat_input: false
outputs:
score:
type: string
reference: ${line_process.output}
nodes:
- name: line_process
type: python
source:
type: code
path: line_process.py
inputs:
groundtruth: ${inputs.groundtruth}
prediction: ${inputs.prediction}
use_variants: false
- name: aggregate
type: python
source:
type: code
path: aggregate.py
inputs:
processed_results: ${line_process.output}
aggregation: true
use_variants: false
node_variants: {}
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
| promptflow/examples/flows/evaluation/eval-chat-math/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-chat-math/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 273
} | 10 |
from promptflow import tool
import numpy as np
from numpy.linalg import norm
@tool
def compute_ada_cosine_similarity(a, b) -> float:
return np.dot(a, b)/(norm(a)*norm(b))
| promptflow/examples/flows/evaluation/eval-qna-non-rag/ada_cosine_similarity_score.py/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-qna-non-rag/ada_cosine_similarity_score.py",
"repo_id": "promptflow",
"token_count": 67
} | 11 |
from promptflow import tool
import numpy as np
@tool
def concat_results(rag_retrieval_score: dict = None,
rag_grounding_score: dict = None, rag_generation_score: dict = None):
load_list = [{'name': 'gpt_groundedness', 'result': rag_grounding_score},
{'name': 'gpt_retrieval_score', 'result': rag_retrieval_score},
{'name': 'gpt_relevance', 'result': rag_generation_score}]
score_list = []
errors = []
for item in load_list:
if item['result']:
try:
score = float(item['result']["quality_score"])
except Exception as e:
score = np.nan
errors.append({"name": item["name"], "msg": str(e), "data": item['result']})
reasoning = item['result']['quality_reasoning']
else:
score = np.nan
reasoning = None
score_list.append({"name": item["name"], "score": score, "quality_reasoning": reasoning})
variant_level_result = {}
for item in score_list:
item_name = str(item["name"])
variant_level_result[item_name] = item["score"]
return variant_level_result
| promptflow/examples/flows/evaluation/eval-qna-rag-metrics/concat_scores.py/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-qna-rag-metrics/concat_scores.py",
"repo_id": "promptflow",
"token_count": 529
} | 12 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
name:
type: string
default: "FilmTriviaGPT"
goals:
type: list
default: ["Introduce 'Lord of the Rings' film trilogy including the film title, release year, director, current age of the director, production company and a brief summary of the film."]
role:
type: string
default: "an AI specialized in film trivia that provides accurate and up-to-date information about movies, directors, actors, and more."
outputs:
output:
type: string
reference: ${autogpt_easy_start.output}
nodes:
- name: autogpt_easy_start
type: python
source:
type: code
path: autogpt_easy_start.py
inputs:
connection: open_ai_connection
functions: ${functions.output}
model_or_deployment_name: gpt-4
system_prompt: ${system_prompt.output}
triggering_prompt: ${triggering_prompt.output}
user_prompt: ${user_prompt.output}
- name: system_prompt
type: prompt
source:
type: code
path: system_prompt.jinja2
inputs:
name: ${inputs.name}
role: ${inputs.role}
- name: user_prompt
type: prompt
source:
type: code
path: user_prompt.jinja2
inputs:
goals: ${generate_goal.output}
- name: triggering_prompt
type: prompt
source:
type: code
path: triggering_prompt.jinja2
- name: functions
type: python
source:
type: code
path: functions.py
- name: generate_goal
type: python
source:
type: code
path: generate_goal.py
inputs:
items: ${inputs.goals}
environment:
python_requirements_txt: requirements.txt
| promptflow/examples/flows/standard/autonomous-agent/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/standard/autonomous-agent/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 591
} | 13 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomConnection.schema.json
name: basic_custom_connection
type: custom
configs:
api_type: azure
api_version: 2023-03-15-preview
api_base: https://<to-be-replaced>.openai.azure.com/
secrets: # must-have
api_key: <to-be-replaced>
| promptflow/examples/flows/standard/basic-with-connection/custom.yml/0 | {
"file_path": "promptflow/examples/flows/standard/basic-with-connection/custom.yml",
"repo_id": "promptflow",
"token_count": 119
} | 14 |
from promptflow import tool
@tool
def product_info(query: str) -> str:
print(f"Your query is {query}.\nLooking for product information...")
return "This product is produced by Microsoft."
| promptflow/examples/flows/standard/conditional-flow-for-switch/product_info.py/0 | {
"file_path": "promptflow/examples/flows/standard/conditional-flow-for-switch/product_info.py",
"repo_id": "promptflow",
"token_count": 58
} | 15 |
import unittest
from cleansing import cleansing
class CleansingTest(unittest.TestCase):
def test_normal(self):
self.assertEqual(cleansing("a, b, c"), ["a", "b", "c"])
self.assertEqual(cleansing("a, b, (425)137-98-25, "), ["a", "b", "(425)137-98-25"])
self.assertEqual(cleansing("a, b, F. Scott Fitzgerald., d"), ["a", "b", "F. Scott Fitzgerald", "d"])
self.assertEqual(cleansing("a, b, c, None., "), ["a", "b", "c", "None"])
self.assertEqual(cleansing(",,"), [])
self.assertEqual(cleansing(""), [])
| promptflow/examples/flows/standard/named-entity-recognition/cleansing_test.py/0 | {
"file_path": "promptflow/examples/flows/standard/named-entity-recognition/cleansing_test.py",
"repo_id": "promptflow",
"token_count": 252
} | 16 |
system:
Please summarize the following text in one paragraph. 100 words.
Do not add any information that is not in the text.
user:
Text: {{text}}
Summary: | promptflow/examples/flows/standard/web-classification/summarize_text_content.jinja2/0 | {
"file_path": "promptflow/examples/flows/standard/web-classification/summarize_text_content.jinja2",
"repo_id": "promptflow",
"token_count": 42
} | 17 |
import importlib
from pathlib import Path
from promptflow import tool
from promptflow.contracts.types import FilePath
@tool
def my_tool(input_file: FilePath, input_text: str) -> str:
# customise your own code to handle and use the input_file here
new_module = importlib.import_module(Path(input_file).stem)
return new_module.hello(input_text)
| promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_file_path_input.py/0 | {
"file_path": "promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_file_path_input.py",
"repo_id": "promptflow",
"token_count": 111
} | 18 |
import pytest
import unittest
from promptflow.connections import CustomConnection
from my_tool_package.tools.tool_with_custom_llm_type import my_tool
@pytest.fixture
def my_custom_connection() -> CustomConnection:
my_custom_connection = CustomConnection(
{
"api-key" : "my-api-key",
"api-secret" : "my-api-secret",
"api-url" : "my-api-url"
}
)
return my_custom_connection
class TestToolWithCustomLLMType:
def test_tool_with_custom_llm_type(self, my_custom_connection):
result = my_tool(my_custom_connection, "Hello {{text}}", text="Microsoft")
assert result == "Hello Microsoft"
# Run the unit tests
if __name__ == "__main__":
unittest.main()
| promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_custom_llm_type.py/0 | {
"file_path": "promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_custom_llm_type.py",
"repo_id": "promptflow",
"token_count": 295
} | 19 |
{"text": "Python Hello World!"}
{"text": "C Hello World!"}
{"text": "C# Hello World!"}
| promptflow/examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase/data.jsonl/0 | {
"file_path": "promptflow/examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase/data.jsonl",
"repo_id": "promptflow",
"token_count": 31
} | 20 |
---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
---
# Deploy a flow using Docker
This example demonstrates how to deploy a flow as a docker app.
We will use [web-classification](../../../flows/standard/web-classification/README.md) as an example in this tutorial.
## Build a flow as docker format app
Note that all dependent connections must be created before building the flow as a docker format app.
```bash
# create connection if not created before
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Use the command below to build a flow as a docker format app:
```bash
pf flow build --source ../../../flows/standard/web-classification --output dist --format docker
```
## Deploy with Docker
### Build Docker image
As with any Dockerfile, you need to build the image first. You can tag the image with any name you want; in this example, we use `web-classification-serve`.
Run the command below to build image:
```shell
docker build dist -t web-classification-serve
```
### Run Docker image
Running the docker image will start a service that serves the flow inside the container.
#### Connections
If the service involves connections, all related connections will be exported as yaml files and recreated in containers.
Secrets in connections won't be exported directly. Instead, we will export them as references to environment variables:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
type: open_ai
name: open_ai_connection
module: promptflow.connections
api_key: ${env:OPEN_AI_CONNECTION_API_KEY} # env reference
```
You'll need to set up the environment variables in the container to make the connections work.
### Run with `docker run`
You can run the docker image directly with the commands below:
```shell
# The started service will listen on port 8080. You can map the port to any port on the host machine as you want.
docker run -p 8080:8080 -e OPEN_AI_CONNECTION_API_KEY=<secret-value> web-classification-serve
```
### Test the endpoint
After starting the service, you can use curl to test it:
```shell
curl http://localhost:8080/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
| promptflow/examples/tutorials/flow-deploy/docker/README.md/0 | {
"file_path": "promptflow/examples/tutorials/flow-deploy/docker/README.md",
"repo_id": "promptflow",
"token_count": 672
} | 21 |
import argparse
import json
from pathlib import Path
from azure.keyvault.secrets import SecretClient
from azure.identity import ClientSecretCredential, DefaultAzureCredential
CONNECTION_FILE_NAME = "connections.json"
CONNECTION_TPL_FILE_PATH = Path(".") / "src/promptflow" / "dev-connections.json.example"
def get_secret_client(
tenant_id: str, client_id: str, client_secret: str
) -> SecretClient:
try:
if (tenant_id is None) or (client_id is None) or (client_secret is None):
credential = DefaultAzureCredential()
client = SecretClient(
vault_url="https://promptflowprod.vault.azure.net/",
credential=credential,
)
else:
credential = ClientSecretCredential(tenant_id, client_id, client_secret)
client = SecretClient(
vault_url="https://github-promptflow.vault.azure.net/",
credential=credential,
)
    except Exception as e:
        print(e)
        raise
return client
def get_secret(secret_name: str, client: SecretClient):
secret = client.get_secret(secret_name)
return secret.value
def list_secret_names(client: SecretClient) -> list:
secret_properties = client.list_properties_of_secrets()
return [secret.name for secret in secret_properties]
def fill_key_to_dict(template_dict, keys_dict):
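    """Recursively replace string values in template_dict that match a key in keys_dict with the corresponding secret value."""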
if not isinstance(template_dict, dict):
return
for key, val in template_dict.items():
if isinstance(val, str) and val in keys_dict:
template_dict[key] = keys_dict[val]
continue
fill_key_to_dict(val, keys_dict)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--tenant_id", type=str, help="The tenant id of the service principal"
)
parser.add_argument(
"--client_id", type=str, help="The client id of the service principal"
)
parser.add_argument(
"--client_secret", type=str, help="The client secret of the service principal"
)
parser.add_argument(
"--target_folder", type=str, help="The target folder to save the generated file"
)
args = parser.parse_args()
template_dict = json.loads(
open(CONNECTION_TPL_FILE_PATH.resolve().absolute(), "r").read()
)
file_path = (
(Path(".") / args.target_folder / CONNECTION_FILE_NAME)
.resolve()
.absolute()
.as_posix()
)
print(f"file_path: {file_path}")
client = get_secret_client(
tenant_id=args.tenant_id,
client_id=args.client_id,
client_secret=args.client_secret,
)
all_secret_names = list_secret_names(client)
data = {
secret_name: get_secret(secret_name, client) for secret_name in all_secret_names
}
fill_key_to_dict(template_dict, data)
with open(file_path, "w") as f:
json.dump(template_dict, f)
| promptflow/scripts/building/generate_connection_config.py/0 | {
"file_path": "promptflow/scripts/building/generate_connection_config.py",
"repo_id": "promptflow",
"token_count": 1219
} | 22 |
"""A directive to generate a gallery of images from structured data.
Generating a gallery of images that are all the same size is a common
pattern in documentation, and this can be cumbersome if the gallery is
generated programmatically. This directive wraps this particular use-case
in a helper-directive to generate it with a single YAML configuration file.
It currently exists for maintainers of the pydata-sphinx-theme,
but might be abstracted into a standalone package if it proves useful.
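
A gallery entry in the YAML file may look like the following (field names are
inferred from the options handled in ``GalleryDirective.run`` below; this is an
illustrative sketch, not an exhaustive schema)::

    - title: Example card
      website: https://example.com
      image: _static/example.png
      content: A short description shown in the card body.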
"""
from yaml import safe_load
from typing import List
from pathlib import Path
from docutils import nodes
from docutils.parsers.rst import directives
from sphinx.util.docutils import SphinxDirective
from sphinx.util import logging
logger = logging.getLogger(__name__)
TEMPLATE_GRID = """
`````{{grid}} {grid_columns}
{container_options}
{content}
`````
"""
GRID_CARD = """
````{{grid-item-card}} {title}
{card_options}
{content}
````
"""
class GalleryDirective(SphinxDirective):
"""A directive to show a gallery of images and links in a grid."""
name = "gallery-grid"
has_content = True
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
option_spec = {
# A class to be added to the resulting container
"grid-columns": directives.unchanged,
"class-container": directives.unchanged,
"class-card": directives.unchanged,
}
def run(self) -> List[nodes.Node]: # noqa: C901
if self.arguments:
# If an argument is given, assume it's a path to a YAML file
# Parse it and load it into the directive content
path_data_rel = Path(self.arguments[0])
path_doc, _ = self.get_source_info()
path_doc = Path(path_doc).parent
path_data = (path_doc / path_data_rel).resolve()
if not path_data.exists():
                logger.warning(f"Could not find grid data at {path_data}.")
                nodes.text(f"No grid data found at {path_data}.")
return
yaml_string = path_data.read_text()
else:
yaml_string = "\n".join(self.content)
# Read in YAML so we can generate the gallery
grid_data = safe_load(yaml_string)
grid_items = []
for item in grid_data:
# Grid card parameters
options = {}
if "website" in item:
options["link"] = item["website"]
if "class-card" in self.options:
options["class-card"] = self.options["class-card"]
if "img-background" in item:
options["img-background"] = item["img-background"]
if "img-top" in item:
options["img-top"] = item["img-top"]
if "img-bottom" in item:
options["img-bottom"] = item["img-bottom"]
options_str = "\n".join(f":{k}: {v}" for k, v in options.items()) + "\n\n"
# Grid card content
content_str = ""
if "header" in item:
content_str += f"{item['header']}\n\n^^^\n\n"
if "image" in item:
                content_str += f"![gallery image]({item['image']})\n\n"
if "content" in item:
content_str += f"{item['content']}\n\n"
if "footer" in item:
content_str += f"+++\n\n{item['footer']}\n\n"
title = item.get("title", "")
content_str += "\n"
grid_items.append(
GRID_CARD.format(
card_options=options_str, content=content_str, title=title
)
)
# Parse the template with Sphinx Design to create an output
container = nodes.container()
# Prep the options for the template grid
container_options = {"gutter": 2, "class-container": "gallery-directive"}
if "class-container" in self.options:
container_options[
"class-container"
] += f' {self.options["class-container"]}'
container_options_str = "\n".join(
f":{k}: {v}" for k, v in container_options.items()
)
# Create the directive string for the grid
grid_directive = TEMPLATE_GRID.format(
grid_columns=self.options.get("grid-columns", "1 2 3 4"),
container_options=container_options_str,
content="\n".join(grid_items),
)
# Parse content as a directive so Sphinx Design processes it
self.state.nested_parse([grid_directive], 0, container)
# Sphinx Design outputs a container too, so just use that
container = container.children[0]
# Add extra classes
if self.options.get("container-class", []):
container.attributes["classes"] += self.options.get("class", [])
return [container]
| promptflow/scripts/docs/gallery_directive/__init__.py/0 | {
"file_path": "promptflow/scripts/docs/gallery_directive/__init__.py",
"repo_id": "promptflow",
"token_count": 2104
} | 23 |
@echo off
setlocal
set MAIN_EXE=%~dp0.\pfcli.exe
"%MAIN_EXE%" pfsvc %* | promptflow/scripts/installer/windows/scripts/pfsvc.bat/0 | {
"file_path": "promptflow/scripts/installer/windows/scripts/pfsvc.bat",
"repo_id": "promptflow",
"token_count": 40
} | 24 |
from pathlib import Path
from typing import List
import markdown
import nbformat
from .readme_step import ReadmeStepsManage
RESOURCES_KEY_NAME = "resources"
RESOURCES_KEY_ERROR_MESSAGE = (
"Please follow examples contributing guide to declare tutorial resources: "
"https://github.com/microsoft/promptflow/blob/main/examples/CONTRIBUTING.md"
)
def _parse_resources_string_from_notebook(path: Path) -> str:
with open(path, "r", encoding="utf-8") as f:
nb = nbformat.read(f, as_version=4)
if RESOURCES_KEY_NAME not in nb.metadata:
raise Exception(RESOURCES_KEY_ERROR_MESSAGE)
return nb.metadata[RESOURCES_KEY_NAME]
def _parse_resources_string_from_markdown(path: Path) -> str:
markdown_content = path.read_text(encoding="utf-8")
md = markdown.Markdown(extensions=["meta"])
md.convert(markdown_content)
if RESOURCES_KEY_NAME not in md.Meta:
raise Exception(RESOURCES_KEY_ERROR_MESSAGE)
return md.Meta[RESOURCES_KEY_NAME][0]
def _parse_resources(path: Path) -> List[str]:
if path.suffix == ".ipynb":
resources_string = _parse_resources_string_from_notebook(path)
elif path.suffix == ".md":
resources_string = _parse_resources_string_from_markdown(path)
else:
raise Exception(f"Unknown file type: {path.suffix!r}")
return [resource.strip() for resource in resources_string.split(",")]
def resolve_tutorial_resource(workflow_name: str, resource_path: Path) -> str:
"""Resolve tutorial resources, so that workflow can be triggered more precisely.
A tutorial workflow should listen to changes of:
1. working directory
2. resources declared in notebook/markdown metadata
3. workflow file
4. examples/requirements.txt (for release verification)
5. examples/connections/azure_openai.yml (fall back as it is the most basic and common connection)
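
    Returns a single path-filter string of the form "[ <path1>, <path2>, ... ]" (see the join at the end of this function).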
"""
# working directory
git_base_dir = Path(ReadmeStepsManage.git_base_dir())
working_dir = resource_path.parent.relative_to(git_base_dir).as_posix()
path_filter_list = [f"{working_dir}/**"]
# resources declared in text file
resources = _parse_resources(resource_path)
for resource in resources:
# skip empty line
if len(resource) == 0:
continue
# validate resource path exists
resource_path = (git_base_dir / resource).resolve()
if not resource_path.exists():
raise FileNotFoundError("Please declare tutorial resources path whose base is the git repo root.")
elif resource_path.is_file():
path_filter_list.append(resource)
else:
path_filter_list.append(f"{resource}/**")
# workflow file
path_filter_list.append(f".github/workflows/{workflow_name}.yml")
# manually add examples/requirements.txt if not exists
examples_req = "examples/requirements.txt"
if examples_req not in path_filter_list:
path_filter_list.append(examples_req)
# manually add examples/connections/azure_openai.yml if not exists
aoai_conn = "examples/connections/azure_openai.yml"
if aoai_conn not in path_filter_list:
path_filter_list.append(aoai_conn)
return "[ " + ", ".join(path_filter_list) + " ]"
| promptflow/scripts/readme/ghactions_driver/resource_resolver.py/0 | {
"file_path": "promptflow/scripts/readme/ghactions_driver/resource_resolver.py",
"repo_id": "promptflow",
"token_count": 1216
} | 25 |
{% extends "workflow_skeleton.yml.jinja2" %}
{% block steps %}
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Generate config.json for canary workspace (scheduled runs only)
if: github.event_name == 'schedule'
run: echo '${{ '{{' }} secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ '{{' }} github.workspace }}/examples/config.json
- name: Generate config.json for production workspace
if: github.event_name != 'schedule'
run: echo '${{ '{{' }} secrets.EXAMPLE_WORKSPACE_CONFIG_JSON_PROD }}' > ${{ '{{' }} github.workspace }}/examples/config.json
- name: Setup Python 3.9 environment
uses: actions/setup-python@v4
with:
python-version: "3.9"
- name: Prepare sample requirements
working-directory: {{ gh_working_dir }}
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Prepare requirements
run: |
python -m pip install --upgrade pip
pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt
pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt
- name: Create Chat With PDF Custom Connection
working-directory: {{ gh_working_dir }}
run: |
AOAI_API_KEY=${{ '{{' }} secrets.AOAI_API_KEY_TEST }}
AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
if [[ -e .env.example ]]; then
echo "env replacement"
sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
mv .env.example .env
pf connection create --file .env --name chat_with_pdf_custom_connection
fi
- name: Create AOAI Connection
working-directory: examples/connections
run: |
AOAI_API_KEY=${{ '{{' }} secrets.AOAI_API_KEY_TEST }}
AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
if [[ -e azure_openai.yml ]]; then
pf connection create --file azure_openai.yml --set api_key=$AOAI_API_KEY api_base=$AOAI_API_ENDPOINT
fi
- name: Azure Login
uses: azure/login@v1
with:
creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }}
- name: Test Notebook
working-directory: {{ gh_working_dir }}
run: |
papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb
- name: Upload artifact
if: ${{ '{{' }} always() }}
uses: actions/upload-artifact@v3
with:
name: artifact
path: {{ gh_working_dir }}
{% endblock steps %} | promptflow/scripts/readme/ghactions_driver/workflow_templates/pdf_workflow.yml.jinja2/0 | {
"file_path": "promptflow/scripts/readme/ghactions_driver/workflow_templates/pdf_workflow.yml.jinja2",
"repo_id": "promptflow",
"token_count": 1069
} | 26 |
import argparse
import json
from pathlib import Path
from utils.secret_manager import get_secret, get_secret_client, list_secret_names
CONNECTION_FILE_NAME = "connections.json"
PROMPTFLOW_TOOLS_ROOT = Path(__file__) / "../../../src/promptflow-tools"
CONNECTION_TPL_FILE_PATH = PROMPTFLOW_TOOLS_ROOT / "connections.json.example"
def fill_key_to_dict(template_dict, keys_dict):
if not isinstance(template_dict, dict):
return
for key, val in template_dict.items():
if isinstance(val, str) and val in keys_dict:
template_dict[key] = keys_dict[val]
continue
fill_key_to_dict(val, keys_dict)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--tenant_id", type=str, help="The tenant id of the service principal")
parser.add_argument("--client_id", type=str, help="The client id of the service principal")
parser.add_argument("--client_secret", type=str, help="The client secret of the service principal")
parser.add_argument("--local", action='store_true', help="local debug mode")
args = parser.parse_args()
template_dict = json.loads(open(CONNECTION_TPL_FILE_PATH.resolve().absolute(), "r").read())
file_path = (PROMPTFLOW_TOOLS_ROOT / CONNECTION_FILE_NAME).resolve().absolute().as_posix()
print(f"file_path: {file_path}")
if not args.local:
client = get_secret_client(tenant_id=args.tenant_id, client_id=args.client_id, client_secret=args.client_secret)
all_secret_names = list_secret_names(client)
data = {secret_name: get_secret(secret_name, client) for secret_name in all_secret_names}
fill_key_to_dict(template_dict, data)
with open(file_path, "w") as f:
json.dump(template_dict, f)
| promptflow/scripts/tool/generate_connection_config.py/0 | {
"file_path": "promptflow/scripts/tool/generate_connection_config.py",
"repo_id": "promptflow",
"token_count": 677
} | 27 |
"""
This file can generate a meta file for the given prompt template or a python file.
"""
import inspect
import types
from dataclasses import asdict
from utils.tool_utils import function_to_interface
from promptflow.contracts.tool import Tool, ToolType
# Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow'
# since the code here is in promptflow namespace as well
from promptflow._internal import ToolProvider
from promptflow.exceptions import ErrorTarget, UserErrorException
def asdict_without_none(obj):
return asdict(obj, dict_factory=lambda x: {k: v for (k, v) in x if v})
def asdict_with_advanced_features_without_none(obj, **advanced_features):
dict_without_none = asdict_without_none(obj)
dict_without_none.update({k: v for k, v in advanced_features.items() if v})
return dict_without_none
def is_tool(f):
if not isinstance(f, types.FunctionType):
return False
if not hasattr(f, "__tool"):
return False
return True
def collect_tool_functions_in_module(m):
tools = []
for _, obj in inspect.getmembers(m):
if is_tool(obj):
            # Note that the tool should be defined in exec but not imported in exec,
            # so it should also have the same module as the current function.
if getattr(obj, "__module__", "") != m.__name__:
continue
tools.append(obj)
return tools
def collect_tool_methods_in_module(m):
tools = []
for _, obj in inspect.getmembers(m):
if isinstance(obj, type) and issubclass(obj, ToolProvider) and obj.__module__ == m.__name__:
for _, method in inspect.getmembers(obj):
if is_tool(method):
initialize_inputs = obj.get_initialize_inputs()
tools.append((method, initialize_inputs))
return tools
def _parse_tool_from_function(f, initialize_inputs=None, tool_type=ToolType.PYTHON, name=None, description=None):
if hasattr(f, "__tool") and isinstance(f.__tool, Tool):
return f.__tool
if hasattr(f, "__original_function"):
f = f.__original_function
try:
inputs, _, _ = function_to_interface(f, tool_type=tool_type, initialize_inputs=initialize_inputs)
except Exception as e:
raise BadFunctionInterface(f"Failed to parse interface for tool {f.__name__}, reason: {e}") from e
class_name = None
if "." in f.__qualname__:
class_name = f.__qualname__.replace(f".{f.__name__}", "")
# Construct the Tool structure
return Tool(
name=name or f.__qualname__,
description=description or inspect.getdoc(f),
inputs=inputs,
type=tool_type,
class_name=class_name,
function=f.__name__,
module=f.__module__,
)
def generate_python_tools_in_module(module, name, description):
tool_functions = collect_tool_functions_in_module(module)
tool_methods = collect_tool_methods_in_module(module)
return [_parse_tool_from_function(f, name=name, description=description) for f in tool_functions] + [
_parse_tool_from_function(f, initialize_inputs, name=name, description=description)
for (f, initialize_inputs) in tool_methods
]
def generate_python_tools_in_module_as_dict(module, name=None, description=None, **advanced_features):
tools = generate_python_tools_in_module(module, name, description)
return _construct_tool_dict(tools, **advanced_features)
def generate_custom_llm_tools_in_module(module, name, description):
tool_functions = collect_tool_functions_in_module(module)
tool_methods = collect_tool_methods_in_module(module)
return [
_parse_tool_from_function(f, tool_type=ToolType.CUSTOM_LLM, name=name, description=description)
for f in tool_functions
] + [
_parse_tool_from_function(
f, initialize_inputs, tool_type=ToolType.CUSTOM_LLM, name=name, description=description
)
for (f, initialize_inputs) in tool_methods
]
def generate_custom_llm_tools_in_module_as_dict(module, name=None, description=None, **advanced_features):
tools = generate_custom_llm_tools_in_module(module, name, description)
return _construct_tool_dict(tools, **advanced_features)
def _construct_tool_dict(tools, **advanced_features):
return {
f"{t.module}.{t.class_name}.{t.function}"
if t.class_name is not None
else f"{t.module}.{t.function}": asdict_with_advanced_features_without_none(t, **advanced_features)
for t in tools
}
class ToolValidationError(UserErrorException):
"""Base exception raised when failed to validate tool."""
def __init__(self, message):
super().__init__(message, target=ErrorTarget.TOOL)
class PythonParsingError(ToolValidationError):
pass
class BadFunctionInterface(PythonParsingError):
pass
| promptflow/scripts/tool/utils/generate_tool_meta_utils.py/0 | {
"file_path": "promptflow/scripts/tool/utils/generate_tool_meta_utils.py",
"repo_id": "promptflow",
"token_count": 1865
} | 28 |
import functools
import json
import re
import sys
import time
from typing import List, Mapping
from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException
class ChatInputList(list):
"""
ChatInputList is a list of ChatInput objects. It is used to override the __str__ method of list to return a string
    that can be easily parsed as a message list.
"""
def __init__(self, iterable=None):
super().__init__(iterable or [])
def __str__(self):
return "\n".join(map(str, self))
def validate_role(role: str, valid_roles: List[str] = None):
if not valid_roles:
valid_roles = ["assistant", "function", "user", "system"]
if role not in valid_roles:
valid_roles_str = ','.join([f'\'{role}:\\n\'' for role in valid_roles])
error_message = (
f"The Chat API requires a specific format for prompt definition, and the prompt should include separate "
f"lines as role delimiters: {valid_roles_str}. Current parsed role '{role}'"
f" does not meet the requirement. If you intend to use the Completion API, please select the appropriate"
f" API type and deployment name. If you do intend to use the Chat API, please refer to the guideline at "
f"https://aka.ms/pfdoc/chat-prompt or view the samples in our gallery that contain 'Chat' in the name."
)
raise ChatAPIInvalidRole(message=error_message)
def validate_functions(functions):
function_example = json.dumps({
"name": "function_name",
"parameters": {
"type": "object",
"properties": {
"parameter_name": {
"type": "integer",
"description": "parameter_description"
}
}
},
"description": "function_description"
})
common_tsg = f"Here is a valid function example: {function_example}. See more details at " \
"https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions " \
"or view sample 'How to use functions with chat models' in our gallery."
if len(functions) == 0:
raise ChatAPIInvalidFunctions(message=f"functions cannot be an empty list. {common_tsg}")
else:
for i, function in enumerate(functions):
# validate if the function is a dict
if not isinstance(function, dict):
raise ChatAPIInvalidFunctions(message=f"function {i} '{function}' is not a dict. {common_tsg}")
# validate if has required keys
for key in ["name", "parameters"]:
if key not in function.keys():
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function}' does not have '{key}' property. {common_tsg}")
# validate if the parameters is a dict
if not isinstance(function["parameters"], dict):
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters '{function['parameters']}' "
f"should be described as a JSON Schema object. {common_tsg}")
# validate if the parameters has required keys
for key in ["type", "properties"]:
if key not in function["parameters"].keys():
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters '{function['parameters']}' "
f"does not have '{key}' property. {common_tsg}")
# validate if the parameters type is object
if function["parameters"]["type"] != "object":
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters 'type' "
f"should be 'object'. {common_tsg}")
# validate if the parameters properties is a dict
if not isinstance(function["parameters"]["properties"], dict):
raise ChatAPIInvalidFunctions(
message=f"function {i} '{function['name']}' parameters 'properties' "
f"should be described as a JSON Schema object. {common_tsg}")
def try_parse_name_and_content(role_prompt):
# customer can add ## in front of name/content for markdown highlight.
# and we still support name/content without ## prefix for backward compatibility.
pattern = r"\n*#{0,2}\s*name:\n+\s*(\S+)\s*\n*#{0,2}\s*content:\n?(.*)"
match = re.search(pattern, role_prompt, re.DOTALL)
if match:
return match.group(1), match.group(2)
return None
def parse_chat(chat_str, images: List = None, valid_roles: List[str] = None):
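    """Parse a chat prompt string into a list of role/content message dicts; image placeholders in the prompt are resolved against the optional images list."""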
if not valid_roles:
valid_roles = ["system", "user", "assistant", "function"]
# openai chat api only supports below roles.
# customer can add single # in front of role name for markdown highlight.
# and we still support role name without # prefix for backward compatibility.
separator = r"(?i)^\s*#?\s*(" + "|".join(valid_roles) + r")\s*:\s*\n"
images = images or []
hash2images = {str(x): x for x in images}
chunks = re.split(separator, chat_str, flags=re.MULTILINE)
chat_list = []
for chunk in chunks:
last_message = chat_list[-1] if len(chat_list) > 0 else None
if last_message and "role" in last_message and "content" not in last_message:
parsed_result = try_parse_name_and_content(chunk)
if parsed_result is None:
# "name" is required if the role is "function"
if last_message["role"] == "function":
raise ChatAPIFunctionRoleInvalidFormat(
message="Failed to parse function role prompt. Please make sure the prompt follows the "
"format: 'name:\\nfunction_name\\ncontent:\\nfunction_content'. "
"'name' is required if role is function, and it should be the name of the function "
"whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, "
"with a maximum length of 64 characters. See more details in "
"https://platform.openai.com/docs/api-reference/chat/create#chat/create-name "
"or view sample 'How to use functions with chat models' in our gallery.")
# "name" is optional for other role types.
else:
last_message["content"] = to_content_str_or_list(chunk, hash2images)
else:
last_message["name"] = parsed_result[0]
last_message["content"] = to_content_str_or_list(parsed_result[1], hash2images)
else:
if chunk.strip() == "":
continue
# Check if prompt follows chat api message format and has valid role.
# References: https://platform.openai.com/docs/api-reference/chat/create.
role = chunk.strip().lower()
validate_role(role, valid_roles=valid_roles)
new_message = {"role": role}
chat_list.append(new_message)
return chat_list
def to_content_str_or_list(chat_str: str, hash2images: Mapping):
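    """Return the chat content as a plain string, or as a list of text/image_url parts when the content references provided images."""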
chat_str = chat_str.strip()
chunks = chat_str.split("\n")
include_image = False
result = []
for chunk in chunks:
if chunk.strip() in hash2images:
image_message = {}
image_message["type"] = "image_url"
image_url = hash2images[chunk.strip()].source_url \
if hasattr(hash2images[chunk.strip()], "source_url") else None
if not image_url:
image_bs64 = hash2images[chunk.strip()].to_base64()
                    image_mime_type = hash2images[chunk.strip()]._mime_type
                    image_url = {"url": f"data:{image_mime_type};base64,{image_bs64}"}
image_message["image_url"] = image_url
result.append(image_message)
include_image = True
elif chunk.strip() == "":
continue
else:
result.append({"type": "text", "text": chunk})
return result if include_image else chat_str
def handle_openai_error(tries: int = 10, delay: float = 8.0):
"""
    A decorator function used to handle OpenAI errors.
    OpenAI errors fall into retriable vs non-retriable ones.
    For retriable errors, the decorator uses the parameters below to control its retry activity with exponential backoff:
    `tries`: max times for the function invocation, type is int
    `delay`: base delay seconds for exponential delay, type is float
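
    Example (illustrative only)::

        @handle_openai_error(tries=3, delay=1.0)
        def call_openai_api(*args, **kwargs):
            ...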
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
for i in range(tries + 1):
try:
return func(*args, **kwargs)
except (SystemErrorException, UserErrorException) as e:
# Throw inner wrapped exception directly
raise e
except (APIStatusError, APIConnectionError) as e:
# Handle retriable exception, please refer to
# https://platform.openai.com/docs/guides/error-codes/api-errors
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
if isinstance(e, APIConnectionError) and not isinstance(e, APITimeoutError) \
and "connection aborted" not in str(e).lower():
raise WrappedOpenAIError(e)
# Retry InternalServerError(>=500), RateLimitError(429), UnprocessableEntityError(422)
if isinstance(e, APIStatusError):
status_code = e.response.status_code
if status_code < 500 and status_code not in [429, 422]:
raise WrappedOpenAIError(e)
if isinstance(e, RateLimitError) and getattr(e, "type", None) == "insufficient_quota":
# Exit retry if this is quota insufficient error
print(f"{type(e).__name__} with insufficient quota. Throw user error.", file=sys.stderr)
raise WrappedOpenAIError(e)
if i == tries:
# Exit retry if max retry reached
print(f"{type(e).__name__} reached max retry. Exit retry with user error.", file=sys.stderr)
raise ExceedMaxRetryTimes(e)
if hasattr(e, 'response') and e.response is not None:
retry_after_in_header = e.response.headers.get("retry-after", None)
else:
retry_after_in_header = None
if not retry_after_in_header:
retry_after_seconds = delay * (2 ** i)
msg = (
f"{type(e).__name__} #{i}, but no Retry-After header, "
+ f"Back off {retry_after_seconds} seconds for retry."
)
print(msg, file=sys.stderr)
else:
retry_after_seconds = float(retry_after_in_header) * (2 ** i)
msg = (
f"{type(e).__name__} #{i}, Retry-After={retry_after_in_header}, "
f"Back off {retry_after_seconds} seconds for retry."
)
print(msg, file=sys.stderr)
time.sleep(retry_after_seconds)
except OpenAIError as e:
# For other non-retriable errors from OpenAIError,
# For example, AuthenticationError, APIConnectionError, BadRequestError, NotFoundError
# Mark UserError for all the non-retriable OpenAIError
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
raise WrappedOpenAIError(e)
except Exception as e:
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
error_message = f"OpenAI API hits exception: {type(e).__name__}: {str(e)}"
raise LLMError(message=error_message)
return wrapper
return decorator
def to_bool(value) -> bool:
return str(value).lower() == "true"
def render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs):
try:
return Template(prompt, trim_blocks=trim_blocks, keep_trailing_newline=keep_trailing_newline).render(**kwargs)
except Exception as e:
# For exceptions raised by jinja2 module, mark UserError
print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
error_message = f"Failed to render jinja template: {type(e).__name__}: {str(e)}. " \
+ "Please modify your prompt to fix the issue."
raise JinjaTemplateError(message=error_message) from e
def process_function_call(function_call):
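    """Validate the function_call parameter and return "auto", "none", or a dict containing a "name" field."""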
if function_call is None:
param = "auto"
elif function_call == "auto" or function_call == "none":
param = function_call
else:
function_call_example = json.dumps({"name": "function_name"})
common_tsg = f"Here is a valid example: {function_call_example}. See the guide at " \
"https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call " \
"or view sample 'How to call functions with chat models' in our gallery."
param = function_call
if not isinstance(param, dict):
raise ChatAPIInvalidFunctions(
message=f"function_call parameter '{param}' must be a dict, but not {type(function_call)}. {common_tsg}"
)
else:
if "name" not in function_call:
raise ChatAPIInvalidFunctions(
message=f'function_call parameter {json.dumps(param)} must contain "name" field. {common_tsg}'
)
return param
def post_process_chat_api_response(completion, stream, functions):
if stream:
if functions is not None:
error_message = "Function calling has not been supported by stream mode yet."
raise FunctionCallNotSupportedInStreamMode(message=error_message)
def generator():
for chunk in completion:
if chunk.choices:
yield chunk.choices[0].delta.content if hasattr(chunk.choices[0].delta, 'content') and \
chunk.choices[0].delta.content is not None else ""
        # We must return the generator object, not use yield directly here.
        # Otherwise, the function itself would become a generator, regardless of whether stream is True or False.
return generator()
else:
        # When calling a function, the function_call response is returned as a field in the message, so we need to
        # return the message directly. Otherwise, we only return the content.
if functions is not None:
return completion.model_dump()["choices"][0]["message"]
else:
# chat api may return message with no content.
return getattr(completion.choices[0].message, "content", "")
def preprocess_template_string(template_string: str) -> str:
"""Remove the image input decorator from the template string and place the image input in a new line."""
pattern = re.compile(r'\!\[(\s*image\s*)\]\(\{\{(\s*[^\s{}]+\s*)\}\}\)')
# Find all matches in the input string
matches = pattern.findall(template_string)
# Perform substitutions
for match in matches:
original = f"![{match[0]}]({{{{{match[1]}}}}})"
replacement = f"\n{{{{{match[1]}}}}}\n"
template_string = template_string.replace(original, replacement)
return template_string
def convert_to_chat_list(obj):
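    """Recursively convert lists in obj into ChatInputList instances so that chat history renders as newline-joined text."""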
if isinstance(obj, dict):
return {key: convert_to_chat_list(value) for key, value in obj.items()}
elif isinstance(obj, list):
return ChatInputList([convert_to_chat_list(item) for item in obj])
else:
return obj
def add_referenced_images_to_set(value, image_set, image_type):
if isinstance(value, image_type):
image_set.add(value)
elif isinstance(value, list):
for item in value:
add_referenced_images_to_set(item, image_set, image_type)
elif isinstance(value, dict):
for _, item in value.items():
add_referenced_images_to_set(item, image_set, image_type)
def find_referenced_image_set(kwargs: dict):
referenced_images = set()
try:
from promptflow.contracts.multimedia import Image
for _, value in kwargs.items():
add_referenced_images_to_set(value, referenced_images, Image)
except ImportError:
pass
return referenced_images
def normalize_connection_config(connection):
"""
Normalizes the configuration of a given connection object for compatibility.
This function takes a connection object and normalizes its configuration,
ensuring it is compatible and standardized for use.
"""
if isinstance(connection, AzureOpenAIConnection):
return {
"api_key": connection.api_key,
"api_version": connection.api_version,
"azure_endpoint": connection.api_base
}
elif isinstance(connection, OpenAIConnection):
return {
"api_key": connection.api_key,
"organization": connection.organization,
"base_url": connection.base_url
}
else:
error_message = f"Not Support connection type '{type(connection).__name__}'. " \
f"Connection type should be in [AzureOpenAIConnection, OpenAIConnection]."
raise InvalidConnectionType(message=error_message)
| promptflow/src/promptflow-tools/promptflow/tools/common.py/0 | {
"file_path": "promptflow/src/promptflow-tools/promptflow/tools/common.py",
"repo_id": "promptflow",
"token_count": 8205
} | 29 |
import httpx
import pytest
from jinja2.exceptions import TemplateSyntaxError
from openai import (
APIConnectionError,
RateLimitError,
AuthenticationError,
BadRequestError,
APITimeoutError, InternalServerError, UnprocessableEntityError
)
from promptflow.tools.aoai import chat, completion
from promptflow.tools.common import handle_openai_error
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, to_openai_error_message, \
JinjaTemplateError, LLMError, ChatAPIFunctionRoleInvalidFormat
from promptflow.tools.openai import chat as openai_chat
from promptflow.tools.aoai_gpt4v import AzureOpenAI as AzureOpenAIVision
from pytest_mock import MockerFixture
from promptflow.exceptions import UserErrorException
@pytest.mark.usefixtures("use_secrets_config_file")
class TestHandleOpenAIError:
def test_aoai_chat_message_invalid_format(self, aoai_provider):
# chat api prompt should follow the format of "system:\nmessage1\nuser:\nmessage2".
prompt = "what is your name"
error_codes = "UserError/ToolValidationError/ChatAPIInvalidRole"
with pytest.raises(ChatAPIInvalidRole,
match="The Chat API requires a specific format for prompt") as exc_info:
aoai_provider.chat(prompt=prompt, deployment_name="gpt-35-turbo")
assert exc_info.value.error_codes == error_codes.split("/")
def test_aoai_authentication_error_with_bad_api_key(self, azure_open_ai_connection):
azure_open_ai_connection.api_key = "hello"
prompt_template = "please complete this sentence: world war II "
raw_message = (
"Unauthorized. Access token is missing, invalid"
)
error_codes = "UserError/OpenAIError/AuthenticationError"
with pytest.raises(WrappedOpenAIError) as exc_info:
chat(azure_open_ai_connection, prompt=f"user:\n{prompt_template}", deployment_name="gpt-35-turbo")
assert raw_message in exc_info.value.message
assert exc_info.value.error_codes == error_codes.split("/")
def test_aoai_connection_error_with_bad_api_base(self, azure_open_ai_connection):
azure_open_ai_connection.api_base = "https://gpt-test-eus11.openai.azure.com/"
prompt_template = "please complete this sentence: world war II "
error_codes = "UserError/OpenAIError/APIConnectionError"
with pytest.raises(WrappedOpenAIError) as exc_info:
chat(azure_open_ai_connection, prompt=f"user:\n{prompt_template}", deployment_name="gpt-35-turbo")
assert "Connection error." in exc_info.value.message
assert exc_info.value.error_codes == error_codes.split("/")
def test_aoai_not_found_error_with_bad_api_version(self, azure_open_ai_connection):
"""NotFoundError: Resource not found"""
azure_open_ai_connection.api_version = "2022-12-23"
prompt_template = "please complete this sentence: world war II "
raw_message = "Resource not found"
error_codes = "UserError/OpenAIError/NotFoundError"
# Chat will throw: Exception occurs: NotFoundError: Resource not found
with pytest.raises(WrappedOpenAIError) as exc_info:
chat(azure_open_ai_connection, prompt=f"user:\n{prompt_template}", deployment_name="gpt-35-turbo")
assert raw_message in exc_info.value.message
assert exc_info.value.error_codes == error_codes.split("/")
def test_aoai_not_found_error_with_bad_deployment(self, aoai_provider):
"""
NotFoundError: The API deployment for this resource does not exist.
If you created the deployment within the last 5 minutes, please wait a moment and try again.
"""
# This will throw InvalidRequestError
prompt_template = "please complete this sentence: world war II "
deployment = "hello"
raw_message = (
"The API deployment for this resource does not exist. If you created the deployment "
"within the last 5 minutes, please wait a moment and try again."
)
error_codes = "UserError/OpenAIError/NotFoundError"
with pytest.raises(WrappedOpenAIError) as exc_info:
aoai_provider.chat(prompt=f"user:\n{prompt_template}", deployment_name=deployment)
assert raw_message in exc_info.value.message
assert exc_info.value.error_codes == error_codes.split("/")
def test_rate_limit_error_insufficient_quota(self, azure_open_ai_connection, mocker: MockerFixture):
dummyEx = RateLimitError("Something went wrong", response=httpx.Response(
429, request=httpx.Request('GET', 'https://www.example.com')), body={"type": "insufficient_quota"})
mock_method = mocker.patch("openai.resources.Completions.create", side_effect=dummyEx)
error_codes = "UserError/OpenAIError/RateLimitError"
with pytest.raises(WrappedOpenAIError) as exc_info:
completion(connection=azure_open_ai_connection, prompt="hello", deployment_name="text-ada-001")
assert to_openai_error_message(dummyEx) == exc_info.value.message
assert mock_method.call_count == 1
assert exc_info.value.error_codes == error_codes.split("/")
@pytest.mark.parametrize(
"dummyExceptionList",
[
(
[
RateLimitError("Something went wrong", response=httpx.Response(
429, request=httpx.Request('GET', 'https://www.example.com')), body=None),
APITimeoutError(request=httpx.Request('GET', 'https://www.example.com')),
APIConnectionError(
message="('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))",
request=httpx.Request('GET', 'https://www.example.com')),
InternalServerError("Something went wrong", response=httpx.Response(
503, request=httpx.Request('GET', 'https://www.example.com')), body=None),
UnprocessableEntityError("Something went wrong", response=httpx.Response(
422, request=httpx.Request('GET', 'https://www.example.com')), body=None)
]
),
],
)
def test_retriable_openai_error_handle(self, mocker: MockerFixture, dummyExceptionList):
for dummyEx in dummyExceptionList:
# Patch the test_method to throw the desired exception
patched_test_method = mocker.patch("openai.resources.Completions.create", side_effect=dummyEx)
# Apply the retry decorator to the patched test_method
max_retry = 2
delay = 0.2
decorated_test_method = handle_openai_error(tries=max_retry, delay=delay)(patched_test_method)
mock_sleep = mocker.patch("time.sleep") # Create a separate mock for time.sleep
with pytest.raises(UserErrorException) as exc_info:
decorated_test_method()
assert patched_test_method.call_count == max_retry + 1
assert "Exceed max retry times. " + to_openai_error_message(dummyEx) == exc_info.value.message
error_codes = "UserError/OpenAIError/" + type(dummyEx).__name__
assert exc_info.value.error_codes == error_codes.split("/")
expected_calls = [
mocker.call(delay),
mocker.call(delay * 2),
]
mock_sleep.assert_has_calls(expected_calls)
@pytest.mark.parametrize(
"dummyExceptionList",
[
(
[
RateLimitError("Something went wrong", response=httpx.Response(
429, request=httpx.Request('GET', 'https://www.example.com'), headers={"retry-after": "0.3"}),
body=None),
InternalServerError("Something went wrong", response=httpx.Response(
503, request=httpx.Request('GET', 'https://www.example.com'), headers={"retry-after": "0.3"}),
body=None),
UnprocessableEntityError("Something went wrong", response=httpx.Response(
422, request=httpx.Request('GET', 'https://www.example.com'), headers={"retry-after": "0.3"}),
body=None)
]
),
],
)
def test_retriable_openai_error_handle_with_header(
self, mocker: MockerFixture, dummyExceptionList
):
for dummyEx in dummyExceptionList:
# Patch the test_method to throw the desired exception
patched_test_method = mocker.patch("promptflow.tools.aoai.completion", side_effect=dummyEx)
# Apply the retry decorator to the patched test_method
max_retry = 2
delay = 0.2
header_delay = 0.3
decorated_test_method = handle_openai_error(tries=max_retry, delay=delay)(patched_test_method)
mock_sleep = mocker.patch("time.sleep") # Create a separate mock for time.sleep
with pytest.raises(UserErrorException) as exc_info:
decorated_test_method()
assert patched_test_method.call_count == max_retry + 1
assert "Exceed max retry times. " + to_openai_error_message(dummyEx) == exc_info.value.message
error_codes = "UserError/OpenAIError/" + type(dummyEx).__name__
assert exc_info.value.error_codes == error_codes.split("/")
expected_calls = [
mocker.call(header_delay),
mocker.call(header_delay * 2),
]
mock_sleep.assert_has_calls(expected_calls)
@pytest.mark.parametrize(
"dummyExceptionList",
[
(
[
AuthenticationError("Something went wrong", response=httpx.get('https://www.example.com'),
body=None),
BadRequestError("Something went wrong", response=httpx.get('https://www.example.com'),
body=None),
APIConnectionError(message="Something went wrong",
request=httpx.Request('GET', 'https://www.example.com')),
]
),
],
)
def test_non_retriable_openai_error_handle(
self, azure_open_ai_connection, mocker: MockerFixture, dummyExceptionList
):
for dummyEx in dummyExceptionList:
mock_method = mocker.patch("openai.resources.Completions.create", side_effect=dummyEx)
with pytest.raises(UserErrorException) as exc_info:
completion(connection=azure_open_ai_connection, prompt="hello", deployment_name="text-ada-001")
assert to_openai_error_message(dummyEx) == exc_info.value.message
error_codes = "UserError/OpenAIError/" + type(dummyEx).__name__
assert exc_info.value.error_codes == error_codes.split("/")
assert mock_method.call_count == 1
def test_unexpected_error_handle(self, azure_open_ai_connection, mocker: MockerFixture):
dummyEx = Exception("Something went wrong")
chat(connection=azure_open_ai_connection, prompt="user:\nhello", deployment_name="gpt-35-turbo")
mock_method = mocker.patch("openai.resources.chat.Completions.create", side_effect=dummyEx)
error_codes = "UserError/LLMError"
with pytest.raises(LLMError) as exc_info:
chat(connection=azure_open_ai_connection, prompt="user:\nhello", deployment_name="gpt-35-turbo")
assert to_openai_error_message(dummyEx) != exc_info.value.args[0]
assert "OpenAI API hits exception: Exception: Something went wrong" == exc_info.value.message
assert mock_method.call_count == 1
assert exc_info.value.error_codes == error_codes.split("/")
def test_template_syntax_error_handle(self, azure_open_ai_connection, mocker: MockerFixture):
dummyEx = TemplateSyntaxError(message="Something went wrong", lineno=1)
mock_method = mocker.patch("jinja2.Template.__new__", side_effect=dummyEx)
error_codes = "UserError/ToolValidationError/JinjaTemplateError"
with pytest.raises(JinjaTemplateError) as exc_info:
chat(connection=azure_open_ai_connection, prompt="user:\nhello", deployment_name="gpt-35-turbo")
error_message = "Failed to render jinja template: TemplateSyntaxError: Something went wrong\n line 1. " \
+ "Please modify your prompt to fix the issue."
assert error_message == exc_info.value.message
assert mock_method.call_count == 1
assert exc_info.value.error_codes == error_codes.split("/")
@pytest.mark.skip_if_no_api_key("open_ai_connection")
def test_model_not_accept_functions_as_param(
self, open_ai_connection, example_prompt_template, functions):
with pytest.raises(WrappedOpenAIError) as exc_info:
openai_chat(
connection=open_ai_connection,
prompt=example_prompt_template,
model="gpt-3.5-turbo-0301",
functions=functions
)
assert "Current model does not support the `functions` parameter" in exc_info.value.message
def test_input_invalid_function_role_prompt(self, azure_open_ai_connection):
with pytest.raises(ChatAPIFunctionRoleInvalidFormat) as exc_info:
chat(
connection=azure_open_ai_connection,
prompt="function:\n This is function role prompt",
deployment_name="gpt-35-turbo"
)
assert "'name' is required if role is function," in exc_info.value.message
def test_completion_with_chat_model(self, azure_open_ai_connection):
with pytest.raises(UserErrorException) as exc_info:
completion(connection=azure_open_ai_connection, prompt="hello", deployment_name="gpt-35-turbo")
msg = "Completion API is a legacy api and is going to be deprecated soon. " \
"Please change to use Chat API for current model."
assert msg in exc_info.value.message
def test_model_not_support_image_input(
self, azure_open_ai_connection, example_prompt_template_with_image, example_image):
aoai = AzureOpenAIVision(azure_open_ai_connection)
with pytest.raises(WrappedOpenAIError) as exc_info:
aoai.chat(
prompt=example_prompt_template_with_image,
deployment_name="gpt-35-turbo",
max_tokens=480,
temperature=0,
question="which number did you see in this picture?",
image_input=example_image,
)
assert "Current model does not support the image input" in exc_info.value.message
@pytest.mark.parametrize(
"max_tokens, error_message, error_codes, exception",
[
(0, "0 is less than the minimum of 1", "UserError/OpenAIError/BadRequestError", WrappedOpenAIError),
(-1, "-1 is less than the minimum of 1", "UserError/OpenAIError/BadRequestError", WrappedOpenAIError),
("asd", "ValueError: invalid literal for int()", "UserError/LLMError", LLMError)
]
)
def test_aoai_invalid_max_tokens(
self,
azure_open_ai_connection,
example_prompt_template,
chat_history,
max_tokens,
error_message,
error_codes,
exception):
with pytest.raises(exception) as exc_info:
chat(
connection=azure_open_ai_connection,
prompt=example_prompt_template,
deployment_name="gpt-35-turbo",
max_tokens=max_tokens,
temperature=0,
user_input="Write a slogan for product X",
chat_history=chat_history,
)
assert error_message in exc_info.value.message
assert exc_info.value.error_codes == error_codes.split("/")
| promptflow/src/promptflow-tools/tests/test_handle_openai_error.py/0 | {
"file_path": "promptflow/src/promptflow-tools/tests/test_handle_openai_error.py",
"repo_id": "promptflow",
"token_count": 7049
} | 30 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import functools
import json
from typing import Dict, List, Optional
from promptflow._cli._params import (
add_param_all_results,
add_param_archived_only,
add_param_include_archived,
add_param_max_results,
add_param_output,
add_param_output_format,
add_param_overwrite,
add_param_run_name,
add_param_set,
base_params,
)
from promptflow._cli._pf._run import _parse_metadata_args, add_run_create_common, create_run
from promptflow._cli._pf_azure._utils import _get_azure_pf_client
from promptflow._cli._utils import (
_output_result_list_with_format,
_set_workspace_argument_for_subparsers,
activate_action,
exception_handler,
pretty_print_dataframe_as_table,
)
from promptflow._sdk._constants import MAX_SHOW_DETAILS_RESULTS, ListViewType
from promptflow._sdk._errors import InvalidRunStatusError
from promptflow._sdk._utils import print_red_error
from promptflow.azure._restclient.flow_service_caller import FlowRequestException
def add_parser_run(subparsers):
"""Add run parser to the pfazure subparsers."""
run_parser = subparsers.add_parser(
"run", description="A CLI tool to manage cloud runs for prompt flow.", help="Manage prompt flow runs."
)
subparsers = run_parser.add_subparsers()
add_run_create_cloud(subparsers)
add_parser_run_list(subparsers)
add_parser_run_stream(subparsers)
add_parser_run_show(subparsers)
add_parser_run_show_details(subparsers)
add_parser_run_show_metrics(subparsers)
add_parser_run_cancel(subparsers)
add_parser_run_visualize(subparsers)
add_parser_run_archive(subparsers)
add_parser_run_restore(subparsers)
add_parser_run_update(subparsers)
add_parser_run_download(subparsers)
run_parser.set_defaults(action="run")
def add_run_create_cloud(subparsers):
epilog = """
Example:
# Create a run with YAML file:
pfazure run create -f <yaml-filename>
# Create a run from flow directory and reference a run:
pfazure run create --flow <path-to-flow-directory> --data <path-to-data-file> --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run <run-name> --variant "${summarize_text_content.variant_0}" --stream
# Create a run from existing workspace flow
pfazure run create --flow azureml:<flow-name> --data <path-to-data-file> --column-mapping <key-value-pair>
# Create a run from existing registry flow
pfazure run create --flow azureml://registries/<registry-name>/models/<flow-name>/versions/<version> --data <path-to-data-file> --column-mapping <key-value-pair>
""" # noqa: E501
def add_param_data(parser):
# cloud pf can also accept remote data
parser.add_argument(
"--data", type=str, help="Local path to the data file or remote data. e.g. azureml:name:version."
)
add_param_runtime = lambda parser: parser.add_argument("--runtime", type=str, help=argparse.SUPPRESS) # noqa: E731
add_param_reset = lambda parser: parser.add_argument( # noqa: E731
"--reset-runtime", action="store_true", help=argparse.SUPPRESS
)
add_run_create_common(
subparsers,
[add_param_data, add_param_runtime, add_param_reset, _set_workspace_argument_for_subparsers],
epilog=epilog,
)
def add_parser_run_list(subparsers):
"""Add run list parser to the pfazure subparsers."""
epilog = """
Examples:
# List runs status:
pfazure run list
# List most recent 10 runs status:
pfazure run list --max-results 10
# List active and archived runs status:
pfazure run list --include-archived
# List archived runs status only:
pfazure run list --archived-only
# List all runs status as table:
pfazure run list --output table
"""
add_params = [
add_param_max_results,
add_param_archived_only,
add_param_include_archived,
add_param_output_format,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="list",
description="A CLI tool to List all runs.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="List runs in a workspace.",
action_param_name="sub_action",
)
def add_parser_run_stream(subparsers):
"""Add run stream parser to the pfazure subparsers."""
epilog = """
Example:
# Stream run logs:
pfazure run stream --name <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="stream",
description="A CLI tool to stream run logs to the console.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Stream run logs to the console.",
action_param_name="sub_action",
)
def add_parser_run_show(subparsers):
"""Add run show parser to the pfazure subparsers."""
epilog = """
Example:
# Show the status of a run:
pfazure run show --name <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="show",
description="A CLI tool to show a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Show a run.",
action_param_name="sub_action",
)
def add_parser_run_show_details(subparsers):
"""Add run show details parser to the pfazure subparsers."""
epilog = """
Example:
# View input(s) and output(s) of a run:
pfazure run show-details --name <name>
"""
add_param_max_results = lambda parser: parser.add_argument( # noqa: E731
"-r",
"--max-results",
dest="max_results",
type=int,
default=MAX_SHOW_DETAILS_RESULTS,
help=f"Number of lines to show. Default is {MAX_SHOW_DETAILS_RESULTS}.",
)
add_params = [
add_param_max_results,
add_param_run_name,
add_param_all_results,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="show-details",
description="A CLI tool to show a run details.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Show a run details.",
action_param_name="sub_action",
)
def add_parser_run_show_metrics(subparsers):
"""Add run show metrics parser to the pfazure subparsers."""
epilog = """
Example:
# View metrics of a run:
pfazure run show-metrics --name <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="show-metrics",
description="A CLI tool to show run metrics.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Show run metrics.",
action_param_name="sub_action",
)
def add_parser_run_cancel(subparsers):
"""Add run cancel parser to the pfazure subparsers."""
epilog = """
Example:
# Cancel a run:
pfazure run cancel --name <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="cancel",
description="A CLI tool to cancel a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Cancel a run.",
action_param_name="sub_action",
)
def add_parser_run_visualize(subparsers):
"""Add run visualize parser to the pfazure subparsers."""
epilog = """
Examples:
# Visualize a run:
pfazure run visualize -n <name>
# Visualize runs:
pfazure run visualize --names "<name1,name2>"
pfazure run visualize --names "<name1>, <name2>"
"""
add_param_name = lambda parser: parser.add_argument( # noqa: E731
"-n", "--names", type=str, required=True, help="Name of the runs, comma separated."
)
add_param_html_path = lambda parser: parser.add_argument( # noqa: E731
"--html-path", type=str, default=None, help=argparse.SUPPRESS
)
add_params = [
add_param_name,
add_param_html_path,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="visualize",
description="A CLI tool to visualize a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Visualize a run.",
action_param_name="sub_action",
)
def add_parser_run_archive(subparsers):
"""Add run archive parser to the pfazure subparsers."""
epilog = """
Examples:
# Archive a run:
pfazure run archive -n <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="archive",
description="A CLI tool to archive a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Archive a run.",
action_param_name="sub_action",
)
def add_parser_run_restore(subparsers):
"""Add run restore parser to the pfazure subparsers."""
epilog = """
Examples:
# Restore an archived run:
pfazure run restore -n <name>
"""
add_params = [
add_param_run_name,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="restore",
description="A CLI tool to restore a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Restore a run.",
action_param_name="sub_action",
)
def add_parser_run_update(subparsers):
"""Add run update parser to the pfazure subparsers."""
epilog = """
Example:
# Update a run metadata:
pfazure run update --name <name> --set display_name="<display-name>" description="<description>" tags.key="<value>"
"""
add_params = [
add_param_run_name,
add_param_set,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="update",
description="A CLI tool to update a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Update a run.",
action_param_name="sub_action",
)
def add_parser_run_download(subparsers):
"""Add run download parser to the pfazure subparsers."""
epilog = """
Example:
# Download a run data to local:
pfazure run download --name <name> --output <output-folder-path>
"""
add_params = [
add_param_run_name,
add_param_output,
add_param_overwrite,
_set_workspace_argument_for_subparsers,
] + base_params
activate_action(
name="download",
description="A CLI tool to download a run.",
epilog=epilog,
add_params=add_params,
subparsers=subparsers,
help_message="Download a run.",
action_param_name="sub_action",
)
def dispatch_run_commands(args: argparse.Namespace):
if args.sub_action == "create":
pf = _get_azure_pf_client(args.subscription, args.resource_group, args.workspace_name, debug=args.debug)
create_run(
create_func=functools.partial(
pf.runs.create_or_update, runtime=args.runtime, reset_runtime=args.reset_runtime
),
args=args,
)
elif args.sub_action == "list":
list_runs(
args.subscription,
args.resource_group,
args.workspace_name,
args.max_results,
args.archived_only,
args.include_archived,
args.output,
)
elif args.sub_action == "show":
show_run(args.subscription, args.resource_group, args.workspace_name, args.name)
elif args.sub_action == "show-details":
show_run_details(
args.subscription,
args.resource_group,
args.workspace_name,
args.name,
args.max_results,
args.all_results,
args.debug,
)
elif args.sub_action == "show-metrics":
show_metrics(args.subscription, args.resource_group, args.workspace_name, args.name)
elif args.sub_action == "stream":
stream_run(args.subscription, args.resource_group, args.workspace_name, args.name, args.debug)
elif args.sub_action == "visualize":
visualize(
args.subscription,
args.resource_group,
args.workspace_name,
args.names,
args.html_path,
args.debug,
)
elif args.sub_action == "archive":
archive_run(args.subscription, args.resource_group, args.workspace_name, args.name)
elif args.sub_action == "restore":
restore_run(args.subscription, args.resource_group, args.workspace_name, args.name)
elif args.sub_action == "update":
update_run(args.subscription, args.resource_group, args.workspace_name, args.name, params=args.params_override)
elif args.sub_action == "download":
download_run(args)
elif args.sub_action == "cancel":
cancel_run(args)
@exception_handler("List runs")
def list_runs(
subscription_id,
resource_group,
workspace_name,
max_results,
archived_only,
include_archived,
output,
):
"""List all runs from cloud."""
if max_results < 1:
raise ValueError(f"'max_results' must be a positive integer, got {max_results!r}")
# Default list_view_type is ACTIVE_ONLY
if archived_only and include_archived:
raise ValueError("Cannot specify both 'archived_only' and 'include_archived'")
list_view_type = ListViewType.ACTIVE_ONLY
if archived_only:
list_view_type = ListViewType.ARCHIVED_ONLY
if include_archived:
list_view_type = ListViewType.ALL
pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name)
runs = pf.runs.list(max_results=max_results, list_view_type=list_view_type)
# hide additional info, debug info and properties in run list for better user experience
run_list = [
run._to_dict(exclude_additional_info=True, exclude_debug_info=True, exclude_properties=True) for run in runs
]
_output_result_list_with_format(result_list=run_list, output_format=output)
return runs
@exception_handler("Show run")
def show_run(subscription_id, resource_group, workspace_name, run_name):
"""Show a run from cloud."""
pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name)
run = pf.runs.get(run=run_name)
print(json.dumps(run._to_dict(), indent=4))
@exception_handler("Show run details")
def show_run_details(subscription_id, resource_group, workspace_name, run_name, max_results, all_results, debug=False):
"""Show a run details from cloud."""
pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name, debug=debug)
details = pf.runs.get_details(run=run_name, max_results=max_results, all_results=all_results)
details.fillna(value="(Failed)", inplace=True) # replace nan with explicit prompt
pretty_print_dataframe_as_table(details)
@exception_handler("Show run metrics")
def show_metrics(subscription_id, resource_group, workspace_name, run_name):
"""Show run metrics from cloud."""
pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name)
metrics = pf.runs.get_metrics(run=run_name)
print(json.dumps(metrics, indent=4))
@exception_handler("Stream run")
def stream_run(subscription_id, resource_group, workspace_name, run_name, debug=False):
"""Stream run logs from cloud."""
pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name, debug=debug)
run = pf.runs.stream(run_name)
print("\n")
print(json.dumps(run._to_dict(), indent=4))
@exception_handler("Visualize run")
def visualize(
subscription_id: str,
resource_group: str,
workspace_name: str,
names: str,
html_path: Optional[str] = None,
debug: bool = False,
):
run_names = [name.strip() for name in names.split(",")]
pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name, debug=debug)
try:
pf.runs.visualize(run_names, html_path=html_path)
except FlowRequestException as e:
error_message = f"Visualize failed, request service error: {str(e)}"
print_red_error(error_message)
except InvalidRunStatusError as e:
error_message = f"Visualize failed: {str(e)}"
print_red_error(error_message)
@exception_handler("Archive run")
def archive_run(
subscription_id: str,
resource_group: str,
workspace_name: str,
run_name: str,
):
pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name)
run = pf.runs.archive(run=run_name)
print(json.dumps(run._to_dict(), indent=4))
@exception_handler("Restore run")
def restore_run(
subscription_id: str,
resource_group: str,
workspace_name: str,
run_name: str,
):
pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name)
run = pf.runs.restore(run=run_name)
print(json.dumps(run._to_dict(), indent=4))
@exception_handler("Update run")
def update_run(
subscription_id: str,
resource_group: str,
workspace_name: str,
run_name: str,
params: List[Dict[str, str]],
):
# params_override can have multiple items when user specifies with
    # `--set key1=value1 key2=value2`
# so we need to merge them first.
display_name, description, tags = _parse_metadata_args(params)
pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name)
run = pf.runs.update(run=run_name, display_name=display_name, description=description, tags=tags)
print(json.dumps(run._to_dict(), indent=4))
@exception_handler("Download run")
def download_run(args: argparse.Namespace):
pf = _get_azure_pf_client(args.subscription, args.resource_group, args.workspace_name, debug=args.debug)
pf.runs.download(run=args.name, output=args.output, overwrite=args.overwrite)
@exception_handler("Cancel run")
def cancel_run(args: argparse.Namespace):
pf = _get_azure_pf_client(args.subscription, args.resource_group, args.workspace_name, debug=args.debug)
pf.runs.cancel(run=args.name)
| promptflow/src/promptflow/promptflow/_cli/_pf_azure/_run.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/_pf_azure/_run.py",
"repo_id": "promptflow",
"token_count": 7636
} | 31 |
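The subparser definitions above all follow the same composition pattern: each `add_param_*` entry is a callable that registers exactly one argument, and the shared `activate_action` helper (defined elsewhere in the CLI package) applies the whole list to a freshly created subparser. Below is a minimal, self-contained sketch of that pattern; `_demo_activate_action` is a stand-in written for illustration, not the real helper.

import argparse

def _demo_activate_action(subparsers, name, add_params, help_message=""):
    # Stand-in for activate_action: create the subparser, then let each
    # callable in add_params register one argument on it.
    parser = subparsers.add_parser(name, help=help_message)
    for add_param in add_params:
        add_param(parser)
    return parser

root = argparse.ArgumentParser(prog="pfazure-demo")
subparsers = root.add_subparsers(dest="sub_action")
add_param_names = lambda parser: parser.add_argument("-n", "--names", type=str, required=True)  # noqa: E731
_demo_activate_action(subparsers, "visualize", [add_param_names], help_message="Visualize a run.")
print(root.parse_args(["visualize", "-n", "run1,run2"]))  # -> Namespace with sub_action='visualize', names='run1,run2'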
{
"package": {},
"code": {
"line_process.py": {
"type": "python",
"inputs": {
"groundtruth": {
"type": [
"string"
]
},
"prediction": {
"type": [
"string"
]
}
},
"function": "line_process"
},
"aggregate.py": {
"type": "python",
"inputs": {
"processed_results": {
"type": [
"object"
]
}
},
"function": "aggregate"
}
}
}
| promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/.promptflow/flow.tools.json/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/.promptflow/flow.tools.json",
"repo_id": "promptflow",
"token_count": 328
} | 32 |
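The flow.tools.json content above declares `line_process.py` as a Python tool with two string inputs, `groundtruth` and `prediction`. A hypothetical implementation consistent with that signature could look like the sketch below; the comparison logic is made up for illustration and is not taken from the repository.

from promptflow import tool

@tool
def line_process(groundtruth: str, prediction: str) -> str:
    # Illustrative per-line evaluation; the real implementation is not shown in this document.
    return "Correct" if groundtruth.strip().lower() == prediction.strip().lower() else "Incorrect"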
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
CONNECTION_NAME_PROPERTY = "__connection_name"
CONNECTION_SECRET_KEYS = "__secret_keys"
PROMPTFLOW_CONNECTIONS = "PROMPTFLOW_CONNECTIONS"
PROMPTFLOW_SECRETS_FILE = "PROMPTFLOW_SECRETS_FILE"
PF_NO_INTERACTIVE_LOGIN = "PF_NO_INTERACTIVE_LOGIN"
PF_LOGGING_LEVEL = "PF_LOGGING_LEVEL"
OPENAI_API_KEY = "openai-api-key"
BING_API_KEY = "bing-api-key"
AOAI_API_KEY = "aoai-api-key"
SERPAPI_API_KEY = "serpapi-api-key"
CONTENT_SAFETY_API_KEY = "content-safety-api-key"
ERROR_RESPONSE_COMPONENT_NAME = "promptflow"
EXTENSION_UA = "prompt-flow-extension"
LANGUAGE_KEY = "language"
DEFAULT_ENCODING = "utf-8"
# Constants related to execution
LINE_NUMBER_KEY = "line_number"  # Using the same key as the portal.
LINE_TIMEOUT_SEC = 600
class FlowLanguage:
"""The enum of tool source type."""
Python = "python"
CSharp = "csharp"
class AvailableIDE:
VS = "vs"
VS_CODE = "vsc"
USER_AGENT = "USER_AGENT"
PF_USER_AGENT = "PF_USER_AGENT"
CLI_PACKAGE_NAME = 'promptflow'
CURRENT_VERSION = 'current_version'
LATEST_VERSION = 'latest_version'
LAST_HINT_TIME = 'last_hint_time'
LAST_CHECK_TIME = 'last_check_time'
PF_VERSION_CHECK = "pf_version_check.json"
HINT_INTERVAL_DAY = 7
GET_PYPI_INTERVAL_DAY = 7
_ENV_PF_INSTALLER = 'PF_INSTALLER'
| promptflow/src/promptflow/promptflow/_constants.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_constants.py",
"repo_id": "promptflow",
"token_count": 565
} | 33 |
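Several of the constants above are environment variable names rather than values. A small, hedged sketch of how a consumer might honor `PF_LOGGING_LEVEL` is shown below; the actual logging setup lives elsewhere in the SDK and may differ.

import logging
import os

# Fall back to INFO when PF_LOGGING_LEVEL is unset or unrecognized (assumed behavior).
level_name = os.getenv("PF_LOGGING_LEVEL", "INFO").upper()
logging.basicConfig(level=getattr(logging, level_name, logging.INFO))
logging.getLogger(__name__).info("Effective log level: %s", level_name)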
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import importlib
import importlib.util
import inspect
import logging
import traceback
import types
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Mapping, Optional, Tuple, Union
from promptflow._core._errors import (
InputTypeMismatch,
InvalidSource,
MissingRequiredInputs,
PackageToolNotFoundError,
ToolLoadError,
)
from promptflow._core.tool_meta_generator import (
_parse_tool_from_function,
collect_tool_function_in_module,
load_python_module_from_file,
)
from promptflow._utils.connection_utils import (
generate_custom_strong_type_connection_spec,
generate_custom_strong_type_connection_template,
)
from promptflow._utils.tool_utils import (
_DEPRECATED_TOOLS,
DynamicListError,
RetrieveToolFuncResultError,
_find_deprecated_tools,
append_workspace_triple_to_func_input_params,
function_to_tool_definition,
get_prompt_param_name_from_func,
load_function_from_function_path,
validate_dynamic_list_func_response_type,
validate_tool_func_result,
)
from promptflow._utils.yaml_utils import load_yaml
from promptflow.contracts.flow import InputAssignment, InputValueType, Node, ToolSourceType
from promptflow.contracts.tool import ConnectionType, Tool, ToolType
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException, ValidationException
module_logger = logging.getLogger(__name__)
PACKAGE_TOOLS_ENTRY = "package_tools"
def collect_tools_from_directory(base_dir) -> dict:
tools = {}
for f in Path(base_dir).glob("**/*.yaml"):
with open(f, "r") as f:
tools_in_file = load_yaml(f)
for identifier, tool in tools_in_file.items():
tools[identifier] = tool
return tools
def _get_entry_points_by_group(group):
# lazy load to improve performance for scenarios that don't need to load package tools
import importlib.metadata
    # In Python 3.10 and later, the entry_points() method returns a selectable collection of EntryPoint objects,
# which allows us to select entry points by group. In the previous versions, the entry_points() method
# returns a dictionary-like object, we can use group name directly as a key.
entry_points = importlib.metadata.entry_points()
if isinstance(entry_points, list):
return entry_points.select(group=group)
else:
return entry_points.get(group, [])
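# Illustrative sketch (not part of the original module): the version difference
# described in the comment above, expressed with an explicit version check.
# The group name "console_scripts" is only used here as a widely available example.
def _demo_select_entry_points(group: str = "console_scripts"):
    import sys
    import importlib.metadata

    eps = importlib.metadata.entry_points()
    if sys.version_info >= (3, 10):
        return list(eps.select(group=group))
    return list(eps.get(group, []))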
def collect_package_tools(keys: Optional[List[str]] = None) -> dict:
"""Collect all tools from all installed packages."""
all_package_tools = {}
if keys is not None:
keys = set(keys)
entry_points = _get_entry_points_by_group(PACKAGE_TOOLS_ENTRY)
for entry_point in entry_points:
try:
list_tool_func = entry_point.load()
package_tools = list_tool_func()
for identifier, tool in package_tools.items():
# Only load required tools to avoid unnecessary loading when keys is provided
if isinstance(keys, set) and identifier not in keys:
# Support to collect new tool id if node source tool is a deprecated tool.
deprecated_tool_ids = tool.get(_DEPRECATED_TOOLS, [])
if not set(deprecated_tool_ids).intersection(keys):
continue
m = tool["module"]
importlib.import_module(m) # Import the module to make sure it is valid
tool["package"] = entry_point.dist.metadata["Name"]
tool["package_version"] = entry_point.dist.version
all_package_tools[identifier] = tool
except Exception as e:
msg = (
f"Failed to load tools from package {entry_point.dist.metadata['Name']}: {e},"
+ f" traceback: {traceback.format_exc()}"
)
module_logger.warning(msg)
return all_package_tools
def collect_package_tools_and_connections(keys: Optional[List[str]] = None) -> dict:
"""Collect all tools and custom strong type connections from all installed packages."""
all_package_tools = {}
all_package_connection_specs = {}
all_package_connection_templates = {}
if keys is not None:
keys = set(keys)
entry_points = _get_entry_points_by_group(PACKAGE_TOOLS_ENTRY)
for entry_point in entry_points:
try:
list_tool_func = entry_point.load()
package_tools = list_tool_func()
for identifier, tool in package_tools.items():
# Only load required tools to avoid unnecessary loading when keys is provided
if isinstance(keys, set) and identifier not in keys:
continue
m = tool["module"]
module = importlib.import_module(m) # Import the module to make sure it is valid
tool["package"] = entry_point.dist.metadata["Name"]
tool["package_version"] = entry_point.dist.version
all_package_tools[identifier] = tool
# Get custom strong type connection definition
custom_strong_type_connections_classes = [
obj
for name, obj in inspect.getmembers(module)
if inspect.isclass(obj)
and ConnectionType.is_custom_strong_type(obj)
and (not ConnectionType.is_connection_class_name(name))
]
if custom_strong_type_connections_classes:
for cls in custom_strong_type_connections_classes:
identifier = f"{cls.__module__}.{cls.__name__}"
connection_spec = generate_custom_strong_type_connection_spec(
cls, entry_point.dist.metadata["Name"], entry_point.dist.version
)
all_package_connection_specs[identifier] = connection_spec
all_package_connection_templates[identifier] = generate_custom_strong_type_connection_template(
cls, connection_spec, entry_point.dist.metadata["Name"], entry_point.dist.version
)
except Exception as e:
msg = (
f"Failed to load tools from package {entry_point.dist.metadata['Name']}: {e},"
+ f" traceback: {traceback.format_exc()}"
)
module_logger.warning(msg)
return all_package_tools, all_package_connection_specs, all_package_connection_templates
def retrieve_tool_func_result(
func_call_scenario: str, func_path: str, func_input_params_dict: Dict, ws_triple_dict: Dict[str, str] = {}
):
func = load_function_from_function_path(func_path)
# get param names from func signature.
func_sig_params = inspect.signature(func).parameters
module_logger.warning(f"func_sig_params of func_path is: '{func_sig_params}'")
module_logger.warning(f"func_input_params_dict is: '{func_input_params_dict}'")
# Append workspace triple to func input params if func signature has kwargs param.
# Or append ws_triple_dict params that are in func signature.
combined_func_input_params = append_workspace_triple_to_func_input_params(
func_sig_params, func_input_params_dict, ws_triple_dict
)
try:
result = func(**combined_func_input_params)
except Exception as e:
raise RetrieveToolFuncResultError(f"Error when calling function {func_path}: {e}")
validate_tool_func_result(func_call_scenario, result)
return result
def gen_dynamic_list(func_path: str, func_input_params_dict: Dict, ws_triple_dict: Dict[str, str] = {}):
func = load_function_from_function_path(func_path)
# get param names from func signature.
func_sig_params = inspect.signature(func).parameters
module_logger.warning(f"func_sig_params of func_path is: '{func_sig_params}'")
module_logger.warning(f"func_input_params_dict is: '{func_input_params_dict}'")
combined_func_input_params = append_workspace_triple_to_func_input_params(
func_sig_params, func_input_params_dict, ws_triple_dict
)
try:
result = func(**combined_func_input_params)
except Exception as e:
raise DynamicListError(f"Error when calling function {func_path}: {e}")
# validate response is of required format. Throw correct message if response is empty.
validate_dynamic_list_func_response_type(result, func.__name__)
return result
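# A hypothetical dynamic-list function of the kind gen_dynamic_list loads via func_path.
# The exact response shape is enforced by validate_dynamic_list_func_response_type;
# returning a list of dicts carrying a "value" key (and an optional "display_value")
# is assumed here for illustration rather than taken from this file.
def _demo_list_indexes(prefix: str = "", **kwargs):
    index_names = ["products", "support-docs", "faq"]
    return [
        {"value": name, "display_value": name}
        for name in index_names
        if name.startswith(prefix)
    ]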
class BuiltinsManager:
def __init__(self) -> None:
pass
@staticmethod
def _load_llm_api(api_name: str) -> Tool:
result = apis.get(api_name)
if result is None:
raise APINotFound(
message=f"The API '{api_name}' is not found.",
target=ErrorTarget.EXECUTOR,
)
return result
def load_builtin(
self,
tool: Tool,
node_inputs: Optional[dict] = None,
) -> Tuple[Callable, dict]:
return BuiltinsManager._load_package_tool(tool.name, tool.module, tool.class_name, tool.function, node_inputs)
@staticmethod
def _load_package_tool(tool_name, module_name, class_name, method_name, node_inputs):
module = importlib.import_module(module_name)
return BuiltinsManager._load_tool_from_module(
module, tool_name, module_name, class_name, method_name, node_inputs
)
@staticmethod
def _load_tool_from_module(
module, tool_name, module_name, class_name, method_name, node_inputs: Mapping[str, InputAssignment]
):
"""Load tool from given module with node inputs."""
if class_name is None:
return getattr(module, method_name), {}
provider_class = getattr(module, class_name)
# Note: v -- type is InputAssignment
init_inputs = provider_class.get_initialize_inputs()
init_inputs_values = {}
for k, v in node_inputs.items():
if k not in init_inputs:
continue
if v.value_type != InputValueType.LITERAL:
raise InputTypeMismatch(
message_format=(
"Invalid input for '{tool_name}': Initialization input '{input_name}' requires a literal "
"value, but {input_value} was received."
),
tool_name=tool_name,
input_name=k,
input_value=v.serialize(),
target=ErrorTarget.EXECUTOR,
)
init_inputs_values[k] = v.value
missing_inputs = set(provider_class.get_required_initialize_inputs()) - set(init_inputs_values)
if missing_inputs:
raise MissingRequiredInputs(
message=f"Required inputs {list(missing_inputs)} are not provided for tool '{tool_name}'.",
target=ErrorTarget.EXECUTOR,
)
try:
api = getattr(provider_class(**init_inputs_values), method_name)
except Exception as ex:
error_type_and_message = f"({ex.__class__.__name__}) {ex}"
raise ToolLoadError(
module=module_name,
message_format="Failed to load package tool '{tool_name}': {error_type_and_message}",
tool_name=tool_name,
error_type_and_message=error_type_and_message,
) from ex
        # Return the init_inputs to update node inputs in subsequent steps
return api, init_inputs
@staticmethod
def load_tool_by_api_name(api_name: str) -> Tool:
if api_name is None:
return None
return BuiltinsManager._load_llm_api(api_name)
def load_prompt_with_api(self, tool: Tool, api: Tool, node_inputs: Optional[dict] = None) -> Tuple[Callable, dict]:
"""Load a prompt template tool with action."""
# Load provider action function
api_func, init_inputs = self.load_builtin(api, node_inputs)
        # Find the prompt template parameter name and pass the tool code to it.
prompt_tpl_param_name = get_prompt_param_name_from_func(api_func)
api_func = partial(api_func, **{prompt_tpl_param_name: tool.code}) if prompt_tpl_param_name else api_func
        # Return the init_inputs to update node inputs in subsequent steps
return api_func, init_inputs
def load_prompt_rendering(self, tool: Tool):
if not tool.code:
tool.code = ""
from promptflow.tools.template_rendering import render_template_jinja2
return partial(render_template_jinja2, template=tool.code)
@staticmethod
def parse_builtin_tool_method(tool: Tool) -> tuple:
module_name = tool.module
class_name = tool.class_name
method_name = tool.function
return module_name, class_name, method_name
@staticmethod
def is_builtin(tool: Tool) -> bool:
"""Check if the tool is a builtin tool."""
return tool.type == ToolType.PYTHON and tool.code is None and tool.source is None
@staticmethod
def is_llm(tool: Tool) -> bool:
"""Check if the tool is a LLM tool."""
return tool.type == ToolType.LLM
@staticmethod
def is_custom_python(tool: Tool) -> bool:
"""Check if the tool is a custom python tool."""
return tool.type == ToolType.PYTHON and not BuiltinsManager.is_builtin(tool)
class ToolsManager:
"""Manage all builtins and user-defined tools."""
def __init__(
self,
loaded_tools: Optional[Mapping[str, Callable]] = None,
) -> None:
loaded_tools = loaded_tools or {}
self._tools = {k: v for k, v in loaded_tools.items()}
def load_tools(self, tools: Mapping[str, Callable]) -> None:
"""Load new tools to the manager."""
self._tools.update(tools)
def loaded(self, tool: str) -> bool:
return tool in self._tools
def get_tool(self, key: str) -> Callable:
if key not in self._tools:
raise ValueError(f"Tool for {key} is not loaded")
return self._tools[key]
def wrap_tool(self, key: str, wrapper: Callable):
"""Wraps the tool with specific name by a given wrapper.
Sometimes we may want to wrap the tool with a decorator, but we don't want to modify the original tool.
i.e. We may want to pass additional arguments to the tool by wrapping it with a decorator,
such as turning on the stream response for AzureOpenAI.chat() by adding a "stream=True" argument.
"""
tool = self.get_tool(key)
self._tools.update({key: wrapper(tool)})
def assert_loaded(self, tool: str):
if tool not in self._tools:
raise ValueError(f"Tool {tool} is not loaded")
# TODO: Remove this method. The code path will not be used in code-first experience.
# Customers are familiar with the term "node", so we use it in error message.
@staticmethod
def _load_custom_tool(tool: Tool, node_name: str) -> Callable:
func_name = tool.function or tool.name
if tool.source and Path(tool.source).exists(): # If source file is provided, load the function from the file
m = load_python_module_from_file(tool.source)
if m is None:
raise CustomToolSourceLoadError(f"Cannot load module from source {tool.source} for node {node_name}.")
return getattr(m, func_name)
if not tool.code:
raise EmptyCodeInCustomTool(f"Missing code in node {node_name}.")
func_code = tool.code
try:
f_globals = {}
exec(func_code, f_globals)
except Exception as e:
raise CustomPythonToolLoadError(f"Error when loading code of node {node_name}: {e}") from e
if func_name not in f_globals:
raise MissingTargetFunction(f"Cannot find function {func_name} in the code of node {node_name}.")
return f_globals[func_name]
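# A minimal sketch (not from the original file) of the wrapping pattern that
# ToolsManager.wrap_tool's docstring describes: pass extra keyword arguments
# to an already-loaded tool without modifying the tool itself.
def _demo_wrap_tool_with_stream():
    from functools import partial

    def _demo_chat(prompt: str, stream: bool = False) -> str:
        return f"prompt={prompt!r}, stream={stream}"

    manager = ToolsManager(loaded_tools={"chat_node": _demo_chat})
    manager.wrap_tool("chat_node", lambda tool: partial(tool, stream=True))
    return manager.get_tool("chat_node")("hi")  # "prompt='hi', stream=True"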
class ToolLoader:
def __init__(self, working_dir: str, package_tool_keys: Optional[List[str]] = None) -> None:
self._working_dir = working_dir
self._package_tools = collect_package_tools(package_tool_keys) if package_tool_keys else {}
# Used to handle backward compatibility of tool ID changes.
self._deprecated_tools = _find_deprecated_tools(self._package_tools)
# TODO: Replace NotImplementedError with NotSupported in the future.
def load_tool_for_node(self, node: Node) -> Tool:
if node.source is None:
raise UserErrorException(f"Node {node.name} does not have source defined.")
if node.type == ToolType.PYTHON:
if node.source.type == ToolSourceType.Package:
return self.load_tool_for_package_node(node)
elif node.source.type == ToolSourceType.Code:
_, tool = self.load_tool_for_script_node(node)
return tool
raise NotImplementedError(f"Tool source type {node.source.type} for python tool is not supported yet.")
elif node.type == ToolType.CUSTOM_LLM:
if node.source.type == ToolSourceType.PackageWithPrompt:
return self.load_tool_for_package_node(node)
raise NotImplementedError(f"Tool source type {node.source.type} for custom_llm tool is not supported yet.")
else:
raise NotImplementedError(f"Tool type {node.type} is not supported yet.")
def load_tool_for_package_node(self, node: Node) -> Tool:
if node.source.tool in self._package_tools:
return Tool.deserialize(self._package_tools[node.source.tool])
# If node source tool is not in package tools, try to find the tool ID in deprecated tools.
# If found, load the tool with the new tool ID for backward compatibility.
if node.source.tool in self._deprecated_tools:
new_tool_id = self._deprecated_tools[node.source.tool]
# Used to collect deprecated tool usage and warn user to replace the deprecated tool with the new one.
module_logger.warning(f"Tool ID '{node.source.tool}' is deprecated. Please use '{new_tool_id}' instead.")
return Tool.deserialize(self._package_tools[new_tool_id])
raise PackageToolNotFoundError(
f"Package tool '{node.source.tool}' is not found in the current environment. "
f"All available package tools are: {list(self._package_tools.keys())}.",
target=ErrorTarget.EXECUTOR,
)
def load_tool_for_script_node(self, node: Node) -> Tuple[types.ModuleType, Tool]:
if node.source.path is None:
raise InvalidSource(
target=ErrorTarget.EXECUTOR,
message_format="Load tool failed for node '{node_name}'. The source path is 'None'.",
node_name=node.name,
)
path = node.source.path
if not (self._working_dir / path).is_file():
raise InvalidSource(
target=ErrorTarget.EXECUTOR,
message_format="Load tool failed for node '{node_name}'. Tool file '{source_path}' can not be found.",
source_path=path,
node_name=node.name,
)
m = load_python_module_from_file(self._working_dir / path)
if m is None:
raise CustomToolSourceLoadError(f"Cannot load module from {path}.")
f, init_inputs = collect_tool_function_in_module(m)
return m, _parse_tool_from_function(f, init_inputs, gen_custom_type_conn=True)
def load_tool_for_llm_node(self, node: Node) -> Tool:
api_name = f"{node.provider}.{node.api}"
return BuiltinsManager._load_llm_api(api_name)
builtins = {}
apis = {}
connections = {}
connection_type_to_api_mapping = {}
def _register(provider_cls, collection, type):
from promptflow._core.tool import ToolProvider
if not issubclass(provider_cls, ToolProvider):
raise Exception(f"Class {provider_cls.__name__!r} must be a subclass of promptflow.ToolProvider.")
initialize_inputs = provider_cls.get_initialize_inputs()
# Build tool/provider definition
for name, value in provider_cls.__dict__.items():
if hasattr(value, "__original_function"):
name = value.__original_function.__qualname__
value.__tool = function_to_tool_definition(value, type=type, initialize_inputs=initialize_inputs)
collection[name] = value.__tool
module_logger.debug(f"Registered {name} as a builtin function")
# Get the connection type - provider name mapping for execution use
# Tools/Providers related connection must have been imported
for param in initialize_inputs.values():
if not param.annotation:
continue
annotation_type_name = param.annotation.__name__
if annotation_type_name in connections:
api_name = provider_cls.__name__
module_logger.debug(f"Add connection type {annotation_type_name} to api {api_name} mapping")
connection_type_to_api_mapping[annotation_type_name] = api_name
break
def _register_method(provider_method, collection, type):
name = provider_method.__qualname__
provider_method.__tool = function_to_tool_definition(provider_method, type=type)
collection[name] = provider_method.__tool
module_logger.debug(f"Registered {name} as {type} function")
def register_builtins(provider_cls):
_register(provider_cls, builtins, ToolType.PYTHON)
def register_apis(provider_cls):
_register(provider_cls, apis, ToolType._ACTION)
def register_builtin_method(provider_method):
_register_method(provider_method, builtins, ToolType.PYTHON)
def register_api_method(provider_method):
_register_method(provider_method, apis, ToolType._ACTION)
def register_connections(connection_classes: Union[type, List[type]]):
connection_classes = [connection_classes] if not isinstance(connection_classes, list) else connection_classes
connections.update({cls.__name__: cls for cls in connection_classes})
class CustomToolSourceLoadError(SystemErrorException):
pass
class CustomToolError(UserErrorException):
"""Base exception raised when failed to validate tool."""
def __init__(self, message):
super().__init__(message, target=ErrorTarget.TOOL)
class EmptyCodeInCustomTool(CustomToolError):
pass
class CustomPythonToolLoadError(CustomToolError):
pass
class MissingTargetFunction(CustomToolError):
pass
class APINotFound(ValidationException):
pass
| promptflow/src/promptflow/promptflow/_core/tools_manager.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_core/tools_manager.py",
"repo_id": "promptflow",
"token_count": 9268
} | 34 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from os import PathLike
from typing import IO, AnyStr, Union
from promptflow._sdk._load_functions import load_run
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._run import Run
def _create_run(run: Run, **kwargs):
client = PFClient()
return client.runs.create_or_update(run=run, **kwargs)
def create_yaml_run(source: Union[str, PathLike, IO[AnyStr]], params_override: list = None, **kwargs):
"""Create a run from a yaml file. Should only call from CLI."""
run = load_run(source, params_override=params_override, **kwargs)
return _create_run(run=run, **kwargs)
| promptflow/src/promptflow/promptflow/_sdk/_run_functions.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_run_functions.py",
"repo_id": "promptflow",
"token_count": 236
} | 35 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow.exceptions import ErrorTarget, UserErrorException
class BadRequest(UserErrorException):
pass
class JsonPayloadRequiredForMultipleInputFields(BadRequest):
pass
class MissingRequiredFlowInput(BadRequest):
pass
class FlowConnectionError(UserErrorException):
pass
class UnsupportedConnectionProvider(FlowConnectionError):
def __init__(self, provider):
super().__init__(
message_format="Unsupported connection provider {provider}, " "supported are 'local' and typing.Callable.",
provider=provider,
target=ErrorTarget.FLOW_INVOKER,
)
class MissingConnectionProvider(FlowConnectionError):
pass
class InvalidConnectionData(FlowConnectionError):
def __init__(self, connection_name):
super().__init__(
message_format="Invalid connection data detected while overriding connection {connection_name}.",
connection_name=connection_name,
target=ErrorTarget.FLOW_INVOKER)
class UnexpectedConnectionProviderReturn(FlowConnectionError):
pass
class MultipleStreamOutputFieldsNotSupported(UserErrorException):
def __init__(self):
super().__init__(
"Multiple stream output fields not supported.",
target=ErrorTarget.SERVING_APP,
)
class NotAcceptable(UserErrorException):
def __init__(self, media_type, supported_media_types):
super().__init__(
message_format="Media type {media_type} in Accept header is not acceptable. "
"Supported media type(s) - {supported_media_types}",
media_type=media_type,
supported_media_types=supported_media_types,
target=ErrorTarget.SERVING_APP,
)
| promptflow/src/promptflow/promptflow/_sdk/_serving/_errors.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_serving/_errors.py",
"repo_id": "promptflow",
"token_count": 655
} | 36 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import time
from types import GeneratorType
from flask import Response, jsonify
from werkzeug.datastructures import MIMEAccept
from promptflow._sdk._serving._errors import MultipleStreamOutputFieldsNotSupported, NotAcceptable
class ResponseCreator:
"""Generates http response from flow run result."""
def __init__(
self,
flow_run_result,
accept_mimetypes,
stream_start_callback_func=None,
stream_end_callback_func=None,
stream_event_callback_func=None
):
# Fields that are with GeneratorType are streaming outputs.
stream_fields = [k for k, v in flow_run_result.items() if isinstance(v, GeneratorType)]
if len(stream_fields) > 1:
raise MultipleStreamOutputFieldsNotSupported()
self.stream_field_name = stream_fields[0] if stream_fields else None
self.stream_iterator = flow_run_result.pop(self.stream_field_name, None)
self.non_stream_fields = flow_run_result
# According to RFC2616, if "Accept" header is not specified,
# then it is assumed that the client accepts all media types.
# Set */* as the default value here.
# https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
if not accept_mimetypes:
accept_mimetypes = MIMEAccept([("*/*", 1)])
self.accept_mimetypes = accept_mimetypes
self._on_stream_start = stream_start_callback_func
self._on_stream_end = stream_end_callback_func
self._on_stream_event = stream_event_callback_func
@property
def has_stream_field(self):
return self.stream_field_name is not None
@property
def text_stream_specified_explicitly(self):
"""Returns True only when text/event-stream is specified explicitly.
For other cases like */* or text/*, it will return False.
"""
return "text/event-stream" in self.accept_mimetypes.values()
@property
def accept_json(self):
"""Returns True if the Accept header includes application/json.
It also returns True when specified with */* or application/*.
"""
return self.accept_mimetypes.accept_json
def create_text_stream_response(self):
def format_event(data):
return f"data: {json.dumps(data)}\n\n"
def generate():
start_time = time.time()
if self._on_stream_start:
self._on_stream_start()
# If there are non streaming fields, yield them firstly.
if self.non_stream_fields:
yield format_event(self.non_stream_fields)
# If there is stream field, read and yield data until the end.
if self.stream_iterator is not None:
for chunk in self.stream_iterator:
if self._on_stream_event:
self._on_stream_event(chunk)
yield format_event({self.stream_field_name: chunk})
if self._on_stream_end:
duration = (time.time() - start_time) * 1000
self._on_stream_end(duration)
return Response(generate(), mimetype="text/event-stream")
def create_json_response(self):
# If there is stream field, iterate over it and get the merged result.
if self.stream_iterator is not None:
merged_text = "".join(self.stream_iterator)
self.non_stream_fields[self.stream_field_name] = merged_text
return jsonify(self.non_stream_fields)
def create_response(self):
if self.has_stream_field:
if self.text_stream_specified_explicitly:
return self.create_text_stream_response()
elif self.accept_json:
return self.create_json_response()
else:
raise NotAcceptable(
media_type=self.accept_mimetypes, supported_media_types="text/event-stream, application/json"
)
else:
if self.accept_json:
return self.create_json_response()
else:
raise NotAcceptable(media_type=self.accept_mimetypes, supported_media_types="application/json")
| promptflow/src/promptflow/promptflow/_sdk/_serving/response_creator.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_serving/response_creator.py",
"repo_id": "promptflow",
"token_count": 1813
} | 37 |
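To make the content negotiation above concrete, here is a small, self-contained sketch of the decision the class makes: streaming responses require `text/event-stream` to be requested explicitly, while `*/*` or `application/json` fall back to a JSON response. `demo_pick_response_kind` is written for illustration only and is not part of the module.

from werkzeug.datastructures import MIMEAccept

def demo_pick_response_kind(accept: MIMEAccept, has_stream_field: bool) -> str:
    text_stream_explicit = "text/event-stream" in accept.values()
    if has_stream_field and text_stream_explicit:
        return "text/event-stream"
    if accept.accept_json:
        return "application/json"
    return "not acceptable"

print(demo_pick_response_kind(MIMEAccept([("*/*", 1)]), has_stream_field=True))                 # application/json
print(demo_pick_response_kind(MIMEAccept([("text/event-stream", 1)]), has_stream_field=True))   # text/event-stream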
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from ._asset_utils import IgnoreFile, get_ignore_file, get_upload_files_from_folder
__all__ = ["get_ignore_file", "IgnoreFile", "get_upload_files_from_folder"]
| promptflow/src/promptflow/promptflow/_sdk/_vendor/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_vendor/__init__.py",
"repo_id": "promptflow",
"token_count": 79
} | 38 |
import os
import sys
from promptflow._cli._pf._connection import create_connection
from streamlit.web import cli as st_cli
from streamlit.runtime import exists
from main import start
def is_yaml_file(file_path):
# Get the file extension
_, file_extension = os.path.splitext(file_path)
# Check if the file extension is ".yaml" or ".yml"
return file_extension.lower() in ('.yaml', '.yml')
def create_connections(directory_path) -> None:
for root, dirs, files in os.walk(directory_path):
for file in files:
file_path = os.path.join(root, file)
if is_yaml_file(file_path):
create_connection(file_path)
if __name__ == "__main__":
create_connections(os.path.join(os.path.dirname(__file__), "connections"))
if exists():
start()
else:
main_script = os.path.join(os.path.dirname(__file__), "main.py")
sys.argv = ["streamlit", "run", main_script, "--global.developmentMode=false", "--client.toolbarMode=viewer", "--browser.gatherUsageStats=false"]
st_cli.main(prog_name="streamlit")
| promptflow/src/promptflow/promptflow/_sdk/data/executable/app.py.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/data/executable/app.py.jinja2",
"repo_id": "promptflow",
"token_count": 435
} | 39 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=protected-access
import copy
import json
import os.path
import typing
from pathlib import Path
from typing import Dict, List, Optional
import pydash
import strictyaml
from marshmallow import ValidationError
from promptflow._utils.logger_utils import get_cli_sdk_logger
logger = get_cli_sdk_logger()
class _ValidationStatus:
"""Validation status class.
    Validation status is used to indicate the status of a validation result. It can be one of the following values:
Succeeded, Failed.
"""
SUCCEEDED = "Succeeded"
"""Succeeded."""
FAILED = "Failed"
"""Failed."""
class Diagnostic(object):
"""Represents a diagnostic of an asset validation error with the location info."""
def __init__(self, yaml_path: str, message: str, error_code: str, **kwargs) -> None:
"""Init Diagnostic.
:keyword yaml_path: A dash path from root to the target element of the diagnostic.
:paramtype yaml_path: str
:keyword message: Error message of diagnostic.
:paramtype message: str
:keyword error_code: Error code of diagnostic.
:paramtype error_code: str
"""
self.yaml_path = yaml_path
self.message = message
self.error_code = error_code
self.local_path, self.value = None, None
self._key = kwargs.pop("key", "yaml_path")
# Set extra info to attribute
for k, v in kwargs.items():
if not k.startswith("_"):
setattr(self, k, v)
def __repr__(self) -> str:
"""The asset friendly name and error message.
:return: The formatted diagnostic
:rtype: str
"""
return "{}: {}".format(getattr(self, self._key), self.message)
@classmethod
def create_instance(
cls,
yaml_path: str,
message: Optional[str] = None,
error_code: Optional[str] = None,
**kwargs,
):
"""Create a diagnostic instance.
:param yaml_path: A dash path from root to the target element of the diagnostic.
:type yaml_path: str
:param message: Error message of diagnostic.
:type message: str
:param error_code: Error code of diagnostic.
:type error_code: str
:return: The created instance
:rtype: Diagnostic
"""
return cls(
yaml_path=yaml_path,
message=message,
error_code=error_code,
**kwargs,
)
class ValidationResult(object):
"""Represents the result of validation.
    This class is used to organize and parse diagnostics from both client & server side before exposing them. The result
is immutable.
"""
def __init__(self) -> None:
self._target_obj = None
self._errors = []
self._warnings = []
self._kwargs = {}
def _set_extra_info(self, key, value):
self._kwargs[key] = value
def _get_extra_info(self, key, default=None):
return self._kwargs.get(key, default)
@property
def error_messages(self) -> Dict:
"""
Return all messages of errors in the validation result.
:return: A dictionary of error messages. The key is the yaml path of the error, and the value is the error
message.
:rtype: dict
"""
messages = {}
for diagnostic in self._errors:
message_key = getattr(diagnostic, diagnostic._key)
if message_key not in messages:
messages[message_key] = diagnostic.message
else:
messages[message_key] += "; " + diagnostic.message
return messages
@property
def passed(self) -> bool:
"""Returns boolean indicating whether any errors were found.
:return: True if the validation passed, False otherwise.
:rtype: bool
"""
return not self._errors
def _to_dict(self) -> typing.Dict[str, typing.Any]:
result = {
"result": _ValidationStatus.SUCCEEDED if self.passed else _ValidationStatus.FAILED,
}
result.update(self._kwargs)
for diagnostic_type, diagnostics in [
("errors", self._errors),
("warnings", self._warnings),
]:
messages = []
for diagnostic in diagnostics:
message = {
"message": diagnostic.message,
"path": diagnostic.yaml_path,
"value": pydash.get(self._target_obj, diagnostic.yaml_path, diagnostic.value),
}
if diagnostic.local_path:
message["location"] = str(diagnostic.local_path)
for attr in dir(diagnostic):
if attr not in message and not attr.startswith("_") and not callable(getattr(diagnostic, attr)):
message[attr] = getattr(diagnostic, attr)
message = {k: v for k, v in message.items() if v is not None}
messages.append(message)
if messages:
result[diagnostic_type] = messages
return result
def __repr__(self) -> str:
"""Get the string representation of the validation result.
:return: The string representation
:rtype: str
"""
return json.dumps(self._to_dict(), indent=2)
class MutableValidationResult(ValidationResult):
"""Used by the client side to construct a validation result.
The result is mutable and should not be exposed to the user.
"""
def __init__(self, target_obj: Optional[typing.Dict[str, typing.Any]] = None):
super().__init__()
self._target_obj = target_obj
def merge_with(
self,
target: ValidationResult,
field_name: Optional[str] = None,
condition_skip: Optional[typing.Callable] = None,
overwrite: bool = False,
):
"""Merge errors & warnings in another validation results into current one.
Will update current validation result.
If field_name is not None, then yaml_path in the other validation result will be updated accordingly.
        e.g., * => field_name and a.b => field_name.a.b. If None, no update is applied.
:param target: Validation result to merge.
:type target: ValidationResult
:param field_name: The base field name for the target to merge.
:type field_name: str
:param condition_skip: A function to determine whether to skip the merge of a diagnostic in the target.
:type condition_skip: typing.Callable
:param overwrite: Whether to overwrite the current validation result. If False, all diagnostics will be kept;
if True, current diagnostics with the same yaml_path will be dropped.
:type overwrite: bool
:return: The current validation result.
:rtype: MutableValidationResult
"""
for source_diagnostics, target_diagnostics in [
(target._errors, self._errors),
(target._warnings, self._warnings),
]:
if overwrite:
keys_to_remove = set(map(lambda x: x.yaml_path, source_diagnostics))
target_diagnostics[:] = [
diagnostic for diagnostic in target_diagnostics if diagnostic.yaml_path not in keys_to_remove
]
for diagnostic in source_diagnostics:
if condition_skip and condition_skip(diagnostic):
continue
new_diagnostic = copy.deepcopy(diagnostic)
if field_name:
if new_diagnostic.yaml_path == "*":
new_diagnostic.yaml_path = field_name
else:
new_diagnostic.yaml_path = field_name + "." + new_diagnostic.yaml_path
target_diagnostics.append(new_diagnostic)
return self
def try_raise(
self,
raise_error: bool = True,
*,
error_func: typing.Callable[[str, str], Exception] = None,
) -> "MutableValidationResult":
"""Try to raise an error from the validation result.
If the validation is passed or raise_error is False, this method
will return the validation result.
:param raise_error: Whether to raise the error.
:type raise_error: bool
:keyword error_func: A function to create the error. If None, a marshmallow.ValidationError will be created.
The first parameter of the function is the string representation of the validation result,
and the second parameter is the error message without personal data.
:type error_func: typing.Callable[[str, str], Exception]
:return: The current validation result.
:rtype: MutableValidationResult
"""
# pylint: disable=logging-not-lazy
if raise_error is False:
return self
if self._warnings:
logger.warning("Schema validation warnings: %s" % str(self._warnings))
if not self.passed:
if error_func is None:
def error_func(msg, _):
return ValidationError(message=msg)
raise error_func(
self.__repr__(),
f"Schema validation failed: {self.error_messages}",
)
return self
def append_error(
self,
yaml_path: str = "*",
message: Optional[str] = None,
error_code: Optional[str] = None,
**kwargs,
):
"""Append an error to the validation result.
:param yaml_path: The yaml path of the error.
:type yaml_path: str
:param message: The message of the error.
:type message: str
:param error_code: The error code of the error.
:type error_code: str
:return: The current validation result.
:rtype: MutableValidationResult
"""
self._errors.append(
Diagnostic.create_instance(
yaml_path=yaml_path,
message=message,
error_code=error_code,
**kwargs,
)
)
return self
def resolve_location_for_diagnostics(self, source_path: str, resolve_value: bool = False):
"""Resolve location/value for diagnostics based on the source path where the validatable object is loaded.
Location includes local path of the exact file (can be different from the source path) & line number of the
        invalid field. By default, the value of a diagnostic is resolved from the validatable object while
        transferring it to a dict; however, when the validatable object is not available (e.g., the validation
        result is created from marshmallow.ValidationError.messages), the value can be resolved from the source path.
:param source_path: The path of the source file.
:type source_path: str
:param resolve_value: Whether to resolve the value of the invalid field from source file.
:type resolve_value: bool
"""
resolver = _YamlLocationResolver(source_path)
for diagnostic in self._errors + self._warnings:
diagnostic.local_path, value = resolver.resolve(diagnostic.yaml_path)
if value is not None and resolve_value:
diagnostic.value = value
def append_warning(
self,
yaml_path: str = "*",
message: Optional[str] = None,
error_code: Optional[str] = None,
**kwargs,
):
"""Append a warning to the validation result.
:param yaml_path: The yaml path of the warning.
:type yaml_path: str
:param message: The message of the warning.
:type message: str
:param error_code: The error code of the warning.
:type error_code: str
:return: The current validation result.
:rtype: MutableValidationResult
"""
self._warnings.append(
Diagnostic.create_instance(
yaml_path=yaml_path,
message=message,
error_code=error_code,
**kwargs,
)
)
return self
class ValidationResultBuilder:
"""A helper class to create a validation result."""
UNKNOWN_MESSAGE = "Unknown field."
def __init__(self):
pass
@classmethod
def success(cls) -> MutableValidationResult:
"""Create a validation result with success status.
:return: A validation result
:rtype: MutableValidationResult
"""
return MutableValidationResult()
@classmethod
def from_single_message(
cls, singular_error_message: Optional[str] = None, yaml_path: str = "*", data: Optional[dict] = None
):
"""Create a validation result with only 1 diagnostic.
:param singular_error_message: diagnostic.message.
:type singular_error_message: Optional[str]
:param yaml_path: diagnostic.yaml_path.
:type yaml_path: str
        :param data: serialized validation target.
:type data: Optional[Dict]
:return: The validation result
:rtype: MutableValidationResult
"""
obj = MutableValidationResult(target_obj=data)
if singular_error_message:
obj.append_error(message=singular_error_message, yaml_path=yaml_path)
return obj
@classmethod
def from_validation_error(
cls, error: ValidationError, *, source_path: Optional[str] = None, error_on_unknown_field=False
) -> MutableValidationResult:
"""Create a validation result from a ValidationError, which will be raised in marshmallow.Schema.load. Please
use this function only for exception in loading file.
:param error: ValidationError raised by marshmallow.Schema.load.
:type error: ValidationError
:keyword error_on_unknown_field: whether to raise error if there are unknown field diagnostics.
:paramtype error_on_unknown_field: bool
:return: The validation result
:rtype: MutableValidationResult
"""
obj = cls.from_validation_messages(
error.messages, data=error.data, error_on_unknown_field=error_on_unknown_field
)
if source_path:
obj.resolve_location_for_diagnostics(source_path, resolve_value=True)
return obj
@classmethod
def from_validation_messages(
cls, errors: typing.Dict, data: typing.Dict, *, error_on_unknown_field: bool = False
) -> MutableValidationResult:
"""Create a validation result from error messages, which will be returned by marshmallow.Schema.validate.
:param errors: error message returned by marshmallow.Schema.validate.
:type errors: dict
:param data: serialized data to validate
:type data: dict
:keyword error_on_unknown_field: whether to raise error if there are unknown field diagnostics.
:paramtype error_on_unknown_field: bool
:return: The validation result
:rtype: MutableValidationResult
"""
instance = MutableValidationResult(target_obj=data)
errors = copy.deepcopy(errors)
cls._from_validation_messages_recursively(errors, [], instance, error_on_unknown_field=error_on_unknown_field)
return instance
@classmethod
def _from_validation_messages_recursively(
cls,
errors: typing.Union[typing.Dict, typing.List, str],
path_stack: typing.List[str],
instance: MutableValidationResult,
error_on_unknown_field: bool,
):
cur_path = ".".join(path_stack) if path_stack else "*"
# single error message
if isinstance(errors, dict) and "_schema" in errors:
instance.append_error(
message=";".join(errors["_schema"]),
yaml_path=cur_path,
)
# errors on attributes
elif isinstance(errors, dict):
for field, msgs in errors.items():
# fields.Dict
if field in ["key", "value"]:
cls._from_validation_messages_recursively(msgs, path_stack, instance, error_on_unknown_field)
else:
# Todo: Add hack logic here to deal with error message in nested TypeSensitiveUnionField in
# DataTransfer: will be a nested dict with None field as dictionary key.
                    # open an item to track: https://msdata.visualstudio.com/Vienna/_workitems/edit/2244262/
if field is None:
cls._from_validation_messages_recursively(msgs, path_stack, instance, error_on_unknown_field)
else:
path_stack.append(field)
cls._from_validation_messages_recursively(msgs, path_stack, instance, error_on_unknown_field)
path_stack.pop()
# detailed error message
elif isinstance(errors, list) and all(isinstance(msg, str) for msg in errors):
if cls.UNKNOWN_MESSAGE in errors and not error_on_unknown_field:
# Unknown field is not a real error, so we should remove it and append a warning.
errors.remove(cls.UNKNOWN_MESSAGE)
instance.append_warning(message=cls.UNKNOWN_MESSAGE, yaml_path=cur_path)
if errors:
instance.append_error(message=";".join(errors), yaml_path=cur_path)
# union field
elif isinstance(errors, list):
def msg2str(msg):
if isinstance(msg, str):
return msg
if isinstance(msg, dict) and len(msg) == 1 and "_schema" in msg and len(msg["_schema"]) == 1:
return msg["_schema"][0]
return str(msg)
instance.append_error(message="; ".join([msg2str(x) for x in errors]), yaml_path=cur_path)
# unknown error
else:
instance.append_error(message=str(errors), yaml_path=cur_path)
class _YamlLocationResolver:
def __init__(self, source_path):
self._source_path = source_path
def resolve(self, yaml_path, source_path=None):
"""Resolve the location & value of a yaml path starting from source_path.
:param yaml_path: yaml path.
:type yaml_path: str
:param source_path: source path.
:type source_path: str
:return: the location & value of the yaml path based on source_path.
:rtype: Tuple[str, str]
"""
source_path = source_path or self._source_path
if source_path is None or not os.path.isfile(source_path):
return None, None
if yaml_path is None or yaml_path == "*":
return source_path, None
attrs = yaml_path.split(".")
attrs.reverse()
return self._resolve_recursively(attrs, Path(source_path))
def _resolve_recursively(self, attrs: List[str], source_path: Path):
with open(source_path, encoding="utf-8") as f:
try:
loaded_yaml = strictyaml.load(f.read())
except Exception as e: # pylint: disable=broad-except
msg = "Can't load source file %s as a strict yaml:\n%s" % (source_path, str(e))
logger.debug(msg)
return None, None
while attrs:
attr = attrs[-1]
if loaded_yaml.is_mapping() and attr in loaded_yaml:
loaded_yaml = loaded_yaml.get(attr)
attrs.pop()
elif loaded_yaml.is_sequence() and attr.isdigit() and 0 <= int(attr) < len(loaded_yaml):
loaded_yaml = loaded_yaml[int(attr)]
attrs.pop()
else:
try:
# if current object is a path of a valid yaml file, try to resolve location in new source file
next_path = Path(loaded_yaml.value)
if not next_path.is_absolute():
next_path = source_path.parent / next_path
if next_path.is_file():
return self._resolve_recursively(attrs, source_path=next_path)
except OSError:
pass
except TypeError:
pass
# if not, return current section
break
return (
f"{source_path.resolve().absolute()}#line {loaded_yaml.start_line}",
None if attrs else loaded_yaml.value,
)
| promptflow/src/promptflow/promptflow/_sdk/entities/_validation/core.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/entities/_validation/core.py",
"repo_id": "promptflow",
"token_count": 9020
} | 40 |
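As a usage illustration of the classes above (not taken from the repository), the snippet below builds a mutable validation result, appends a diagnostic, and inspects it the way calling code typically would; the import path is inferred from the file path shown in the metadata.

from promptflow._sdk.entities._validation.core import MutableValidationResult

result = MutableValidationResult(target_obj={"inputs": {"text": 1}})
result.append_error(yaml_path="inputs.text", message="Expected a string.", error_code="InvalidType")
assert result.passed is False
print(result.error_messages)  # {'inputs.text': 'Expected a string.'}
print(result)                 # JSON with "result": "Failed" and the error details
# result.try_raise()          # would raise a marshmallow ValidationError here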
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import typing
from pathlib import Path
from marshmallow import fields
from marshmallow.exceptions import FieldInstanceResolutionError, ValidationError
from marshmallow.fields import _T, Field, Nested
from marshmallow.utils import RAISE, resolve_field_instance
from promptflow._sdk._constants import BASE_PATH_CONTEXT_KEY
from promptflow._sdk.schemas._base import PathAwareSchema
from promptflow._utils.logger_utils import LoggerFactory
# pylint: disable=unused-argument,no-self-use,protected-access
module_logger = LoggerFactory.get_logger(__name__)
class StringTransformedEnum(Field):
def __init__(self, **kwargs):
# pop marshmallow unknown args to avoid warnings
self.allowed_values = kwargs.pop("allowed_values", None)
self.casing_transform = kwargs.pop("casing_transform", lambda x: x.lower())
self.pass_original = kwargs.pop("pass_original", False)
super().__init__(**kwargs)
if isinstance(self.allowed_values, str):
self.allowed_values = [self.allowed_values]
self.allowed_values = [self.casing_transform(x) for x in self.allowed_values]
def _jsonschema_type_mapping(self):
schema = {"type": "string", "enum": self.allowed_values}
if self.name is not None:
schema["title"] = self.name
if self.dump_only:
schema["readonly"] = True
return schema
def _serialize(self, value, attr, obj, **kwargs):
if not value:
return
if isinstance(value, str) and self.casing_transform(value) in self.allowed_values:
return value if self.pass_original else self.casing_transform(value)
raise ValidationError(f"Value {value!r} passed is not in set {self.allowed_values}")
def _deserialize(self, value, attr, data, **kwargs):
if isinstance(value, str) and self.casing_transform(value) in self.allowed_values:
return value if self.pass_original else self.casing_transform(value)
raise ValidationError(f"Value {value!r} passed is not in set {self.allowed_values}")
class LocalPathField(fields.Str):
"""A field that validates that the input is a local path.
Can only be used as fields of PathAwareSchema.
"""
default_error_messages = {
"invalid_path": "The filename, directory name, or volume label syntax is incorrect.",
"path_not_exist": "Can't find {allow_type} in resolved absolute path: {path}.",
}
def __init__(self, allow_dir=True, allow_file=True, **kwargs):
self._allow_dir = allow_dir
self._allow_file = allow_file
self._pattern = kwargs.get("pattern", None)
super().__init__(**kwargs)
def _resolve_path(self, value) -> Path:
"""Resolve path to absolute path based on base_path in context.
Will resolve the path if it's already an absolute path.
"""
try:
result = Path(value)
base_path = Path(self.context[BASE_PATH_CONTEXT_KEY])
if not result.is_absolute():
result = base_path / result
# for non-path string like "azureml:/xxx", OSError can be raised in either
# resolve() or is_dir() or is_file()
result = result.resolve()
if (self._allow_dir and result.is_dir()) or (self._allow_file and result.is_file()):
return result
except OSError:
raise self.make_error("invalid_path")
raise self.make_error("path_not_exist", path=result.as_posix(), allow_type=self.allowed_path_type)
@property
def allowed_path_type(self) -> str:
if self._allow_dir and self._allow_file:
return "directory or file"
if self._allow_dir:
return "directory"
return "file"
def _validate(self, value):
# inherited validations like required, allow_none, etc.
super(LocalPathField, self)._validate(value)
if value is None:
return
self._resolve_path(value)
def _serialize(self, value, attr, obj, **kwargs) -> typing.Optional[str]:
# do not block serializing None even if required or not allow_none.
if value is None:
return None
# always dump path as absolute path in string as base_path will be dropped after serialization
return super(LocalPathField, self)._serialize(self._resolve_path(value).as_posix(), attr, obj, **kwargs)
def _deserialize(self, value, attr, data, **kwargs):
# resolve to absolute path
if value is None:
return None
return super()._deserialize(self._resolve_path(value).as_posix(), attr, data, **kwargs)
# Note: UnionField currently has a bug where the order in which union fields are specified can cause failures.
# For example, the first line below works, but the second one fails upon calling load_from_dict
# with the error " AttributeError: 'list' object has no attribute 'get'"
# inputs = UnionField([fields.List(NestedField(DataSchema)), NestedField(DataSchema)])
# inputs = UnionField([NestedField(DataSchema), fields.List(NestedField(DataSchema))])
class UnionField(fields.Field):
def __init__(self, union_fields: typing.List[fields.Field], is_strict=False, **kwargs):
super().__init__(**kwargs)
try:
# add the validation and make sure union_fields must be subclasses or instances of
# marshmallow.base.FieldABC
self._union_fields = [resolve_field_instance(cls_or_instance) for cls_or_instance in union_fields]
# TODO: make serialization/de-serialization work in the same way as json schema when is_strict is True
            self.is_strict = is_strict  # When True, combine fields with oneOf instead of anyOf at schema generation
except FieldInstanceResolutionError as error:
raise ValueError(
'Elements of "union_fields" must be subclasses or ' "instances of marshmallow.base.FieldABC."
) from error
@property
def union_fields(self):
return iter(self._union_fields)
def insert_union_field(self, field):
self._union_fields.insert(0, field)
# This sets the parent for the schema and also handles nesting.
def _bind_to_schema(self, field_name, schema):
super()._bind_to_schema(field_name, schema)
self._union_fields = self._create_bind_fields(self._union_fields, field_name)
def _create_bind_fields(self, _fields, field_name):
new_union_fields = []
for field in _fields:
field = copy.deepcopy(field)
field._bind_to_schema(field_name, self)
new_union_fields.append(field)
return new_union_fields
def _serialize(self, value, attr, obj, **kwargs):
if value is None:
return None
errors = []
for field in self._union_fields:
try:
return field._serialize(value, attr, obj, **kwargs)
except ValidationError as e:
errors.extend(e.messages)
except (TypeError, ValueError, AttributeError) as e:
errors.extend([str(e)])
raise ValidationError(message=errors, field_name=attr)
def _deserialize(self, value, attr, data, **kwargs):
errors = []
for schema in self._union_fields:
try:
return schema.deserialize(value, attr, data, **kwargs)
except ValidationError as e:
errors.append(e.normalized_messages())
except (FileNotFoundError, TypeError) as e:
errors.append([str(e)])
finally:
                # Revert the base path to the original path when a job schema fails to deserialize the job.
                # For example, when loading a parallel job with a component file reference starting with the FILE
                # prefix, CommandSchema may first load the component yaml according to
                # AnonymousCommandComponentSchema, and YamlFileSchema will update the base path. When CommandSchema
                # fails to load, ParallelSchema then loads the component yaml according to
                # AnonymousParallelComponentSchema, but the base path is now incorrect and a path-not-found error
                # will be raised when loading the component yaml file.
if (
hasattr(schema, "name")
and schema.name == "jobs"
and hasattr(schema, "schema")
and isinstance(schema.schema, PathAwareSchema)
):
# use old base path to recover original base path
schema.schema.context[BASE_PATH_CONTEXT_KEY] = schema.schema.old_base_path
# recover base path of parent schema
schema.context[BASE_PATH_CONTEXT_KEY] = schema.schema.context[BASE_PATH_CONTEXT_KEY]
raise ValidationError(errors, field_name=attr)
class NestedField(Nested):
"""anticipates the default coming in next marshmallow version, unknown=True."""
def __init__(self, *args, **kwargs):
if kwargs.get("unknown") is None:
kwargs["unknown"] = RAISE
super().__init__(*args, **kwargs)
class DumpableIntegerField(fields.Integer):
"""An int field that cannot serialize other type of values to int if self.strict."""
def _serialize(self, value, attr, obj, **kwargs) -> typing.Optional[typing.Union[str, _T]]:
if self.strict and not isinstance(value, int):
# this implementation can serialize bool to bool
raise self.make_error("invalid", input=value)
return super()._serialize(value, attr, obj, **kwargs)
class DumpableFloatField(fields.Float):
"""A float field that cannot serialize other type of values to float if self.strict."""
def __init__(
self,
*,
strict: bool = False,
allow_nan: bool = False,
as_string: bool = False,
**kwargs,
):
self.strict = strict
super().__init__(allow_nan=allow_nan, as_string=as_string, **kwargs)
def _validated(self, value):
if self.strict and not isinstance(value, float):
raise self.make_error("invalid", input=value)
return super()._validated(value)
def _serialize(self, value, attr, obj, **kwargs) -> typing.Optional[typing.Union[str, _T]]:
return super()._serialize(self._validated(value), attr, obj, **kwargs)
def PrimitiveValueField(**kwargs):
"""Function to return a union field for primitive value.
:return: The primitive value field
:rtype: Field
"""
return UnionField(
[
            # Note: order matters here - to make sure the value is parsed correctly.
# By default, when strict is false, marshmallow downcasts float to int.
# Setting it to true will throw a validation error when loading a float to int.
# https://github.com/marshmallow-code/marshmallow/pull/755
# Use DumpableIntegerField to make sure there will be validation error when
# loading/dumping a float to int.
# note that this field can serialize bool instance but cannot deserialize bool instance.
DumpableIntegerField(strict=True),
# Use DumpableFloatField with strict of True to avoid '1'(str) serialized to 1.0(float)
DumpableFloatField(strict=True),
# put string schema after Int and Float to make sure they won't dump to string
fields.Str(),
# fields.Bool comes last since it'll parse anything non-falsy to True
fields.Bool(),
],
**kwargs,
)
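# Illustrative sketch (not part of the original module): the first-match-wins ordering that
# UnionField / PrimitiveValueField rely on, emulated with plain callables so the comments above
# can be seen in action. The parsers and sample values are assumptions for illustration only.
def _example_first_match_wins(value):
    def strict_int(v):
        # Mirrors DumpableIntegerField(strict=True): ints only, bools and floats rejected.
        if isinstance(v, int) and not isinstance(v, bool):
            return v
        raise ValueError("not a strict int")

    def strict_float(v):
        # Mirrors DumpableFloatField(strict=True): floats only, no upcast from int or str.
        if isinstance(v, float):
            return v
        raise ValueError("not a strict float")

    def as_str(v):
        if isinstance(v, str):
            return v
        raise ValueError("not a str")

    def as_bool(v):
        if isinstance(v, bool):
            return v
        raise ValueError("not a bool")

    errors = []
    # Same order as PrimitiveValueField: int, float, str, bool; the first parser that succeeds wins.
    for parse in (strict_int, strict_float, as_str, as_bool):
        try:
            return parse(value)
        except ValueError as e:
            errors.append(str(e))
    raise ValueError(errors)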
| promptflow/src/promptflow/promptflow/_sdk/schemas/_fields.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/schemas/_fields.py",
"repo_id": "promptflow",
"token_count": 4708
} | 41 |
import re
from dataclasses import dataclass
from enum import Enum
from functools import partial
from pathlib import Path
from typing import Any, Callable
from promptflow._utils.multimedia_utils import is_multimedia_dict
class ResourceType(Enum):
"""
Enumeration of different types of multimedia resources.
We support path, URL, and base64 data.
"""
PATH = "path"
URL = "url"
BASE64 = "base64"
@dataclass
class MultimediaInfo:
"""
Data class that holds information about a multimedia resource.
"""
mime_type: str # The MIME type of the multimedia resource.
resource_type: ResourceType # The type of the resource as defined in ResourceType.
content: str # The content of the multimedia resource (path, URL, or base64 string).
class AbstractMultimediaFormatAdapter:
"""
Abstract base class for adapting multimedia formats.
This class provides an interface for extracting multimedia information
from various data formats or constructing data formats from multimedia information.
Subclasses should implement methods for specific contract version.
A MultimediaInfo object contains the mime_type, resource_type, and the actual content
of the multimedia resource.
The multimedia data is typically represented as a dictionary
with keys and values conforming to a specific multimedia data contract.
One multimedia data example from 20231201 version: {"data:image/jpg;path": "logo.jpg"}
"""
# Check if the original_data is a multimedia format according to the current contract version.
def is_valid_format(self, original_data: Any):
raise NotImplementedError()
def extract_info(self, original_data: Any) -> MultimediaInfo:
"""
Get the MultimediaInfo from the original data. Will include mime_type, resource_type, and content.
Below is an example for the 20231201 version:
{"data:image/jpg;path": "logo.jpg"} -> "image/jpg", "path", "logo.jpg"
"""
raise NotImplementedError()
def create_data(self, info: MultimediaInfo) -> Any:
"""
Create multimedia data from info. Below is an example for the 20231201 version:
"image/jpg", "path", "logo.jpg" -> {"data:image/jpg;path": "logo.jpg"}
"""
raise NotImplementedError()
class MultimediaFormatAdapter20231201(AbstractMultimediaFormatAdapter):
"""
    The 20231201 version is our first contract version; it supports text and images (path/url/base64).
20231201 is the version number assigned by the customer in the YAML file.
Path format example: {"data:image/jpg;path": "logo.jpg"}
Url format example: {"data:image/jpg;url": "https://example.com/logo.jpg"}
Base64 format example: {"data:image/jpg;base64": "base64 string"}
"""
MIME_PATTERN = re.compile(r"^data:(.*);(path|base64|url)$")
def is_valid_format(self, original_data: Any):
return isinstance(original_data, dict) and is_multimedia_dict(original_data)
def extract_info(self, original_data: Any) -> MultimediaInfo:
if not self.is_valid_format(original_data):
return None
for key in original_data:
match = re.match(self.MIME_PATTERN, key)
if match:
mime_type, resource_type = match.group(1), match.group(2)
content = original_data[key]
return MultimediaInfo(mime_type, ResourceType(resource_type), content)
return None
def create_data(self, info: MultimediaInfo):
return {f"data:{info.mime_type};{info.resource_type.value}": info.content}
class AbstractMultimediaInfoConverter:
def convert(self, info: MultimediaInfo) -> MultimediaInfo:
"""
Change info's mime type/resource type/content based on the client's logic.
For cases that do not need to be changed, just return the original info.
:param info: The MultimediaInfo to be converted.
:type info: MultimediaInfo
:return: The converted MultimediaInfo.
:rtype: MultimediaInfo
"""
raise NotImplementedError()
class MultimediaConverter:
def __init__(self, flow_file: Path):
"""
Initialize the MultimediaConverter.
:param flow_file: The path to the YAML file. The YAML content will be used to determine the contract version.
:type flow_file: Path
"""
# TODO: check yaml content to determine the current contract version.
        # Different contract versions will have different multimedia formats.
        # The version exists in the yaml file, so we need to load the yaml to get the version and init the converter.
self.format_adapter = MultimediaFormatAdapter20231201()
def convert_content_recursively(self, content: Any, client_converter: AbstractMultimediaInfoConverter):
"""
Recursively converts multimedia data format in content.
:param content: The object that may contain multimedia data.
:type content: Any
:param client_converter: The converter to modify multimedia info based on the client's logic.
:type client_converter: AbstractMultimediaInfoConverter
:return: The content with changed multimedia format.
:rtype: Any
"""
process_func = partial(self._convert_content, converter=client_converter)
return self._process_content_recursively(content, process_func=process_func)
def _convert_content(self, original_data: Any, converter: AbstractMultimediaInfoConverter):
if not self.format_adapter.is_valid_format(original_data):
return original_data
info = self.format_adapter.extract_info(original_data)
        # When multimedia info can't be extracted from original_data, return original_data directly.
if info is None:
return original_data
info = converter.convert(info)
return self.format_adapter.create_data(info)
def _process_content_recursively(self, content: Any, process_func: Callable):
if isinstance(content, list):
return [self._process_content_recursively(item, process_func) for item in content]
elif isinstance(content, dict):
if self.format_adapter.is_valid_format(content):
return process_func(original_data=content)
else:
return {k: self._process_content_recursively(v, process_func) for k, v in content.items()}
else:
return content
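# Illustrative sketch (not part of the original class): the recursive traversal pattern used by
# _process_content_recursively above, applied to a nested structure. The predicate, rewrite logic,
# and sample data are assumptions for illustration only.
def _example_recursive_conversion():
    def is_multimedia(d):
        return isinstance(d, dict) and len(d) == 1 and next(iter(d)).startswith("data:")

    def walk(content, process):
        if isinstance(content, list):
            return [walk(item, process) for item in content]
        if isinstance(content, dict):
            if is_multimedia(content):
                return process(content)
            return {k: walk(v, process) for k, v in content.items()}
        return content

    def rewrite(d):
        # Example client logic: rewrite the content of each multimedia dict.
        return {k: f"outputs/{v}" for k, v in d.items()}

    nested = {"answer": "ok", "images": [{"data:image/jpg;path": "logo.jpg"}]}
    return walk(nested, rewrite)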
| promptflow/src/promptflow/promptflow/_utils/multimedia_data_converter.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/multimedia_data_converter.py",
"repo_id": "promptflow",
"token_count": 2299
} | 42 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import asdict, dataclass
from promptflow.azure._restclient.flow.models import ConnectionConfigSpec as RestConnectionConfigSpec
from promptflow.azure._restclient.flow.models import WorkspaceConnectionSpec as RestWorkspaceConnectionSpec
@dataclass
class ConnectionConfigSpec:
name: str
display_name: str
config_value_type: str
default_value: str = None
description: str = None
enum_values: list = None
is_optional: bool = False
@classmethod
def _from_rest_object(cls, rest_obj: RestConnectionConfigSpec):
return cls(
name=rest_obj.name,
display_name=rest_obj.display_name,
config_value_type=rest_obj.config_value_type,
default_value=rest_obj.default_value,
description=rest_obj.description,
enum_values=rest_obj.enum_values,
is_optional=rest_obj.is_optional,
)
def _to_dict(self):
return asdict(self, dict_factory=lambda x: {k: v for (k, v) in x if v is not None})
@dataclass
class WorkspaceConnectionSpec:
module: str
connection_type: str # Connection type example: AzureOpenAI
    flow_value_type: str  # Flow value type is the input.type on a node, example: AzureOpenAIConnection
config_specs: list = None
@classmethod
def _from_rest_object(cls, rest_obj: RestWorkspaceConnectionSpec):
return cls(
config_specs=[
ConnectionConfigSpec._from_rest_object(config_spec) for config_spec in (rest_obj.config_specs or [])
],
module=rest_obj.module,
connection_type=rest_obj.connection_type,
flow_value_type=rest_obj.flow_value_type,
)
def _to_dict(self):
return asdict(self, dict_factory=lambda x: {k: v for (k, v) in x if v is not None})
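# Illustrative sketch (not part of the original module): how the dict_factory above drops
# None-valued fields when serializing a dataclass. The sample dataclass is an assumption
# for illustration only.
def _example_asdict_drop_none():
    from dataclasses import asdict, dataclass

    @dataclass
    class _Sample:
        name: str
        description: str = None

    spec = _Sample(name="api_key")
    # -> {"name": "api_key"} ; "description" is omitted because it is None
    return asdict(spec, dict_factory=lambda x: {k: v for (k, v) in x if v is not None})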
| promptflow/src/promptflow/promptflow/azure/_entities/_workspace_connection_spec.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_entities/_workspace_connection_spec.py",
"repo_id": "promptflow",
"token_count": 771
} | 43 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._bulk_runs_operations import build_cancel_flow_run_request, build_clone_flow_from_flow_run_request, build_get_flow_child_runs_request, build_get_flow_node_run_base_path_request, build_get_flow_node_runs_request, build_get_flow_run_info_request, build_get_flow_run_log_content_request, build_submit_bulk_run_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class BulkRunsOperations:
"""BulkRunsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def submit_bulk_run(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
body: Optional["_models.SubmitBulkRunRequest"] = None,
**kwargs: Any
) -> str:
"""submit_bulk_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param body:
:type body: ~flow.models.SubmitBulkRunRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'SubmitBulkRunRequest')
else:
_json = None
request = build_submit_bulk_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.submit_bulk_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('str', pipeline_response)
if response.status_code == 204:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
submit_bulk_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/submit'} # type: ignore
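    # Illustrative sketch (not part of the generated client): how a caller might invoke the operation
    # above, assuming a configured async service client that exposes this operation group as an
    # attribute named "bulk_runs" (the attribute name and placeholder values are assumptions for
    # illustration only):
    #
    #     async def _example_submit(client):
    #         run_id = await client.bulk_runs.submit_bulk_run(
    #             subscription_id="<subscription-id>",
    #             resource_group_name="<resource-group>",
    #             workspace_name="<workspace-name>",
    #             body=None,
    #         )
    #         return run_id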
@distributed_trace_async
async def cancel_flow_run(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
flow_run_id: str,
**kwargs: Any
) -> str:
"""cancel_flow_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_flow_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
template_url=self.cancel_flow_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_flow_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/cancel'} # type: ignore
@distributed_trace_async
async def clone_flow_from_flow_run(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
flow_run_id: str,
body: Optional["_models.CreateFlowRequest"] = None,
**kwargs: Any
) -> "_models.FlowDto":
"""clone_flow_from_flow_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param body:
:type body: ~flow.models.CreateFlowRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowDto, or the result of cls(response)
:rtype: ~flow.models.FlowDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateFlowRequest')
else:
_json = None
request = build_clone_flow_from_flow_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
content_type=content_type,
json=_json,
template_url=self.clone_flow_from_flow_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
clone_flow_from_flow_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/clone'} # type: ignore
@distributed_trace_async
async def get_flow_run_info(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
flow_run_id: str,
**kwargs: Any
) -> "_models.FlowRunInfo":
"""get_flow_run_info.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRunInfo, or the result of cls(response)
:rtype: ~flow.models.FlowRunInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRunInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_run_info_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
template_url=self.get_flow_run_info.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRunInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_run_info.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}'} # type: ignore
@distributed_trace_async
async def get_flow_child_runs(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
flow_run_id: str,
index: Optional[int] = None,
start_index: Optional[int] = None,
end_index: Optional[int] = None,
**kwargs: Any
) -> List[Any]:
"""get_flow_child_runs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param index:
:type index: int
:param start_index:
:type start_index: int
:param end_index:
:type end_index: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of any, or the result of cls(response)
:rtype: list[any]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[Any]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_child_runs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
index=index,
start_index=start_index,
end_index=end_index,
template_url=self.get_flow_child_runs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[object]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_child_runs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/childRuns'} # type: ignore
@distributed_trace_async
async def get_flow_node_runs(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
flow_run_id: str,
node_name: str,
index: Optional[int] = None,
start_index: Optional[int] = None,
end_index: Optional[int] = None,
aggregation: Optional[bool] = False,
**kwargs: Any
) -> List[Any]:
"""get_flow_node_runs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param node_name:
:type node_name: str
:param index:
:type index: int
:param start_index:
:type start_index: int
:param end_index:
:type end_index: int
:param aggregation:
:type aggregation: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of any, or the result of cls(response)
:rtype: list[any]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[Any]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_node_runs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
node_name=node_name,
index=index,
start_index=start_index,
end_index=end_index,
aggregation=aggregation,
template_url=self.get_flow_node_runs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[object]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_node_runs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}'} # type: ignore
@distributed_trace_async
async def get_flow_node_run_base_path(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
flow_run_id: str,
node_name: str,
**kwargs: Any
) -> "_models.FlowRunBasePath":
"""get_flow_node_run_base_path.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param node_name:
:type node_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRunBasePath, or the result of cls(response)
:rtype: ~flow.models.FlowRunBasePath
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRunBasePath"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_node_run_base_path_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
node_name=node_name,
template_url=self.get_flow_node_run_base_path.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRunBasePath', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_node_run_base_path.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}/basePath'} # type: ignore
@distributed_trace_async
async def get_flow_run_log_content(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
flow_run_id: str,
**kwargs: Any
) -> str:
"""get_flow_run_log_content.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_run_log_content_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
template_url=self.get_flow_run_log_content.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_run_log_content.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/logContent'} # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_bulk_runs_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_bulk_runs_operations.py",
"repo_id": "promptflow",
"token_count": 9909
} | 44 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_submit_bulk_run_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/submit')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
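# Illustrative sketch (not part of the generated module): what the builder above produces.
# The sample identifiers and JSON body are assumptions for illustration only.
def _example_build_submit_request():
    # Returns an HttpRequest for POST .../BulkRuns/submit with Accept and Content-Type headers set.
    return build_submit_bulk_run_request(
        subscription_id="00000000-0000-0000-0000-000000000000",
        resource_group_name="my-rg",
        workspace_name="my-ws",
        content_type="application/json",
        json={},  # a serialized SubmitBulkRunRequest payload would normally go here
    )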
def build_cancel_flow_run_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "text/plain, application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/cancel')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_clone_flow_from_flow_run_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/clone')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_flow_run_info_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_flow_child_runs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
index = kwargs.pop('index', None) # type: Optional[int]
start_index = kwargs.pop('start_index', None) # type: Optional[int]
end_index = kwargs.pop('end_index', None) # type: Optional[int]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/childRuns')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if index is not None:
query_parameters['index'] = _SERIALIZER.query("index", index, 'int')
if start_index is not None:
query_parameters['startIndex'] = _SERIALIZER.query("start_index", start_index, 'int')
if end_index is not None:
query_parameters['endIndex'] = _SERIALIZER.query("end_index", end_index, 'int')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_node_runs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
node_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
index = kwargs.pop('index', None) # type: Optional[int]
start_index = kwargs.pop('start_index', None) # type: Optional[int]
end_index = kwargs.pop('end_index', None) # type: Optional[int]
aggregation = kwargs.pop('aggregation', False) # type: Optional[bool]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
"nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if index is not None:
query_parameters['index'] = _SERIALIZER.query("index", index, 'int')
if start_index is not None:
query_parameters['startIndex'] = _SERIALIZER.query("start_index", start_index, 'int')
if end_index is not None:
query_parameters['endIndex'] = _SERIALIZER.query("end_index", end_index, 'int')
if aggregation is not None:
query_parameters['aggregation'] = _SERIALIZER.query("aggregation", aggregation, 'bool')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_flow_node_run_base_path_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
node_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}/basePath')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
"nodeName": _SERIALIZER.url("node_name", node_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_flow_run_log_content_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/logContent')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"flowRunId": _SERIALIZER.url("flow_run_id", flow_run_id, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
# fmt: on
class BulkRunsOperations(object):
"""BulkRunsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def submit_bulk_run(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
body=None, # type: Optional["_models.SubmitBulkRunRequest"]
**kwargs # type: Any
):
# type: (...) -> str
"""submit_bulk_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param body:
:type body: ~flow.models.SubmitBulkRunRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'SubmitBulkRunRequest')
else:
_json = None
request = build_submit_bulk_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
content_type=content_type,
json=_json,
template_url=self.submit_bulk_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('str', pipeline_response)
if response.status_code == 204:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
submit_bulk_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/submit'} # type: ignore
@distributed_trace
def cancel_flow_run(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""cancel_flow_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_cancel_flow_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
template_url=self.cancel_flow_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel_flow_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/cancel'} # type: ignore
@distributed_trace
def clone_flow_from_flow_run(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
body=None, # type: Optional["_models.CreateFlowRequest"]
**kwargs # type: Any
):
# type: (...) -> "_models.FlowDto"
"""clone_flow_from_flow_run.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param body:
:type body: ~flow.models.CreateFlowRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowDto, or the result of cls(response)
:rtype: ~flow.models.FlowDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateFlowRequest')
else:
_json = None
request = build_clone_flow_from_flow_run_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
content_type=content_type,
json=_json,
template_url=self.clone_flow_from_flow_run.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
clone_flow_from_flow_run.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/clone'} # type: ignore
@distributed_trace
def get_flow_run_info(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FlowRunInfo"
"""get_flow_run_info.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRunInfo, or the result of cls(response)
:rtype: ~flow.models.FlowRunInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRunInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_run_info_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
template_url=self.get_flow_run_info.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRunInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_run_info.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}'} # type: ignore
@distributed_trace
def get_flow_child_runs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
index=None, # type: Optional[int]
start_index=None, # type: Optional[int]
end_index=None, # type: Optional[int]
**kwargs # type: Any
):
# type: (...) -> List[Any]
"""get_flow_child_runs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param index:
:type index: int
:param start_index:
:type start_index: int
:param end_index:
:type end_index: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of any, or the result of cls(response)
:rtype: list[any]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[Any]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_child_runs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
index=index,
start_index=start_index,
end_index=end_index,
template_url=self.get_flow_child_runs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[object]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_child_runs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/childRuns'} # type: ignore
@distributed_trace
def get_flow_node_runs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
node_name, # type: str
index=None, # type: Optional[int]
start_index=None, # type: Optional[int]
end_index=None, # type: Optional[int]
aggregation=False, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> List[Any]
"""get_flow_node_runs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param node_name:
:type node_name: str
:param index:
:type index: int
:param start_index:
:type start_index: int
:param end_index:
:type end_index: int
:param aggregation:
:type aggregation: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of any, or the result of cls(response)
:rtype: list[any]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List[Any]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_node_runs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
node_name=node_name,
index=index,
start_index=start_index,
end_index=end_index,
aggregation=aggregation,
template_url=self.get_flow_node_runs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[object]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_node_runs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}'} # type: ignore
@distributed_trace
def get_flow_node_run_base_path(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
node_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FlowRunBasePath"
"""get_flow_node_run_base_path.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:param node_name:
:type node_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FlowRunBasePath, or the result of cls(response)
:rtype: ~flow.models.FlowRunBasePath
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRunBasePath"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_node_run_base_path_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
node_name=node_name,
template_url=self.get_flow_node_run_base_path.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('FlowRunBasePath', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_node_run_base_path.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/nodeRuns/{nodeName}/basePath'} # type: ignore
@distributed_trace
def get_flow_run_log_content(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
flow_run_id, # type: str
**kwargs # type: Any
):
# type: (...) -> str
"""get_flow_run_log_content.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param flow_run_id:
:type flow_run_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_flow_run_log_content_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
flow_run_id=flow_run_id,
template_url=self.get_flow_run_log_content.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_flow_run_log_content.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/BulkRuns/{flowRunId}/logContent'} # type: ignore
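# --- Editor-added usage sketch (hypothetical; not part of the generated client) ---
# A minimal sketch of calling the operations above, assuming an already-constructed operations
# instance (e.g. exposed by the generated service client). Method names and parameters are taken
# from the signatures above; how the client is built and authenticated is out of scope here.
def _example_fetch_run_details(bulk_runs_ops, subscription_id, resource_group_name, workspace_name, flow_run_id):
    # Both calls raise ~azure.core.exceptions.HttpResponseError for non-200 responses.
    run_info = bulk_runs_ops.get_flow_run_info(subscription_id, resource_group_name, workspace_name, flow_run_id)
    log_content = bulk_runs_ops.get_flow_run_log_content(subscription_id, resource_group_name, workspace_name, flow_run_id)
    return run_info, log_content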
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_bulk_runs_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_bulk_runs_operations.py",
"repo_id": "promptflow",
"token_count": 14864
} | 45 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
from pathlib import Path
from azure.ai.ml._schema import UnionField, YamlFileSchema
from azure.ai.ml._schema.core.fields import LocalPathField
from marshmallow import fields, post_load
from promptflow._utils.logger_utils import LoggerFactory
module_logger = LoggerFactory.get_logger(__name__)
class FlowSchema(YamlFileSchema):
name = fields.Str(attribute="name")
id = fields.Str(attribute="id")
description = fields.Str(attribute="description")
tags = fields.Dict(keys=fields.Str, attribute="tags")
path = UnionField(
[
LocalPathField(),
fields.Str(),
],
)
display_name = fields.Str(attribute="display_name")
type = fields.Str(attribute="type")
properties = fields.Dict(keys=fields.Str, attribute="properties")
@post_load
def update_properties(self, dct, **kwargs):
folder = Path(self.context["base_path"])
flow_type = dct.get("type")
if flow_type:
mapping = {
"standard": "default",
"evaluate": "evaluation",
}
dct["type"] = mapping[flow_type]
properties = dct.get("properties")
if properties and "promptflow.batch_inputs" in properties:
input_path = properties["promptflow.batch_inputs"]
samples_file = folder / input_path
if samples_file.exists():
with open(samples_file, "r", encoding="utf-8") as fp:
properties["promptflow.batch_inputs"] = json.loads(fp.read())
return dct
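# --- Editor-added illustrative sketch (hypothetical helper; not part of the SDK) ---
# The update_properties hook above maps authoring-time flow types to service-side names.
# A stand-alone restatement of that mapping for clarity; like the hook, it raises KeyError
# for an unrecognized type.
def _map_flow_type_example(flow_type: str) -> str:
    mapping = {
        "standard": "default",
        "evaluate": "evaluation",
    }
    return mapping[flow_type]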
| promptflow/src/promptflow/promptflow/azure/_schemas/_flow_schema.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_schemas/_flow_schema.py",
"repo_id": "promptflow",
"token_count": 684
} | 46 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import re
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional
from promptflow._constants import LINE_NUMBER_KEY
from promptflow._core._errors import UnexpectedError
from promptflow._utils.load_data import load_data
from promptflow._utils.logger_utils import logger
from promptflow._utils.multimedia_utils import resolve_multimedia_data_recursively
from promptflow._utils.utils import resolve_dir_to_absolute
from promptflow.batch._errors import EmptyInputsData, InputMappingError
from promptflow.contracts.flow import FlowInputDefinition
class BatchInputsProcessor:
def __init__(
self,
working_dir: Path,
flow_inputs: Mapping[str, FlowInputDefinition],
max_lines_count: Optional[int] = None,
):
self._working_dir = working_dir
self._max_lines_count = max_lines_count
self._flow_inputs = flow_inputs
self._default_inputs_mapping = {key: f"${{data.{key}}}" for key in flow_inputs}
def process_batch_inputs(self, input_dirs: Dict[str, str], inputs_mapping: Dict[str, str]):
input_dicts = self._resolve_input_data(input_dirs)
no_input_data = all(len(data) == 0 for data in input_dicts.values())
if no_input_data:
input_dirs_str = "\n".join(f"{input}: {Path(path).as_posix()}" for input, path in input_dirs.items())
message_format = (
"Couldn't find any inputs data at the given input paths. Please review the provided path "
"and consider resubmitting.\n{input_dirs}"
)
raise EmptyInputsData(message_format=message_format, input_dirs=input_dirs_str)
return self._validate_and_apply_inputs_mapping(input_dicts, inputs_mapping)
def _resolve_input_data(self, input_dirs: Dict[str, str]):
"""Resolve input data from input dirs"""
result = {}
for input_key, input_dir in input_dirs.items():
input_dir = resolve_dir_to_absolute(self._working_dir, input_dir)
result[input_key] = self._resolve_data_from_input_path(input_dir)
return result
def _resolve_data_from_input_path(self, input_path: Path):
"""Resolve input data from directory"""
result = []
if input_path.is_file():
result.extend(resolve_multimedia_data_recursively(
input_path.parent,
load_data(local_path=input_path, max_rows_count=self._max_lines_count))
)
else:
for input_file in input_path.rglob("*"):
if input_file.is_file():
result.extend(resolve_multimedia_data_recursively(
input_file.parent,
load_data(local_path=input_file, max_rows_count=self._max_lines_count))
)
if self._max_lines_count and len(result) >= self._max_lines_count:
break
if self._max_lines_count and len(result) >= self._max_lines_count:
logger.warning(
(
"The data provided exceeds the maximum lines limit. Currently, only the first "
f"{self._max_lines_count} lines are processed."
)
)
return result[: self._max_lines_count]
return result
def _validate_and_apply_inputs_mapping(self, inputs, inputs_mapping) -> List[Dict[str, Any]]:
"""Validate and apply inputs mapping for all lines in the flow.
:param inputs: The inputs to the flow.
:type inputs: Any
:param inputs_mapping: The mapping of input names to their corresponding values.
:type inputs_mapping: Dict[str, Any]
:return: A list of dictionaries containing the resolved inputs for each line in the flow.
:rtype: List[Dict[str, Any]]
"""
if not inputs_mapping:
logger.warning(
msg=(
"Starting run without column mapping may lead to unexpected results. "
"Please consult the following documentation for more information: https://aka.ms/pf/column-mapping"
)
)
inputs_mapping = self._complete_inputs_mapping_by_default_value(inputs_mapping)
resolved_inputs = self._apply_inputs_mapping_for_all_lines(inputs, inputs_mapping)
return resolved_inputs
def _complete_inputs_mapping_by_default_value(self, inputs_mapping):
inputs_mapping = inputs_mapping or {}
result_mapping = self._default_inputs_mapping
        # For inputs that have a default value, we don't try to read data from the default mapping.
        # The default value takes priority over the default mapping.
for key, value in self._flow_inputs.items():
if value and value.default is not None:
del result_mapping[key]
result_mapping.update(inputs_mapping)
return result_mapping
def _apply_inputs_mapping_for_all_lines(
self,
input_dict: Mapping[str, List[Mapping[str, Any]]],
inputs_mapping: Mapping[str, str],
) -> List[Dict[str, Any]]:
"""Apply input mapping to all input lines.
For example:
input_dict = {
'data': [{'question': 'q1', 'answer': 'ans1'}, {'question': 'q2', 'answer': 'ans2'}],
'baseline': [{'answer': 'baseline_ans1'}, {'answer': 'baseline_ans2'}],
'output': [{'answer': 'output_ans1', 'line_number': 0}, {'answer': 'output_ans2', 'line_number': 1}],
}
inputs_mapping: {
"question": "${data.question}", # Question from the data
"groundtruth": "${data.answer}", # Answer from the data
"baseline": "${baseline.answer}", # Answer from the baseline
"deployment_name": "text-davinci-003", # literal value
"answer": "${output.answer}", # Answer from the output
"line_number": "${output.line_number}", # Answer from the output
}
Returns:
[{
"question": "q1",
"groundtruth": "ans1",
"baseline": "baseline_ans1",
"answer": "output_ans1",
"deployment_name": "text-davinci-003",
"line_number": 0,
},
{
"question": "q2",
"groundtruth": "ans2",
"baseline": "baseline_ans2",
"answer": "output_ans2",
"deployment_name": "text-davinci-003",
"line_number": 1,
}]
"""
if inputs_mapping is None:
            # This exception should not happen since callers are expected to use _default_inputs_mapping for None input.
            # So this exception is a system error.
raise UnexpectedError(
message_format=(
"The input for batch run is incorrect. Please make sure to set up a proper input mapping before "
"proceeding. If you need additional help, feel free to contact support for further assistance."
)
)
merged_list = self._merge_input_dicts_by_line(input_dict)
if len(merged_list) == 0:
raise InputMappingError(
message_format=(
"The input for batch run is incorrect. Could not find one complete line on the provided input. "
"Please ensure that you supply data on the same line to resolve this issue."
)
)
result = [apply_inputs_mapping(item, inputs_mapping) for item in merged_list]
return result
def _merge_input_dicts_by_line(
self,
input_dict: Mapping[str, List[Mapping[str, Any]]],
) -> List[Mapping[str, Mapping[str, Any]]]:
for input_key, list_of_one_input in input_dict.items():
if not list_of_one_input:
raise InputMappingError(
message_format=(
"The input for batch run is incorrect. Input from key '{input_key}' is an empty list, "
"which means we cannot generate a single line input for the flow run. "
"Please rectify the input and try again."
),
input_key=input_key,
)
# Check if line numbers are aligned.
all_lengths_without_line_number = {
input_key: len(list_of_one_input)
for input_key, list_of_one_input in input_dict.items()
if not any(LINE_NUMBER_KEY in one_item for one_item in list_of_one_input)
}
if len(set(all_lengths_without_line_number.values())) > 1:
raise InputMappingError(
message_format=(
"The input for batch run is incorrect. Line numbers are not aligned. "
"Some lists have dictionaries missing the 'line_number' key, "
"and the lengths of these lists are different. "
"List lengths are: {all_lengths_without_line_number}. "
"Please make sure these lists have the same length or add 'line_number' key to each dictionary."
),
all_lengths_without_line_number=all_lengths_without_line_number,
)
# Collect each line item from each input.
tmp_dict = {}
for input_key, list_of_one_input in input_dict.items():
if input_key in all_lengths_without_line_number:
                # Assume line_number starts from 0.
for index, one_line_item in enumerate(list_of_one_input):
if index not in tmp_dict:
tmp_dict[index] = {}
tmp_dict[index][input_key] = one_line_item
else:
for one_line_item in list_of_one_input:
if LINE_NUMBER_KEY in one_line_item:
index = one_line_item[LINE_NUMBER_KEY]
if index not in tmp_dict:
tmp_dict[index] = {}
tmp_dict[index][input_key] = one_line_item
result = []
for line, values_for_one_line in tmp_dict.items():
            # A line with a missing input is not acceptable.
if len(values_for_one_line) != len(input_dict):
continue
values_for_one_line[LINE_NUMBER_KEY] = line
result.append(values_for_one_line)
return result
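# --- Editor-added usage sketch (illustrative only) ---
# A minimal sketch of per-line merging plus mapping, reusing the sample data from the
# _apply_inputs_mapping_for_all_lines docstring above. Calling the private helper directly is for
# illustration; real callers go through process_batch_inputs with input directories instead.
def _example_apply_mapping_for_all_lines():
    processor = BatchInputsProcessor(working_dir=Path("."), flow_inputs={})
    input_dict = {
        "data": [{"question": "q1", "answer": "ans1"}, {"question": "q2", "answer": "ans2"}],
        "baseline": [{"answer": "baseline_ans1"}, {"answer": "baseline_ans2"}],
    }
    inputs_mapping = {
        "question": "${data.question}",
        "groundtruth": "${data.answer}",
        "baseline": "${baseline.answer}",
        "deployment_name": "text-davinci-003",
    }
    # Each merged line is resolved according to the mapping (plus a line_number key).
    return processor._apply_inputs_mapping_for_all_lines(input_dict, inputs_mapping)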
def apply_inputs_mapping(
inputs: Mapping[str, Mapping[str, Any]],
inputs_mapping: Mapping[str, str],
) -> Dict[str, Any]:
"""Apply input mapping to inputs for new contract.
.. admonition:: Examples
.. code-block:: python
inputs: {
"data": {"answer": "I'm fine, thank you.", "question": "How are you?"},
"baseline": {"answer": "The weather is good."},
}
inputs_mapping: {
"question": "${data.question}",
"groundtruth": "${data.answer}",
"baseline": "${baseline.answer}",
"deployment_name": "literal_value",
}
Returns: {
"question": "How are you?",
"groundtruth": "I'm fine, thank you."
"baseline": "The weather is good.",
"deployment_name": "literal_value",
}
:param inputs: A mapping of input keys to their corresponding values.
:type inputs: Mapping[str, Mapping[str, Any]]
:param inputs_mapping: A mapping of input keys to their corresponding mapping expressions.
:type inputs_mapping: Mapping[str, str]
:return: A dictionary of input keys to their corresponding mapped values.
:rtype: Dict[str, Any]
:raises InputMappingError: If any of the input mapping relations are not found in the inputs.
"""
result = {}
notfound_mapping_relations = []
for map_to_key, map_value in inputs_mapping.items():
# Ignore reserved key configuration from input mapping.
if map_to_key == LINE_NUMBER_KEY:
continue
if not isinstance(map_value, str): # All non-string values are literal values.
result[map_to_key] = map_value
continue
match = re.search(r"^\${([^{}]+)}$", map_value)
if match is not None:
pattern = match.group(1)
            # We could also try each key-value pair from inputs to match the pattern,
            # but splitting the pattern by '.' is a deterministic approach.
            # So keys containing fewer '.' characters get higher priority.
splitted_str = pattern.split(".")
find_match = False
for i in range(1, len(splitted_str)):
key = ".".join(splitted_str[:i])
source = ".".join(splitted_str[i:])
if key in inputs and source in inputs[key]:
find_match = True
result[map_to_key] = inputs[key][source]
break
if not find_match:
notfound_mapping_relations.append(map_value)
else:
result[map_to_key] = map_value # Literal value
# Return all not found mapping relations in one exception to provide better debug experience.
if notfound_mapping_relations:
invalid_relations = ", ".join(notfound_mapping_relations)
raise InputMappingError(
message_format=(
"The input for batch run is incorrect. Couldn't find these mapping relations: {invalid_relations}. "
"Please make sure your input mapping keys and values match your YAML input section and input data. "
"For more information, refer to the following documentation: https://aka.ms/pf/column-mapping"
),
invalid_relations=invalid_relations,
)
# For PRS scenario, apply_inputs_mapping will be used for exec_line and line_number is not necessary.
if LINE_NUMBER_KEY in inputs:
result[LINE_NUMBER_KEY] = inputs[LINE_NUMBER_KEY]
return result
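# --- Editor-added usage sketch (illustrative only) ---
# Resolving a single line with apply_inputs_mapping, mirroring the docstring example above.
# "${data.question}" is looked up as inputs["data"]["question"]; any ${...} expressions that do not
# match are collected and reported together in a single InputMappingError.
def _example_apply_inputs_mapping():
    inputs = {
        "data": {"answer": "I'm fine, thank you.", "question": "How are you?"},
        "baseline": {"answer": "The weather is good."},
    }
    inputs_mapping = {
        "question": "${data.question}",
        "groundtruth": "${data.answer}",
        "baseline": "${baseline.answer}",
        "deployment_name": "literal_value",
    }
    return apply_inputs_mapping(inputs, inputs_mapping)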
| promptflow/src/promptflow/promptflow/batch/_batch_inputs_processor.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/batch/_batch_inputs_processor.py",
"repo_id": "promptflow",
"token_count": 6425
} | 47 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# isort: skip_file
# skip to avoid circular import
__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
from promptflow._sdk.entities._connection import (
AzureContentSafetyConnection,
AzureOpenAIConnection,
CognitiveSearchConnection,
CustomConnection,
OpenAIConnection,
SerpConnection,
QdrantConnection,
FormRecognizerConnection,
)
from promptflow._sdk.entities._run import Run
from promptflow._core.tool import InputSetting, DynamicList
from promptflow._sdk.entities._flow import FlowContext
__all__ = [
# region Connection
"AzureContentSafetyConnection",
"AzureOpenAIConnection",
"OpenAIConnection",
"CustomConnection",
"CognitiveSearchConnection",
"SerpConnection",
"QdrantConnection",
"FormRecognizerConnection",
# endregion
# region Run
"Run",
# endregion
# region Tool
"InputSetting",
"DynamicList",
# endregion
# region Flow
"FlowContext",
# endregion
]
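# --- Editor-added usage sketch (hypothetical; for illustration only) ---
# The names above form the public entities surface re-exported by this package. A minimal sketch of
# constructing one of them; the CustomConnection keyword arguments shown here (name/secrets/configs)
# are an assumption and may differ from the actual constructor signature.
def _example_build_custom_connection():
    return CustomConnection(
        name="my_custom_connection",  # hypothetical connection name
        secrets={"api_key": "<to-be-replaced>"},
        configs={"endpoint": "https://example.com"},  # hypothetical endpoint
    )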
| promptflow/src/promptflow/promptflow/entities/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/entities/__init__.py",
"repo_id": "promptflow",
"token_count": 384
} | 48 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import asyncio
import copy
import functools
import inspect
import os
import uuid
from pathlib import Path
from threading import current_thread
from types import GeneratorType
from typing import Any, Callable, Dict, List, Mapping, Optional, Tuple
from promptflow._constants import LINE_NUMBER_KEY
from promptflow._core._errors import NotSupported, UnexpectedError
from promptflow._core.cache_manager import AbstractCacheManager
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow._core.metric_logger import add_metric_logger, remove_metric_logger
from promptflow._core.openai_injector import inject_openai_api
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._core.tool import STREAMING_OPTION_PARAMETER_ATTR
from promptflow._core.tools_manager import ToolsManager
from promptflow._utils.context_utils import _change_working_dir
from promptflow._utils.execution_utils import (
apply_default_value_for_input,
collect_lines,
get_aggregation_inputs_properties,
)
from promptflow._utils.logger_utils import flow_logger, logger
from promptflow._utils.multimedia_utils import (
load_multimedia_data,
load_multimedia_data_recursively,
persist_multimedia_data,
)
from promptflow._utils.utils import get_int_env_var, transpose
from promptflow._utils.yaml_utils import load_yaml
from promptflow.contracts.flow import Flow, FlowInputDefinition, InputAssignment, InputValueType, Node
from promptflow.contracts.run_info import FlowRunInfo, Status
from promptflow.contracts.run_mode import RunMode
from promptflow.exceptions import PromptflowException
from promptflow.executor import _input_assignment_parser
from promptflow.executor._async_nodes_scheduler import AsyncNodesScheduler
from promptflow.executor._errors import (
InvalidFlowFileError,
NodeOutputNotFound,
OutputReferenceNotExist,
SingleNodeValidationError,
)
from promptflow.executor._flow_nodes_scheduler import (
DEFAULT_CONCURRENCY_BULK,
DEFAULT_CONCURRENCY_FLOW,
FlowNodesScheduler,
)
from promptflow.executor._result import AggregationResult, LineResult
from promptflow.executor._tool_resolver import ToolResolver
from promptflow.executor.flow_validator import FlowValidator
from promptflow.storage import AbstractRunStorage
from promptflow.storage._run_storage import DefaultRunStorage
class FlowExecutor:
"""This class is used to execute a single flow for different inputs.
:param flow: The flow to be executed.
:type flow: ~promptflow.contracts.flow.Flow
:param connections: The connections to be used for the flow.
:type connections: dict
:param run_tracker: The run tracker to be used for the flow.
:type run_tracker: ~promptflow._core.run_tracker.RunTracker
:param cache_manager: The cache manager to be used for the flow.
:type cache_manager: ~promptflow._core.cache_manager.AbstractCacheManager
:param loaded_tools: The loaded tools to be used for the flow.
:type loaded_tools: Mapping[str, Callable]
:param worker_count: The number of workers to be used for the flow. Default is 16.
:type worker_count: Optional[int]
:param raise_ex: Whether to raise exceptions or not. Default is False.
:type raise_ex: Optional[bool]
:param working_dir: The working directory to be used for the flow. Default is None.
:type working_dir: Optional[str]
:param line_timeout_sec: The line timeout in seconds to be used for the flow. Default is LINE_TIMEOUT_SEC.
:type line_timeout_sec: Optional[int]
:param flow_file: The flow file to be used for the flow. Default is None.
:type flow_file: Optional[Path]
"""
def __init__(
self,
flow: Flow,
connections: dict,
run_tracker: RunTracker,
cache_manager: AbstractCacheManager,
loaded_tools: Mapping[str, Callable],
*,
entry: Optional[str] = None,
raise_ex: bool = False,
working_dir=None,
line_timeout_sec=None,
flow_file=None,
):
"""Initialize a FlowExecutor object.
:param flow: The Flow object to execute.
:type flow: ~promptflow.contracts.flow.Flow
:param connections: The connections between nodes in the Flow.
:type connections: dict
:param run_tracker: The RunTracker object to track the execution of the Flow.
:type run_tracker: ~promptflow._core.run_tracker.RunTracker
:param cache_manager: The AbstractCacheManager object to manage caching of results.
:type cache_manager: ~promptflow._core.cache_manager.AbstractCacheManager
:param loaded_tools: A mapping of tool names to their corresponding functions.
:type loaded_tools: Mapping[str, Callable]
:param raise_ex: Whether to raise an exception if an error occurs during execution.
:type raise_ex: bool
:param working_dir: The working directory to use for execution.
:type working_dir: str or None
:param line_timeout_sec: The maximum time to wait for a line of output from a node.
:type line_timeout_sec: int or None
:param flow_file: The path to the file containing the Flow definition.
:type flow_file: str or None
"""
# Inject OpenAI API to make sure traces and headers injection works and
# update OpenAI API configs from environment variables.
inject_openai_api()
self._flow = flow
self._flow_id = flow.id or str(uuid.uuid4())
self._connections = connections
self._aggregation_inputs_references = get_aggregation_inputs_properties(flow)
self._aggregation_nodes = {node.name for node in self._flow.nodes if node.aggregation}
self._run_tracker = run_tracker
self._cache_manager = cache_manager
self._loaded_tools = loaded_tools
self._working_dir = working_dir
self._line_timeout_sec = line_timeout_sec or get_int_env_var("PF_LINE_TIMEOUT_SEC")
self._flow_file = flow_file
try:
self._tools_manager = ToolsManager(loaded_tools)
tool_to_meta = {tool.name: tool for tool in flow.tools}
custom_tools = {
node.name: self._tools_manager._load_custom_tool(tool_to_meta[node.tool], node.name)
for node in flow.nodes
if not self._tools_manager.loaded(node.name)
}
self._tools_manager.load_tools(custom_tools)
except PromptflowException as e:
            # For PromptflowException, we don't wrap it, because we need to generate the ErrorResponse from the inner exception.
            # We will try to find a common way to handle this case.
raise e
except Exception as e:
raise ValueError(f"Failed to load custom tools for flow due to exception:\n {e}.") from e
for node in flow.nodes:
self._tools_manager.assert_loaded(node.name)
self._entry = entry
self._raise_ex = raise_ex
self._log_interval = 60
self._processing_idx = None
self._completed_idx = None
# TODO: Improve the experience about configuring node concurrency.
self._node_concurrency = DEFAULT_CONCURRENCY_BULK
@classmethod
def create(
cls,
flow_file: Path,
connections: dict,
working_dir: Optional[Path] = None,
*,
entry: Optional[str] = None,
storage: Optional[AbstractRunStorage] = None,
raise_ex: bool = True,
node_override: Optional[Dict[str, Dict[str, Any]]] = None,
line_timeout_sec: Optional[int] = None,
) -> "FlowExecutor":
"""Create a new instance of FlowExecutor.
:param flow_file: The path to the flow file.
:type flow_file: Path
:param connections: The connections to be used for the flow.
:type connections: dict
:param working_dir: The working directory to be used for the flow. Default is None.
:type working_dir: Optional[str]
        :param entry: The entry function name to be used for the flow when a .py file is provided. Default is None.
        :type entry: Optional[str]
:param storage: The storage to be used for the flow. Default is None.
:type storage: Optional[~promptflow.storage.AbstractRunStorage]
:param raise_ex: Whether to raise exceptions or not. Default is True.
:type raise_ex: Optional[bool]
:param node_override: The node overrides to be used for the flow. Default is None.
:type node_override: Optional[Dict[str, Dict[str, Any]]]
:param line_timeout_sec: The line timeout in seconds to be used for the flow. Default is LINE_TIMEOUT_SEC.
:type line_timeout_sec: Optional[int]
:return: A new instance of FlowExecutor.
:rtype: ~promptflow.executor.flow_executor.FlowExecutor
"""
if cls._is_eager_flow_yaml(flow_file, working_dir):
if Path(flow_file).suffix.lower() in [".yml", ".yaml"]:
entry, path = cls._parse_eager_flow_yaml(flow_file, working_dir)
flow_file = Path(path)
from ._script_executor import ScriptExecutor
return ScriptExecutor(
flow_file=flow_file,
entry=entry,
working_dir=working_dir,
storage=storage,
)
elif Path(flow_file).suffix.lower() in [".yml", ".yaml"]:
flow = Flow.from_yaml(flow_file, working_dir=working_dir)
return cls._create_from_flow(
flow_file=flow_file,
flow=flow,
connections=connections,
working_dir=working_dir,
entry=entry,
storage=storage,
raise_ex=raise_ex,
node_override=node_override,
line_timeout_sec=line_timeout_sec,
)
else:
raise InvalidFlowFileError(message_format="Unsupported flow file type: {flow_file}.", flow_file=flow_file)
@classmethod
def _create_from_flow(
cls,
flow: Flow,
connections: dict,
working_dir: Optional[Path],
*,
flow_file: Optional[Path] = None,
entry: Optional[str] = None,
storage: Optional[AbstractRunStorage] = None,
raise_ex: bool = True,
node_override: Optional[Dict[str, Dict[str, Any]]] = None,
line_timeout_sec: Optional[int] = None,
):
logger.debug("Start initializing the flow executor.")
working_dir = Flow._resolve_working_dir(flow_file, working_dir)
if node_override:
flow = flow._apply_node_overrides(node_override)
flow = flow._apply_default_node_variants()
package_tool_keys = [node.source.tool for node in flow.nodes if node.source and node.source.tool]
tool_resolver = ToolResolver(working_dir, connections, package_tool_keys)
with _change_working_dir(working_dir):
resolved_tools = [tool_resolver.resolve_tool_by_node(node) for node in flow.nodes]
flow = Flow(
flow.id, flow.name, [r.node for r in resolved_tools], inputs=flow.inputs, outputs=flow.outputs, tools=[]
)
        # ensure_flow_valid performs both validation and resolution.
        # TODO: 1) split pure validation and resolution out of the method below; 2) provide a complete validation().
flow = FlowValidator._validate_nodes_topology(flow)
flow.outputs = FlowValidator._ensure_outputs_valid(flow)
if storage is None:
storage = DefaultRunStorage()
run_tracker = RunTracker(storage)
cache_manager = AbstractCacheManager.init_from_env()
executor = FlowExecutor(
flow=flow,
connections=connections,
run_tracker=run_tracker,
cache_manager=cache_manager,
loaded_tools={r.node.name: r.callable for r in resolved_tools},
entry=entry,
raise_ex=raise_ex,
working_dir=working_dir,
line_timeout_sec=line_timeout_sec,
flow_file=flow_file,
)
logger.debug("The flow executor is initialized successfully.")
return executor
@classmethod
def _is_eager_flow_yaml(cls, flow_file: Path, working_dir: Optional[Path] = None):
if Path(flow_file).suffix.lower() == ".py":
return True
elif Path(flow_file).suffix.lower() in [".yaml", ".yml"]:
flow_file = working_dir / flow_file if working_dir else flow_file
with open(flow_file, "r", encoding="utf-8") as fin:
flow_dag = load_yaml(fin)
if "entry" in flow_dag:
return True
return False
@classmethod
def _parse_eager_flow_yaml(cls, flow_file: Path, working_dir: Optional[Path] = None):
flow_file = working_dir / flow_file if working_dir else flow_file
with open(flow_file, "r", encoding="utf-8") as fin:
flow_dag = load_yaml(fin)
return flow_dag.get("entry", ""), flow_dag.get("path", "")
@classmethod
def load_and_exec_node(
cls,
flow_file: Path,
node_name: str,
*,
storage: AbstractRunStorage = None,
output_sub_dir: Optional[str] = None,
flow_inputs: Optional[Mapping[str, Any]] = None,
dependency_nodes_outputs: Optional[Mapping[str, Any]] = None,
connections: Optional[dict] = None,
working_dir: Optional[Path] = None,
raise_ex: bool = False,
):
"""Load and execute a single node from the flow.
:param flow_file: The path to the flow file.
:type flow_file: Path
:param node_name: The name of the node to be executed.
:type node_name: str
:param storage: The storage to be used for the flow.
:type storage: Optional[~promptflow.storage.AbstractRunStorage]
        :param output_sub_dir: The directory to persist images for the flow. Kept only for backward compatibility.
:type output_sub_dir: Optional[str]
:param flow_inputs: The inputs to be used for the flow. Default is None.
:type flow_inputs: Optional[Mapping[str, Any]]
:param dependency_nodes_outputs: The outputs of the dependency nodes. Default is None.
        :type dependency_nodes_outputs: Optional[Mapping[str, Any]]
:param connections: The connections to be used for the flow. Default is None.
:type connections: Optional[dict]
:param working_dir: The working directory to be used for the flow. Default is None.
:type working_dir: Optional[str]
:param raise_ex: Whether to raise exceptions or not. Default is False.
:type raise_ex: Optional[bool]
"""
# Inject OpenAI API to make sure traces and headers injection works and
# update OpenAI API configs from environment variables.
inject_openai_api()
OperationContext.get_instance().run_mode = RunMode.SingleNode.name
dependency_nodes_outputs = dependency_nodes_outputs or {}
# Load the node from the flow file
working_dir = Flow._resolve_working_dir(flow_file, working_dir)
with open(working_dir / flow_file, "r") as fin:
flow = Flow.deserialize(load_yaml(fin))
node = flow.get_node(node_name)
if node is None:
raise SingleNodeValidationError(
message_format=(
"Validation failed when attempting to execute the node. "
"Node '{node_name}' is not found in flow '{flow_file}'. "
"Please change node name or correct the flow file."
),
node_name=node_name,
flow_file=flow_file,
)
if not node.source or not node.type:
raise SingleNodeValidationError(
message_format=(
"Validation failed when attempting to execute the node. "
"Properties 'source' or 'type' are not specified for Node '{node_name}' in flow '{flow_file}'. "
"Please make sure these properties are in place and try again."
),
node_name=node_name,
flow_file=flow_file,
)
# Only load the node's referenced flow inputs
node_referenced_flow_inputs = FlowExecutor._get_node_referenced_flow_inputs(node, flow.inputs)
inputs_with_default_value = apply_default_value_for_input(node_referenced_flow_inputs, flow_inputs)
converted_flow_inputs_for_node = FlowValidator.convert_flow_inputs_for_node(
flow, node, inputs_with_default_value
)
inputs = load_multimedia_data(node_referenced_flow_inputs, converted_flow_inputs_for_node)
dependency_nodes_outputs = load_multimedia_data_recursively(dependency_nodes_outputs)
package_tool_keys = [node.source.tool] if node.source and node.source.tool else []
tool_resolver = ToolResolver(working_dir, connections, package_tool_keys)
resolved_node = tool_resolver.resolve_tool_by_node(node)
# Prepare callable and real inputs here
resolved_inputs = {}
for k, v in resolved_node.node.inputs.items():
value = _input_assignment_parser.parse_value(v, dependency_nodes_outputs, inputs)
resolved_inputs[k] = value
if resolved_node.node.aggregation:
# For aggregation node, we need to convert value to list.
if (
v.value_type == InputValueType.FLOW_INPUT
or v.value_type == InputValueType.NODE_REFERENCE
and flow.is_normal_node(v.value)
):
resolved_inputs[k] = [value]
# Note that the init args are only used when resolving the tool,
# so we need to remove them from the inputs before invoking.
resolved_inputs = {k: v for k, v in resolved_inputs.items() if k not in resolved_node.init_args}
if storage is None:
sub_dir = "." if output_sub_dir is None else output_sub_dir
storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path(sub_dir))
run_tracker = RunTracker(storage)
with run_tracker.node_log_manager:
# Will generate node run in context
context = FlowExecutionContext(
name=flow.name,
run_tracker=run_tracker,
cache_manager=AbstractCacheManager.init_from_env(),
)
try:
if inspect.iscoroutinefunction(resolved_node.callable):
asyncio.run(
context.invoke_tool_async(resolved_node.node, resolved_node.callable, kwargs=resolved_inputs),
)
else:
context.invoke_tool(resolved_node.node, resolved_node.callable, kwargs=resolved_inputs)
except Exception:
if raise_ex: # Only raise exception when raise_ex is True
raise
node_runs = run_tracker.collect_node_runs()
if len(node_runs) != 1:
                # Should not happen unless there is a bug in run_tracker or thread control.
raise UnexpectedError(
message_format=(
"Single node execution failed. Expected one node result, "
"but received {node_result_num}. Please contact support for further assistance."
),
node_result_num=len(node_runs),
)
return node_runs[0]
@staticmethod
def update_environment_variables_with_connections(connections: dict):
"""Update environment variables with connections.
:param connections: A dictionary containing connection information.
:type connections: dict
:return: A dictionary containing updated environment variables.
:rtype: dict
"""
from promptflow._sdk._utils import update_environment_variables_with_connections
return update_environment_variables_with_connections(connections)
def convert_flow_input_types(self, inputs: dict) -> Mapping[str, Any]:
"""Convert the input types of the given inputs dictionary to match the expected types of the flow.
:param inputs: A dictionary containing the inputs to the flow.
:type inputs: dict
:return: A dictionary containing the converted inputs.
:rtype: Mapping[str, Any]
"""
return FlowValidator.resolve_flow_inputs_type(self._flow, inputs)
@property
def _default_inputs_mapping(self):
return {key: f"${{data.{key}}}" for key in self._flow.inputs}
@property
def has_aggregation_node(self) -> bool:
"""Check if the flow executor has any aggregation nodes.
:return: True if the flow executor has at least one aggregation node, False otherwise.
:rtype: bool
"""
return len(self._aggregation_nodes) > 0
@property
def aggregation_nodes(self):
"""Get the aggregation nodes of the flow executor.
:return: A list of aggregation nodes.
:rtype: list
"""
return self._aggregation_nodes
def _fill_lines(self, indexes, values, nlines):
"""Fill the values into the result list according to the indexes."""
result = [None] * nlines
for idx, value in zip(indexes, values):
result[idx] = value
return result
def _exec_aggregation_with_bulk_results(
self,
batch_inputs: List[dict],
results: List[LineResult],
run_id=None,
) -> AggregationResult:
if not self.aggregation_nodes:
return AggregationResult({}, {}, {})
logger.info("Executing aggregation nodes...")
run_infos = [r.run_info for r in results]
succeeded = [i for i, r in enumerate(run_infos) if r.status == Status.Completed]
succeeded_batch_inputs = [batch_inputs[i] for i in succeeded]
resolved_succeeded_batch_inputs = [
FlowValidator.ensure_flow_inputs_type(flow=self._flow, inputs=input) for input in succeeded_batch_inputs
]
succeeded_inputs = transpose(resolved_succeeded_batch_inputs, keys=list(self._flow.inputs.keys()))
aggregation_inputs = transpose(
[result.aggregation_inputs for result in results],
keys=self._aggregation_inputs_references,
)
succeeded_aggregation_inputs = collect_lines(succeeded, aggregation_inputs)
try:
aggr_results = self._exec_aggregation(succeeded_inputs, succeeded_aggregation_inputs, run_id)
logger.info("Finish executing aggregation nodes.")
return aggr_results
except PromptflowException as e:
            # For PromptflowException, classification is already done, so raise it directly.
raise e
except Exception as e:
error_type_and_message = f"({e.__class__.__name__}) {e}"
raise UnexpectedError(
message_format=(
"Unexpected error occurred while executing the aggregated nodes. "
"Please fix or contact support for assistance. The error details: {error_type_and_message}."
),
error_type_and_message=error_type_and_message,
) from e
@staticmethod
def _try_get_aggregation_input(val: InputAssignment, aggregation_inputs: dict):
if val.value_type != InputValueType.NODE_REFERENCE:
return val
serialized_val = val.serialize()
if serialized_val not in aggregation_inputs:
return val
return InputAssignment(value=aggregation_inputs[serialized_val])
def get_status_summary(self, run_id: str):
"""Get a summary of the status of a given run.
:param run_id: The ID of the run to get the status summary for.
:type run_id: str
:return: A summary of the status of the given run.
:rtype: str
"""
return self._run_tracker.get_status_summary(run_id)
def exec_aggregation(
self,
inputs: Mapping[str, Any],
aggregation_inputs: Mapping[str, Any],
run_id=None,
node_concurrency=DEFAULT_CONCURRENCY_FLOW,
) -> AggregationResult:
"""Execute the aggregation node of the flow.
:param inputs: A mapping of input names to their values.
:type inputs: Mapping[str, Any]
:param aggregation_inputs: A mapping of aggregation input names to their values.
:type aggregation_inputs: Mapping[str, Any]
:param run_id: The ID of the current run, if any.
:type run_id: Optional[str]
:param node_concurrency: The maximum number of nodes that can be executed concurrently.
:type node_concurrency: int
:return: The result of the aggregation node.
:rtype: ~promptflow.executor._result.AggregationResult
:raises: FlowError if the inputs or aggregation_inputs are invalid.
"""
self._node_concurrency = node_concurrency
aggregated_flow_inputs = dict(inputs or {})
aggregation_inputs = dict(aggregation_inputs or {})
FlowValidator._validate_aggregation_inputs(aggregated_flow_inputs, aggregation_inputs)
aggregated_flow_inputs = self._apply_default_value_for_aggregation_input(
self._flow.inputs, aggregated_flow_inputs, aggregation_inputs
)
        # Resolve aggregated_flow_inputs from lists of strings to lists of objects whose types are specified in the YAML file.
        # TODO: For now, we resolve the type for a batch run's aggregation input in _exec_aggregation_with_bulk_results.
        # If we decide to merge the resolve logic into one place, remember to take care of the index for batch runs.
resolved_aggregated_flow_inputs = FlowValidator.resolve_aggregated_flow_inputs_type(
self._flow, aggregated_flow_inputs
)
with self._run_tracker.node_log_manager:
return self._exec_aggregation(resolved_aggregated_flow_inputs, aggregation_inputs, run_id)
@staticmethod
def _apply_default_value_for_aggregation_input(
inputs: Dict[str, FlowInputDefinition],
aggregated_flow_inputs: Mapping[str, Any],
aggregation_inputs: Mapping[str, Any],
):
aggregation_lines = 1
if aggregated_flow_inputs.values():
one_input_value = list(aggregated_flow_inputs.values())[0]
aggregation_lines = len(one_input_value)
# If aggregated_flow_inputs is empty, we should use aggregation_inputs to get the length.
elif aggregation_inputs.values():
one_input_value = list(aggregation_inputs.values())[0]
aggregation_lines = len(one_input_value)
for key, value in inputs.items():
if key not in aggregated_flow_inputs and (value and value.default is not None):
aggregated_flow_inputs[key] = [value.default] * aggregation_lines
return aggregated_flow_inputs
def _exec_aggregation(
self,
inputs: Mapping[str, Any],
aggregation_inputs: Mapping[str, Any],
run_id=None,
) -> AggregationResult:
if not self._flow.has_aggregation_node:
return AggregationResult({}, {}, {})
run_id = run_id or str(uuid.uuid4())
nodes = [copy.deepcopy(node) for node in self._flow.nodes if node.aggregation]
# Update the inputs of the aggregation nodes with the aggregation inputs.
for node in nodes:
node.inputs = {
k: FlowExecutor._try_get_aggregation_input(v, aggregation_inputs) for k, v in node.inputs.items()
}
# Load multimedia data for the flow inputs of aggregation nodes.
inputs = load_multimedia_data(self._flow.inputs, inputs)
        # TODO: Use a new run tracker to avoid memory growing indefinitely.
run_tracker = self._run_tracker
context = FlowExecutionContext(
name=self._flow.name,
run_tracker=run_tracker,
cache_manager=self._cache_manager,
run_id=run_id,
flow_id=self._flow_id,
)
metrics = {}
def _log_metric(key, value):
metrics[key] = value
add_metric_logger(_log_metric)
try:
self._submit_to_scheduler(context, inputs, nodes)
node_run_infos = run_tracker.collect_child_node_runs(run_id)
            # Output is set to an empty dict because the design for aggregation outputs is not finalized.
return AggregationResult({}, metrics, {run.node: run for run in node_run_infos})
except Exception:
if self._raise_ex:
raise
node_run_infos = run_tracker.collect_child_node_runs(run_id)
return AggregationResult({}, metrics, {run.node: run for run in node_run_infos})
finally:
remove_metric_logger(_log_metric)
def exec(self, inputs: dict, node_concurrency=DEFAULT_CONCURRENCY_FLOW) -> dict:
"""Executes the flow with the given inputs and returns the output.
:param inputs: A dictionary containing the input values for the flow.
:type inputs: dict
:param node_concurrency: The maximum number of nodes that can be executed concurrently.
:type node_concurrency: int
:return: A dictionary containing the output values of the flow.
:rtype: dict
"""
self._node_concurrency = node_concurrency
inputs = apply_default_value_for_input(self._flow.inputs, inputs)
result = self._exec(inputs)
        # TODO: remove this line once serving calls self.exec_line directly.
self._add_line_results([result])
return result.output or {}
def _exec_in_thread(self, args) -> LineResult:
inputs, run_id, line_number, variant_id, validate_inputs = args
thread_name = current_thread().name
self._processing_idx[line_number] = thread_name
self._run_tracker._activate_in_context()
results = self._exec(
inputs, run_id=run_id, line_number=line_number, variant_id=variant_id, validate_inputs=validate_inputs
)
self._run_tracker._deactivate_in_context()
self._processing_idx.pop(line_number)
self._completed_idx[line_number] = thread_name
return results
def _extract_aggregation_inputs(self, nodes_outputs: dict):
return {
prop: self._extract_aggregation_input(nodes_outputs, prop) for prop in self._aggregation_inputs_references
}
def _extract_aggregation_input(self, nodes_outputs: dict, aggregation_input_property: str):
assign = InputAssignment.deserialize(aggregation_input_property)
return _input_assignment_parser.parse_value(assign, nodes_outputs, {})
def exec_line(
self,
inputs: Mapping[str, Any],
index: Optional[int] = None,
run_id: Optional[str] = None,
variant_id: str = "",
validate_inputs: bool = True,
node_concurrency=DEFAULT_CONCURRENCY_FLOW,
allow_generator_output: bool = False,
line_timeout_sec: Optional[int] = None,
) -> LineResult:
"""Execute a single line of the flow.
:param inputs: The input values for the line.
:type inputs: Mapping[str, Any]
:param index: The index of the line to execute.
:type index: Optional[int]
:param run_id: The ID of the flow run.
:type run_id: Optional[str]
:param variant_id: The ID of the variant to execute.
:type variant_id: str
:param validate_inputs: Whether to validate the input values.
:type validate_inputs: bool
:param node_concurrency: The maximum number of nodes that can be executed concurrently.
:type node_concurrency: int
:param allow_generator_output: Whether to allow generator output.
:type allow_generator_output: bool
:param line_timeout_sec: The maximum time to wait for a line of output.
:type line_timeout_sec: Optional[int]
:return: The result of executing the line.
:rtype: ~promptflow.executor._result.LineResult
"""
self._node_concurrency = node_concurrency
# TODO: Pass line_timeout_sec to flow node scheduler instead of updating self._line_timeout_sec
self._line_timeout_sec = line_timeout_sec or self._line_timeout_sec
inputs = apply_default_value_for_input(self._flow.inputs, inputs)
# For flow run, validate inputs as default
with self._run_tracker.node_log_manager:
# exec_line interface may be called when executing a batch run, so we only set run_mode as flow run when
# it is not set.
operation_context = OperationContext.get_instance()
operation_context.run_mode = operation_context.get("run_mode", None) or RunMode.Test.name
line_result = self._exec(
inputs,
run_id=run_id,
line_number=index,
variant_id=variant_id,
validate_inputs=validate_inputs,
allow_generator_output=allow_generator_output,
)
# Return line result with index
if index is not None and isinstance(line_result.output, dict):
line_result.output[LINE_NUMBER_KEY] = index
return line_result
def _add_line_results(self, line_results: List[LineResult], run_tracker: Optional[RunTracker] = None):
run_tracker = run_tracker or self._run_tracker
run_tracker._flow_runs.update({result.run_info.run_id: result.run_info for result in line_results})
run_tracker._node_runs.update(
{
node_run_info.run_id: node_run_info
for result in line_results
for node_run_info in result.node_run_infos.values()
}
)
@staticmethod
def _get_node_referenced_flow_inputs(
node, flow_inputs: Dict[str, FlowInputDefinition]
) -> Dict[str, FlowInputDefinition]:
node_referenced_flow_inputs = {}
for _, value in node.inputs.items():
            # Only add a flow input to node_referenced_flow_inputs when it exists and is referenced by the node.
            # If the flow input does not exist, an exception will be raised in FlowValidator.convert_flow_inputs_for_node.
if value.value_type == InputValueType.FLOW_INPUT and value.value in flow_inputs:
node_referenced_flow_inputs[value.value] = flow_inputs[value.value]
return node_referenced_flow_inputs
def _exec(
self,
inputs: Mapping[str, Any],
run_id: Optional[str] = None,
line_number: Optional[int] = None,
variant_id: str = "",
validate_inputs: bool = False,
allow_generator_output: bool = False,
) -> LineResult:
"""execute line run
Args:
inputs (Mapping): flow inputs
run_id: the id to identify the flow run
line_number: line number for batch inputs
validate_inputs:
                Flag to indicate whether input validation is needed. It is used along with "_raise_ex" to
                determine whether an exception shall be raised if input validation (type check, etc.) fails.
                The flag defaults to True for a flow run and False for a bulk run.
allow_generator_output:
Flag to indicate if generator output is allowed.
Returns:
LineResult: Line run result
"""
run_id = run_id or str(uuid.uuid4())
line_run_id = run_id if line_number is None else f"{run_id}_{line_number}"
run_tracker = RunTracker(
self._run_tracker._storage, self._run_tracker._run_mode, self._run_tracker.node_log_manager
)
# We need to copy the allow_generator_types from the original run_tracker.
run_tracker.allow_generator_types = self._run_tracker.allow_generator_types
run_info: FlowRunInfo = run_tracker.start_flow_run(
flow_id=self._flow_id,
root_run_id=run_id,
run_id=line_run_id,
parent_run_id=run_id,
inputs={k: inputs[k] for k in self._flow.inputs if k in inputs},
index=line_number,
variant_id=variant_id,
)
context = FlowExecutionContext(
name=self._flow.name,
run_tracker=run_tracker,
cache_manager=self._cache_manager,
run_id=run_id,
flow_id=self._flow_id,
line_number=line_number,
variant_id=variant_id,
)
output = {}
aggregation_inputs = {}
try:
if validate_inputs:
inputs = FlowValidator.ensure_flow_inputs_type(flow=self._flow, inputs=inputs, idx=line_number)
inputs = load_multimedia_data(self._flow.inputs, inputs)
            # Make sure the run_info records the converted inputs rather than the original inputs.
run_info.inputs = inputs
output, nodes_outputs = self._traverse_nodes(inputs, context)
output = self._stringify_generator_output(output) if not allow_generator_output else output
# Persist the node runs for the nodes that have a generator output
generator_output_nodes = [
nodename for nodename, output in nodes_outputs.items() if isinstance(output, GeneratorType)
]
run_tracker.persist_selected_node_runs(run_info, generator_output_nodes)
run_tracker.allow_generator_types = allow_generator_output
run_tracker.end_run(line_run_id, result=output)
aggregation_inputs = self._extract_aggregation_inputs(nodes_outputs)
except KeyboardInterrupt as ex:
# Run will be cancelled when the process receives a SIGINT signal.
# KeyboardInterrupt will be raised after asyncio finishes its signal handling
# End run with the KeyboardInterrupt exception, so that its status will be Canceled
flow_logger.info("Received KeyboardInterrupt, cancel the run.")
run_tracker.end_run(line_run_id, ex=ex)
raise
except Exception as e:
run_tracker.end_run(line_run_id, ex=e)
if self._raise_ex:
raise
finally:
run_tracker._update_flow_run_info_with_node_runs(run_info)
run_tracker.persist_flow_run(run_info)
node_run_infos = run_tracker.collect_child_node_runs(line_run_id)
node_runs = {node_run.node: node_run for node_run in node_run_infos}
return LineResult(output, aggregation_inputs, run_info, node_runs)
def _extract_outputs(self, nodes_outputs, bypassed_nodes, flow_inputs):
outputs = {}
for name, output in self._flow.outputs.items():
if output.reference.value_type == InputValueType.LITERAL:
outputs[name] = output.reference.value
continue
if output.reference.value_type == InputValueType.FLOW_INPUT:
outputs[name] = flow_inputs[output.reference.value]
continue
if output.reference.value_type != InputValueType.NODE_REFERENCE:
raise NotSupported(
message_format=(
"The output type '{output_type}' is currently unsupported. "
"Please choose from available types: '{supported_output_type}' and try again."
),
output_type=output.reference.value_type.value
if hasattr(output.reference.value_type, "value")
else output.reference.value_type,
supported_output_type=[output_type.value for output_type in InputValueType],
)
node = next((n for n in self._flow.nodes if n.name == output.reference.value), None)
if not node:
raise OutputReferenceNotExist(
message_format=(
"The output '{output_name}' for flow is incorrect. The node '{node_name}' "
"referenced by the output '{output_name}' can not found in flow. "
"Please rectify the error in your flow and try again."
),
node_name=output.reference.value,
output_name=name,
)
if node.aggregation:
                # Note that referencing a reduce (aggregation) node in the output is not supported.
continue
if node.name not in nodes_outputs:
raise NodeOutputNotFound(
message_format=(
"The output '{output_name}' for flow is incorrect. "
"No outputs found for node '{node_name}'. Please review the problematic "
"output and rectify the error."
),
output_name=name,
node_name=node.name,
)
if output.reference.value in bypassed_nodes:
flow_logger.warning(
f"The node referenced by output:'{output.reference.value}' is bypassed, which is not recommended."
)
node_result = nodes_outputs[output.reference.value]
outputs[name] = _input_assignment_parser.parse_node_property(
output.reference.value, node_result, output.reference.property
)
return outputs
def _should_use_async(self):
return (
all(inspect.iscoroutinefunction(f) for f in self._tools_manager._tools.values())
or os.environ.get("PF_USE_ASYNC", "false").lower() == "true"
)
def _traverse_nodes(self, inputs, context: FlowExecutionContext) -> Tuple[dict, dict]:
batch_nodes = [node for node in self._flow.nodes if not node.aggregation]
outputs = {}
# TODO: Use a mixed scheduler to support both async and thread pool mode.
if self._should_use_async():
flow_logger.info("Start executing nodes in async mode.")
scheduler = AsyncNodesScheduler(self._tools_manager, self._node_concurrency)
nodes_outputs, bypassed_nodes = asyncio.run(scheduler.execute(batch_nodes, inputs, context))
else:
flow_logger.info("Start executing nodes in thread pool mode.")
nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context, inputs, batch_nodes)
outputs = self._extract_outputs(nodes_outputs, bypassed_nodes, inputs)
return outputs, nodes_outputs
def _stringify_generator_output(self, outputs: dict):
for k, v in outputs.items():
if isinstance(v, GeneratorType):
outputs[k] = "".join(str(chuck) for chuck in v)
return outputs
def _submit_to_scheduler(self, context: FlowExecutionContext, inputs, nodes: List[Node]) -> Tuple[dict, dict]:
if not isinstance(self._node_concurrency, int):
raise UnexpectedError(
message_format=(
"Flow execution failed. To proceed, ensure that a valid node concurrency value is set. "
"The current value is {current_value}. Please contact support for further assistance."
),
current_value=self._node_concurrency,
)
return FlowNodesScheduler(
self._tools_manager,
inputs,
nodes,
self._node_concurrency,
context,
).execute(self._line_timeout_sec)
@staticmethod
def apply_inputs_mapping(
inputs: Mapping[str, Mapping[str, Any]],
inputs_mapping: Mapping[str, str],
) -> Dict[str, Any]:
# TODO: This function will be removed after the batch engine refactoring is completed.
from promptflow.batch._batch_inputs_processor import apply_inputs_mapping
return apply_inputs_mapping(inputs, inputs_mapping)
def enable_streaming_for_llm_flow(self, stream_required: Callable[[], bool]):
"""Enable the LLM node that is connected to output to return streaming results controlled by `stream_required`.
If the stream_required callback returns True, the LLM node will return a generator of strings.
Otherwise, the LLM node will return a string.
:param stream_required: A callback that takes no arguments and returns a boolean value indicating whether \
streaming results should be enabled for the LLM node.
:type stream_required: Callable[[], bool]
:return: None
"""
for node in self._flow.nodes:
streaming_option_parameter = self._parse_streaming_option_parameter(node)
if (
streaming_option_parameter is not None
and self._flow.is_referenced_by_flow_output(node)
and not self._flow.is_referenced_by_other_node(node)
):
wrapper = _inject_stream_options(stream_required, streaming_option_parameter)
self._tools_manager.wrap_tool(node.name, wrapper=wrapper)
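# Hedged usage sketch (not part of the original source; `request` and `wants_stream` are
# hypothetical names): the callback is typically derived from the incoming request, for
# example an "Accept: text/event-stream" header in a serving scenario.
#   executor = FlowExecutor.create(flow_file, connections, working_dir)
#   executor.enable_streaming_for_llm_flow(lambda: wants_stream(request))
#   line_result = executor.exec_line(inputs, allow_generator_output=True)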
def _parse_streaming_option_parameter(self, node: Node) -> Optional[str]:
if self._flow.is_llm_node(node):
return "stream"
tool_function = self._tools_manager.get_tool(node.name)
return getattr(tool_function, STREAMING_OPTION_PARAMETER_ATTR, None)
def ensure_flow_is_serializable(self):
"""Ensure that the flow is serializable.
Some of the nodes may return a generator of strings to create streaming outputs.
This is useful when the flow is deployed as a web service.
However, in the interactive mode, the executor assumes that the node result is JSON serializable.
This method adds a wrapper to each node in the flow
to consume the streaming outputs and merge them into a string for executor usage.
:return: None
"""
for node in self._flow.nodes:
self._tools_manager.wrap_tool(node.name, wrapper=_ensure_node_result_is_serializable)
def _inject_stream_options(should_stream: Callable[[], bool], streaming_option_parameter="stream"):
"""Inject the stream options to the decorated function.
AzureOpenAI.completion and AzureOpenAI.chat tools support both stream and non-stream mode.
The stream mode is controlled by the "stream" parameter.
"""
def stream_option_decorator(f):
# We only wrap the function if it has a "stream" parameter
signature = inspect.signature(f)
if streaming_option_parameter not in signature.parameters:
return f
@functools.wraps(f)
def wrapper(*args, **kwargs):
kwargs = kwargs or {}
kwargs.update({streaming_option_parameter: should_stream()})
return f(*args, **kwargs)
return wrapper
return stream_option_decorator
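# Hedged illustration (not part of the original module; `fake_chat` is a hypothetical tool):
# _inject_stream_options wraps a tool so the "stream" argument is always decided by the
# callback, regardless of what the caller passes.
#   def fake_chat(prompt: str, stream: bool = False):
#       return "streaming" if stream else "plain"
#   wrapped = _inject_stream_options(lambda: True)(fake_chat)
#   assert wrapped("hi") == "streaming"  # stream forced to True by the callback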
def enable_streaming_for_llm_tool(f):
"""Enable the stream mode for LLM tools that support it.
:param f: The function to wrap.
:type f: function
:return: The wrapped function.
:rtype: function
AzureOpenAI.completion and AzureOpenAI.chat tools support both stream and non-stream mode.
The stream mode is turned off by default. Use this wrapper to turn it on.
"""
# We only wrap the function if it has a "stream" parameter
signature = inspect.signature(f)
if "stream" not in signature.parameters:
return f
@functools.wraps(f)
def wrapper(*args, **kwargs):
kwargs = kwargs or {}
kwargs.update(stream=True)
return f(*args, **kwargs)
return wrapper
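# Hedged illustration (not part of the original module; `fake_completion` is hypothetical):
# the decorator only takes effect when the wrapped function actually exposes a "stream"
# parameter; otherwise the function is returned unchanged.
#   @enable_streaming_for_llm_tool
#   def fake_completion(prompt: str, stream: bool = False):
#       return stream
#   assert fake_completion("hi") is True  # stream turned on by the wrapper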
def _ensure_node_result_is_serializable(f):
"""Ensure the node result is serializable.
Some of the nodes may return a generator of strings to create streaming outputs.
This is useful when the flow is deployed as a web service.
However, in the interactive mode, the executor assumes that the node result is JSON serializable.
This wrapper ensures the node result is serializable
by consuming the data from the generator and merging them into a string.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
result = f(*args, **kwargs)
if isinstance(result, GeneratorType):
result = "".join(str(trunk) for trunk in result)
return result
return wrapper
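# Hedged illustration (not part of the original module; `stream_words` is hypothetical):
# the wrapper drains a generator result into a plain string so the executor can JSON-serialize it.
#   @_ensure_node_result_is_serializable
#   def stream_words():
#       yield from ("Hello", " ", "world")
#   assert stream_words() == "Hello world"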
def execute_flow(
flow_file: Path,
working_dir: Path,
output_dir: Path,
connections: dict,
inputs: Mapping[str, Any],
*,
run_aggregation: bool = True,
enable_stream_output: bool = False,
allow_generator_output: bool = False, # TODO: remove this
**kwargs,
) -> LineResult:
"""Execute the flow, including aggregation nodes.
:param flow_file: The path to the flow file.
:type flow_file: Path
:param working_dir: The working directory of the flow.
:type working_dir: Path
:param output_dir: The output directory, specified as a path relative to working_dir.
:type output_dir: Path
:param connections: A dictionary containing connection information.
:type connections: dict
:param inputs: A dictionary containing the input values for the flow.
:type inputs: Mapping[str, Any]
:param run_aggregation: Whether to run the aggregation nodes after the line run. Default is True.
:type run_aggregation: Optional[bool]
:param enable_stream_output: Whether to allow stream (generator) output for flow output. Default is False.
:type enable_stream_output: Optional[bool]
:param kwargs: Other keyword arguments to create flow executor.
:type kwargs: Any
:return: The line result of executing the flow.
:rtype: ~promptflow.executor._result.LineResult
"""
flow_executor = FlowExecutor.create(flow_file, connections, working_dir, raise_ex=False, **kwargs)
flow_executor.enable_streaming_for_llm_flow(lambda: enable_stream_output)
with _change_working_dir(working_dir):
# execute nodes in the flow except the aggregation nodes
# TODO: remove index=0 after UX no longer requires a run id similar to batch runs
# (run_id_index, eg. xxx_0) for displaying the interface
line_result = flow_executor.exec_line(inputs, index=0, allow_generator_output=allow_generator_output)
# persist the output to the output directory
line_result.output = persist_multimedia_data(line_result.output, base_dir=working_dir, sub_dir=output_dir)
if run_aggregation and line_result.aggregation_inputs:
# convert inputs of aggregation to list type
flow_inputs = {k: [v] for k, v in inputs.items()}
aggregation_inputs = {k: [v] for k, v in line_result.aggregation_inputs.items()}
aggregation_results = flow_executor.exec_aggregation(flow_inputs, aggregation_inputs=aggregation_inputs)
line_result.node_run_infos = {**line_result.node_run_infos, **aggregation_results.node_run_infos}
line_result.run_info.metrics = aggregation_results.metrics
if isinstance(line_result.output, dict):
# remove line_number from output
line_result.output.pop(LINE_NUMBER_KEY, None)
return line_result
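# Hedged usage sketch (illustrative only; the flow path and inputs below are hypothetical):
#   result = execute_flow(
#       flow_file=Path("flows/my_flow/flow.dag.yaml"),
#       working_dir=Path("flows/my_flow"),
#       output_dir=Path(".outputs"),
#       connections={},
#       inputs={"question": "What is prompt flow?"},
#   )
#   print(result.run_info.status, result.output)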
| promptflow/src/promptflow/promptflow/executor/flow_executor.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/flow_executor.py",
"repo_id": "promptflow",
"token_count": 21318
} | 49 |
from types import GeneratorType
import pytest
from promptflow._utils.dataclass_serializer import serialize
from promptflow.contracts.run_info import Status
from promptflow.executor import FlowExecutor
from ..utils import get_yaml_file
@pytest.mark.usefixtures("dev_connections")
@pytest.mark.e2etest
class TestExecutorTraces:
def validate_openai_apicall(self, apicall: dict):
"""Validates an apicall dict.
Ensure that the trace output of openai api is a list of dicts.
Args:
apicall (dict): A dictionary representing apicall.
Raises:
AssertionError: If the API call is invalid.
"""
get_trace = False
if apicall.get("name", "") in (
"openai.api_resources.chat_completion.ChatCompletion.create",
"openai.api_resources.completion.Completion.create",
"openai.api_resources.embedding.Embedding.create",
"openai.resources.completions.Completions.create", # openai>=1.0.0
"openai.resources.chat.completions.Completions.create", # openai>=1.0.0
):
get_trace = True
output = apicall.get("output")
assert not isinstance(output, str)
assert isinstance(output, (list, dict))
if isinstance(output, list):
assert all(isinstance(item, dict) for item in output)
children = apicall.get("children", [])
if children is not None:
for child in children:
get_trace = get_trace or self.validate_openai_apicall(child)
return get_trace
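# Hedged example (illustrative only) of an apicall dict that this validator accepts as an
# OpenAI trace; the payload shape is hypothetical:
#   {
#       "name": "openai.resources.chat.completions.Completions.create",
#       "output": [{"choices": [{"delta": {"content": "hi"}}]}],
#       "children": None,
#   }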
def get_chat_input(stream):
return {
"question": "What is the capital of the United States of America?",
"chat_history": [],
"stream": stream,
}
def get_completion_input(stream):
return {"prompt": "What is the capital of the United States of America?", "stream": stream}
@pytest.mark.parametrize(
"flow_folder, inputs",
[
("openai_chat_api_flow", get_chat_input(False)),
("openai_chat_api_flow", get_chat_input(True)),
("openai_completion_api_flow", get_comletion_input(False)),
("openai_completion_api_flow", get_comletion_input(True)),
("llm_tool", {"topic": "Hello", "stream": False}),
("llm_tool", {"topic": "Hello", "stream": True}),
],
)
def test_executor_openai_api_flow(self, flow_folder, inputs, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
assert "total_tokens" in flow_result.run_info.system_metrics
assert flow_result.run_info.system_metrics["total_tokens"] > 0
get_traced = False
for api_call in flow_result.run_info.api_calls:
get_traced = get_traced or self.validate_openai_apicall(serialize(api_call))
assert get_traced is True
def test_executor_generator_tools(self, dev_connections):
executor = FlowExecutor.create(get_yaml_file("generator_tools"), dev_connections)
inputs = {"text": "This is a test"}
flow_result = executor.exec_line(inputs)
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
tool_trace = flow_result.run_info.api_calls[0]["children"][0]
generator_trace = tool_trace.get("children")[0]
assert generator_trace is not None
output = generator_trace.get("output")
assert isinstance(output, list)
@pytest.mark.parametrize("allow_generator_output", [False, True])
def test_trace_behavior_with_generator_node(self, dev_connections, allow_generator_output):
"""Test to verify the trace output list behavior for a flow with a generator node.
This test checks the trace output list in two scenarios based on the 'allow_generator_output' flag:
- When 'allow_generator_output' is True, the output list should initially be empty until the generator is
consumed.
- When 'allow_generator_output' is False, the output list should contain items produced by the generator node.
The test ensures that the trace accurately reflects the generator's consumption status and helps in monitoring
and debugging flow execution.
"""
# Set up executor with a flow that contains a generator node
executor = FlowExecutor.create(get_yaml_file("generator_nodes"), dev_connections)
inputs = {"text": "This is a test"}
# Execute the flow with the given inputs and 'allow_generator_output' setting
flow_result = executor.exec_line(inputs, allow_generator_output=allow_generator_output)
# Verify that the flow execution result is a dictionary and the flow has completed successfully
assert isinstance(flow_result.output, dict)
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
# Extract the trace for the generator node
tool_trace = flow_result.run_info.api_calls[0]["children"][0]
generator_output_trace = tool_trace.get("output")
# Verify that the trace output is a list
assert isinstance(generator_output_trace, list)
if allow_generator_output:
# If generator output is allowed, the trace list should be empty before consumption
assert not generator_output_trace
# Obtain the generator from the flow result
answer_gen = flow_result.output.get("answer")
assert isinstance(answer_gen, GeneratorType)
# Consume the generator and check that it yields text
try:
generated_text = next(answer_gen)
assert isinstance(generated_text, str)
# Verify the trace list contains the most recently generated item
assert generator_output_trace[-1] == generated_text
except StopIteration:
assert False, "Generator did not generate any text"
else:
# If generator output is not allowed, the trace list should contain generated items
assert generator_output_trace
assert all(isinstance(item, str) for item in generator_output_trace)
@pytest.mark.parametrize("flow_file", ["flow_with_trace", "flow_with_trace_async"])
def test_flow_with_trace(self, flow_file, dev_connections):
"""Tests to verify the flows that contains @trace marks.
They should generate traces with "Function" type and nested in the "Tool" traces.
This test case is to verify a flow like following structure, both sync and async mode:
.. code-block::
flow (Flow, 1.5s)
greetings (Tool, 1.5s)
get_user_name (Function, 1.0s)
is_valid_name (Function, 0.5s)
format_greeting (Function, 0.5s)
"""
executor = FlowExecutor.create(get_yaml_file(flow_file), dev_connections)
inputs = {"user_id": 1}
flow_result = executor.exec_line(inputs)
# Assert the run status is completed
assert flow_result.output == {"output": "Hello, User 1!"}
assert flow_result.run_info.status == Status.Completed
assert flow_result.run_info.api_calls is not None
# Verify the traces are as expected
api_calls = flow_result.run_info.api_calls
assert len(api_calls) == 1
# Assert the "flow" root level trace
flow_trace = api_calls[0]
assert flow_trace["name"] == "flow"
assert flow_trace["type"] == "Flow"
assert flow_trace["end_time"] - flow_trace["start_time"] == pytest.approx(1.5, abs=0.3)
assert len(flow_trace["children"]) == 1
assert flow_trace["system_metrics"]["duration"] == pytest.approx(1.5, abs=0.3)
assert flow_trace["system_metrics"]["prompt_tokens"] == 0
assert flow_trace["system_metrics"]["completion_tokens"] == 0
assert flow_trace["system_metrics"]["total_tokens"] == 0
# TODO: These assertions should be fixed after adding these fields to the top level trace
assert "inputs" not in flow_trace
assert "output" not in flow_trace
assert "error" not in flow_trace
# Assert the "greetings" tool
greetings_trace = flow_trace["children"][0]
assert greetings_trace["name"] == "greetings"
assert greetings_trace["type"] == "Tool"
assert greetings_trace["inputs"] == inputs
assert greetings_trace["output"] == {"greeting": "Hello, User 1!"}
assert greetings_trace["error"] is None
assert greetings_trace["children"] is not None
assert greetings_trace["end_time"] - greetings_trace["start_time"] == pytest.approx(1.5, abs=0.3)
assert len(greetings_trace["children"]) == 2
# TODO: verify the system metrics. This might need to be fixed.
assert greetings_trace["system_metrics"] == {}
# Assert the "get_user_name" function
get_user_name_trace = greetings_trace["children"][0]
assert get_user_name_trace["name"] == "get_user_name"
assert get_user_name_trace["type"] == "Function"
assert get_user_name_trace["inputs"] == {"user_id": 1}
assert get_user_name_trace["output"] == "User 1"
assert get_user_name_trace["error"] is None
assert get_user_name_trace["end_time"] - get_user_name_trace["start_time"] == pytest.approx(1.0, abs=0.2)
assert len(get_user_name_trace["children"]) == 1
# TODO: verify the system metrics. This might need to be fixed.
assert get_user_name_trace["system_metrics"] == {}
# Assert the "get_user_name/is_valid_name" function
is_valid_name_trace = get_user_name_trace["children"][0]
assert is_valid_name_trace["name"] == "is_valid_name"
assert is_valid_name_trace["type"] == "Function"
assert is_valid_name_trace["inputs"] == {"name": "User 1"}
assert is_valid_name_trace["output"] is True
assert is_valid_name_trace["error"] is None
# When running tests on macOS, execution takes longer, so we adjust abs to 0.15 and will extend it if needed.
assert is_valid_name_trace["end_time"] - is_valid_name_trace["start_time"] == pytest.approx(0.5, abs=0.15)
assert is_valid_name_trace["children"] == []
# TODO: verify the system metrics. This might need to be fixed.
assert is_valid_name_trace["system_metrics"] == {}
# Assert the "format_greeting" function
format_greeting_trace = greetings_trace["children"][1]
assert format_greeting_trace["name"] == "format_greeting"
assert format_greeting_trace["type"] == "Function"
assert format_greeting_trace["inputs"] == {"user_name": "User 1"}
assert format_greeting_trace["output"] == "Hello, User 1!"
assert format_greeting_trace["error"] is None
# When running tests on macOS, execution takes longer, so we adjust abs to 0.15 and will extend it if needed.
assert format_greeting_trace["end_time"] - format_greeting_trace["start_time"] == pytest.approx(0.5, abs=0.15)
assert format_greeting_trace["children"] == []
# TODO: verify the system metrics. This might need to be fixed.
assert format_greeting_trace["system_metrics"] == {}
| promptflow/src/promptflow/tests/executor/e2etests/test_traces.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/e2etests/test_traces.py",
"repo_id": "promptflow",
"token_count": 4733
} | 50 |
{
"tool_with_init_error": {
"class_name": "TestLoadErrorTool",
"function": "tool",
"inputs": {
"name": {"type": ["string"]}
},
"module": "tool_with_init_error",
"name": "Tool with init error",
"type": "python"
}
}
| promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error/package_tool_definition.json/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/package_tools/tool_with_init_error/package_tool_definition.json",
"repo_id": "promptflow",
"token_count": 151
} | 51 |
import pytest
from datetime import datetime
from dataclasses import dataclass
from typing import Dict, List
from promptflow._core.generator_proxy import GeneratorProxy
from promptflow._utils.dataclass_serializer import \
get_type, serialize, deserialize_dataclass, deserialize_value, assertEqual
from promptflow.contracts.run_info import RunInfo, Status
from promptflow._core.connection_manager import ConnectionManager
from promptflow.storage.run_records import NodeRunRecord
from unittest.mock import patch, Mock
import sys
def get_connection_dict():
return {
"azure_open_ai_connection": {
"type": "AzureOpenAIConnection",
"value": {
"api_key": "<azure-openai-key>",
"api_base": "<aoai-api-endpoint>",
"api_type": "azure",
"api_version": "2023-07-01-preview",
},
},
"custom_connection": {
"type": "CustomConnection",
"value": {
"api_key": "<your-key>",
"url": "<connection-endpoint>",
},
"module": "promptflow.connections",
"secret_keys": ["api_key"],
},
}
@pytest.mark.unittest
@pytest.mark.parametrize(
"type_input, expected",
[
(NodeRunRecord, NodeRunRecord),
([NodeRunRecord], List[NodeRunRecord]),
(dict(a=NodeRunRecord), Dict[str, NodeRunRecord]),
(int, int),
(str, str),
]
)
def test_get_type(type_input, expected):
assert get_type(type_input) == expected
@pytest.mark.unittest
def test_serialize_dataclass():
start_time = datetime(2023, 9, 4)
end_time = datetime(2023, 9, 4)
node_run_info = RunInfo(
node=None,
run_id=None,
flow_run_id=None,
status=Status.Completed,
inputs=None,
output=None,
metrics=None,
error=None,
parent_run_id=None,
start_time=start_time,
end_time=end_time,
index=0,
)
node_record = NodeRunRecord.from_run_info(node_run_info)
serialized_info = serialize(node_run_info)
serialized_record = serialize(node_record)
# test dataclass without serialize attribute
assert serialized_info['status'] == "Completed"
assert serialized_info['start_time'] == "2023-09-04T00:00:00Z"
assert deserialize_value(serialized_info, RunInfo) == node_run_info
# test dataclass with serialize attribute
assert serialized_record == node_record.serialize()
@pytest.mark.unittest
@pytest.mark.parametrize(
"value, value_type, expected",
[
(datetime(2023, 9, 4), datetime, "2023-09-04T00:00:00Z"),
(Status.Completed, Status, "Completed"),
([1, 2, 3], List[int], [1, 2, 3]),
({"a": 1, "b": 2}, Dict[str, int], {"a": 1, "b": 2}),
(1, int, 1),
("a", str, "a"),
]
)
def test_serialize_value(value, value_type, expected):
assert serialize(value) == expected
assert deserialize_value(serialize(value), value_type) == value
@pytest.mark.unittest
def test_serialize_remove_null():
value = {"a": 1, "b": None}
value_type = Dict[str, int]
assert deserialize_value(serialize(value, remove_null=True), value_type) == {"a": 1, "b": None}
@dataclass
class DummyDataClass:
name: str
age: int
assert serialize(DummyDataClass("Dummy", None), remove_null=True) == {'name': 'Dummy'}
@pytest.mark.unittest
def test_serialize_connection():
new_connection = get_connection_dict()
connection_manager = ConnectionManager(new_connection)
assert serialize(connection_manager.get("azure_open_ai_connection")) == "azure_open_ai_connection"
@pytest.mark.unittest
def test_serialize_generator():
def generator():
for i in range(3):
yield i
g = GeneratorProxy(generator())
next(g)
assert serialize(g) == [0]
@pytest.mark.unittest
@patch.dict('sys.modules', {'pydantic': None})
def test_import_pydantic_error():
# mock pydantic is not installed
class DummyClass:
def __init__(self, name, age):
self.name = name
self.age = age
dummy = DummyClass('Test', 20)
assert serialize(dummy) == dummy
@pytest.mark.unittest
@patch.dict('sys.modules', {'pydantic': Mock()})
def test_import_pydantic():
# mock pydantic is installed
class MockBaseModel:
def dict(self):
return {"key": "value"}
mock_value = MockBaseModel()
sys.modules['pydantic'].BaseModel = MockBaseModel
assert serialize(mock_value) == mock_value.dict()
assert serialize(123) == 123
@pytest.mark.unittest
def test_deserialize_dataclass():
# test when cls is not dataclass
with pytest.raises(ValueError):
deserialize_dataclass(int, 1)
# test when data is not a dict
with pytest.raises(ValueError):
deserialize_dataclass(NodeRunRecord, "NodeRunRecord")
@dataclass
class DummyDataClassWithDefault:
name: str = "Default Name"
age: int = 0
# test deserialize dataclass with default value
data = {"age": 25}
obj = deserialize_dataclass(DummyDataClassWithDefault, data)
assert obj.name == "Default Name"
assert obj.age == 25
@pytest.mark.unittest
@pytest.mark.parametrize(
"a, b, expected",
[
(1, 2, 1),
(Status.Completed, Status, Status.Completed),
(None, datetime, None),
("2022-01-01T00:00:00", datetime, datetime.fromisoformat("2022-01-01T00:00:00")),
]
)
def test_deserialize_value(a, b, expected):
assert deserialize_value(a, b) == expected
@pytest.mark.unittest
@pytest.mark.parametrize(
"a, b, path, are_equal",
[
# Test with identical dicts
({'key1': 'value1', 'key2': 'value2'}, {'key1': 'value1', 'key2': 'value2'}, \
"unittests/_utils/test_dataclass_serializer", True),
# Test with non-identical dicts
({'key1': 'value1', 'key2': 'value2'}, {'key1': 'value1', 'key3': 'value3'}, \
"unittests/_utils/test_dataclass_serializer", False),
# Test with identical lists
(['item1', 'item2'], ['item1', 'item2'], "", True),
# Test with non-identical lists
(['item1', 'item2'], ['item1', 'item3'], "", False),
# Test with other types
(1, 1, "", True),
(1, 2, "", False),
('string', 'string', "", True),
('string1', 'string2', "", False),
]
)
def test_assertEqual(a, b, path, are_equal):
if are_equal:
assertEqual(a, b, path)
else:
with pytest.raises(AssertionError):
assertEqual(a, b, path)
| promptflow/src/promptflow/tests/executor/unittests/_utils/test_dataclass_serializer.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/_utils/test_dataclass_serializer.py",
"repo_id": "promptflow",
"token_count": 2910
} | 52 |
from datetime import datetime
import pytest
from promptflow.batch._result import BatchResult, ErrorSummary, LineError, SystemMetrics
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
from promptflow.contracts.run_info import Status
from promptflow.executor._result import AggregationResult, LineResult
def get_node_run_infos(node_dict: dict, index=None, api_calls=None, system_metrics=None):
return {
k: NodeRunInfo(
node=k,
flow_run_id="flow_run_id",
run_id=f"{index}_run_id_{k}",
status=v,
inputs=[],
output={},
metrics={},
error={"code": "UserError", "message": "test message"} if v == Status.Failed else None,
parent_run_id="",
start_time=None,
end_time=None,
index=index,
api_calls=api_calls,
system_metrics=system_metrics,
)
for k, v in node_dict.items()
}
def get_flow_run_info(status_dict: dict, index: int):
status = Status.Failed if any(status == Status.Failed for status in status_dict.values()) else Status.Completed
error = {"code": "UserError", "message": "test message"} if status == Status.Failed else None
return FlowRunInfo(
run_id=f"{index}_run_id",
status=status,
error=error,
inputs={},
output={},
metrics={},
request=None,
parent_run_id="",
root_run_id="",
source_run_id="",
flow_id="",
start_time=datetime.utcnow(),
end_time=datetime.utcnow(),
index=index,
)
def get_line_results(line_dict: dict, api_calls=None, system_metrics=None):
return [
LineResult(
output={},
aggregation_inputs={},
run_info=get_flow_run_info(status_dict=v, index=k),
node_run_infos=get_node_run_infos(node_dict=v, index=k, api_calls=api_calls, system_metrics=system_metrics),
)
for k, v in line_dict.items()
]
def get_aggregation_result(aggr_dict: dict, api_calls=None, system_metrics=None):
return AggregationResult(
output={},
metrics={},
node_run_infos=get_node_run_infos(node_dict=aggr_dict, api_calls=api_calls, system_metrics=system_metrics),
)
def get_batch_result(line_dict, aggr_dict, line_api_calls=None, aggr_api_calls=None):
line_results = get_line_results(line_dict=line_dict, api_calls=line_api_calls)
aggr_result = get_aggregation_result(aggr_dict=aggr_dict, api_calls=aggr_api_calls)
return BatchResult.create(datetime.utcnow(), datetime.utcnow(), line_results=line_results, aggr_result=aggr_result)
def get_api_call(type, name, inputs={}, output={}, children=None):
return {"type": type, "name": name, "inputs": inputs, "output": output, "children": children}
@pytest.mark.unittest
class TestBatchResult:
def test_node_status(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
aggr_dict = {"aggr_0": Status.Completed, "aggr_1": Status.Failed, "aggr_2": Status.Bypassed}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict=aggr_dict)
assert batch_result.total_lines == 3
assert batch_result.completed_lines == 2
assert batch_result.failed_lines == 1
assert batch_result.node_status == {
"node_0.completed": 3,
"node_1.completed": 2,
"node_1.failed": 1,
"node_2.completed": 2,
"node_2.bypassed": 1,
"aggr_0.completed": 1,
"aggr_1.failed": 1,
"aggr_2.bypassed": 1,
}
def test_system_metrics(self):
from openai.types.completion import Completion, CompletionChoice
line_dict = {0: {"node_0": Status.Completed}}
aggr_dict = {"aggr_0": Status.Completed}
api_call_1 = get_api_call(
"LLM",
"openai.resources.completions.Completions.create",
inputs={"prompt": "Please tell me a joke.", "model": "text-davinci-003"},
output={"choices": [{"text": "text"}]},
)
api_call_2 = get_api_call(
"LLM",
"openai.resources.completions.Completions.create",
inputs={
"prompt": ["Please tell me a joke.", "Please tell me a joke about fruit."],
"model": "text-davinci-003",
},
output=[
Completion(
choices=[CompletionChoice(text="text", finish_reason="stop", index=0, logprobs=None)],
id="id",
created=0,
model="model",
object="text_completion",
),
Completion(
choices=[CompletionChoice(text="text", finish_reason="stop", index=0, logprobs=None)],
id="id",
created=0,
model="model",
object="text_completion",
),
],
)
line_api_calls = get_api_call("Chain", "Chain", children=[api_call_1, api_call_2])
aggr_api_call = get_api_call(
"LLM",
"openai.resources.chat.completions.Completions.create",
inputs={
"messages": [{"system": "You are a helpful assistant.", "user": "Please tell me a joke."}],
"model": "gpt-35-turbo",
},
output={"choices": [{"message": {"content": "content"}}]},
)
batch_result = get_batch_result(
line_dict=line_dict, aggr_dict=aggr_dict, line_api_calls=[line_api_calls], aggr_api_calls=[aggr_api_call]
)
assert batch_result.system_metrics.total_tokens == 42
assert batch_result.system_metrics.prompt_tokens == 38
assert batch_result.system_metrics.completion_tokens == 4
system_metrics_dict = {
"total_tokens": 42,
"prompt_tokens": 38,
"completion_tokens": 4,
}
assert system_metrics_dict.items() <= batch_result.system_metrics.to_dict().items()
@pytest.mark.parametrize(
"api_call",
[
get_api_call("LLM", "Completion", inputs="invalid"),
get_api_call("LLM", "Completion", output="invalid"),
get_api_call("LLM", "Invalid"),
get_api_call("LLM", "Completion"),
get_api_call("LLM", "Completion", inputs={"api_type": "azure"}),
get_api_call("LLM", "ChatCompletion", inputs={"api_type": "azure", "engine": "invalid"}),
],
)
def test_invalid_api_calls(self, api_call):
line_dict = {0: {"node_0": Status.Completed}}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict={}, line_api_calls=[api_call])
assert batch_result.system_metrics.total_tokens == 0
assert batch_result.system_metrics.completion_tokens == 0
assert batch_result.system_metrics.prompt_tokens == 0
def test_error_summary(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
2: {"node_0": Status.Completed, "node_1": Status.Completed, "node_2": Status.Bypassed},
}
aggr_dict = {
"aggr_0": Status.Completed,
"aggr_1": Status.Failed,
"aggr_2": Status.Bypassed,
"aggr_4": Status.Failed,
}
batch_result = get_batch_result(line_dict=line_dict, aggr_dict=aggr_dict)
assert batch_result.total_lines == 3
assert batch_result.failed_lines == 1
assert batch_result.error_summary.failed_system_error_lines == 0
assert batch_result.error_summary.failed_user_error_lines == 1
assert batch_result.error_summary.error_list == [
LineError(line_number=1, error={"code": "UserError", "message": "test message"}),
]
assert batch_result.error_summary.error_list[0].to_dict() == {
"line_number": 1,
"error": {
"code": "UserError",
"message": "test message",
},
}
assert batch_result.error_summary.aggr_error_dict == {
"aggr_1": {"code": "UserError", "message": "test message"},
"aggr_4": {"code": "UserError", "message": "test message"},
}
@pytest.mark.unittest
class TestErrorSummary:
def test_create(self):
line_dict = {
0: {"node_0": Status.Failed, "node_1": Status.Completed, "node_2": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Failed, "node_2": Status.Completed},
}
line_results = get_line_results(line_dict)
line_results[0].run_info.error = {"code": "SystemError", "message": "test system error message"}
aggr_dict = {"aggr_0": Status.Completed, "aggr_1": Status.Failed}
aggr_result = get_aggregation_result(aggr_dict)
error_summary = ErrorSummary.create(line_results, aggr_result)
assert error_summary.failed_user_error_lines == 1
assert error_summary.failed_system_error_lines == 1
assert error_summary.error_list == [
LineError(line_number=0, error={"code": "SystemError", "message": "test system error message"}),
LineError(line_number=1, error={"code": "UserError", "message": "test message"}),
]
assert error_summary.aggr_error_dict == {"aggr_1": {"code": "UserError", "message": "test message"}}
@pytest.mark.unittest
class TestSystemMetrics:
def test_create(self):
line_dict = {
0: {"node_0": Status.Completed, "node_1": Status.Completed},
1: {"node_0": Status.Completed, "node_1": Status.Completed},
}
line_system_metrics = {
"total_tokens": 5,
"prompt_tokens": 3,
"completion_tokens": 2,
}
line_results = get_line_results(line_dict, system_metrics=line_system_metrics)
aggr_dict = {"aggr_0": Status.Completed}
# invalid system metrics
aggr_system_metrics = {
"total_tokens": 10,
"prompt_tokens": 6,
}
aggr_result = get_aggregation_result(aggr_dict, system_metrics=aggr_system_metrics)
system_metrics = SystemMetrics.create(datetime.utcnow(), datetime.utcnow(), line_results, aggr_result)
assert system_metrics.total_tokens == 20
assert system_metrics.prompt_tokens == 12
assert system_metrics.completion_tokens == 8
system_metrics_dict = {
"total_tokens": 20,
"prompt_tokens": 12,
"completion_tokens": 8,
}
assert system_metrics_dict.items() <= system_metrics.to_dict().items()
| promptflow/src/promptflow/tests/executor/unittests/batch/test_result.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/batch/test_result.py",
"repo_id": "promptflow",
"token_count": 5308
} | 53 |
import pytest
from promptflow.contracts.flow import Flow, FlowInputDefinition
from promptflow.contracts.tool import ValueType
from promptflow.executor._errors import InputParseError, InputTypeError, InvalidAggregationInput, InvalidFlowRequest
from promptflow.executor.flow_validator import FlowValidator
from ...utils import WRONG_FLOW_ROOT, get_flow_from_folder
@pytest.mark.unittest
class TestFlowValidator:
@pytest.mark.parametrize(
"flow_folder, expected_node_order",
[
("unordered_nodes", ["first_node", "second_node", "third_node"]),
("unordered_nodes_with_skip", ["first_node", "second_node", "third_node"]),
("unordered_nodes_with_activate", ["first_node", "second_node", "third_node"]),
],
)
def test_ensure_nodes_order(self, flow_folder, expected_node_order):
flow = get_flow_from_folder(flow_folder)
flow = FlowValidator._ensure_nodes_order(flow)
node_order = [node.name for node in flow.nodes]
assert node_order == expected_node_order
@pytest.mark.parametrize(
"flow_folder, error_message",
[
(
"nodes_cycle",
(
"Invalid node definitions found in the flow graph. Node circular dependency has been detected "
"among the nodes in your flow. Kindly review the reference relationships for the nodes "
"['first_node', 'second_node'] and resolve the circular reference issue in the flow."
),
),
(
"nodes_cycle_with_activate",
(
"Invalid node definitions found in the flow graph. Node circular dependency has been detected "
"among the nodes in your flow. Kindly review the reference relationships "
"for the nodes ['first_node', 'second_node'] and resolve the circular reference issue in the flow."
),
),
(
"wrong_node_reference",
(
"Invalid node definitions found in the flow graph. Node 'second_node' references a non-existent "
"node 'third_node' in your flow. Please review your flow to ensure that the node "
"name is accurately specified."
),
),
(
"non_aggregation_reference_aggregation",
(
"Invalid node definitions found in the flow graph. Non-aggregate node 'test_node' "
"cannot reference aggregate nodes {'calculate_accuracy'}. Please review and rectify "
"the node reference."
),
),
(
"aggregation_activate_reference_non_aggregation",
(
"Invalid node definitions found in the flow graph. Non-aggregation node 'grade' cannot be "
"referenced in the activate config of the aggregation node 'calculate_accuracy'. Please "
"review and rectify the node reference."
),
),
],
)
def test_ensure_nodes_order_with_exception(self, flow_folder, error_message):
flow = get_flow_from_folder(flow_folder, root=WRONG_FLOW_ROOT)
with pytest.raises(InvalidFlowRequest) as e:
FlowValidator._ensure_nodes_order(flow)
assert str(e.value) == error_message, "Expected: {}, Actual: {}".format(error_message, str(e.value))
@pytest.mark.parametrize(
"aggregated_flow_inputs, aggregation_inputs, error_message",
[
(
{},
{
"input1": "value1",
},
"The input for aggregation is incorrect. "
"The value for aggregated reference input 'input1' should be a list, "
"but received str. Please adjust the input value to match the expected format.",
),
(
{
"input1": "value1",
},
{},
"The input for aggregation is incorrect. "
"The value for aggregated flow input 'input1' should be a list, "
"but received str. Please adjust the input value to match the expected format.",
),
(
{"input1": ["value1_1", "value1_2"]},
{"input_2": ["value2_1"]},
"The input for aggregation is incorrect. The length of all aggregated inputs should be the same. "
"Current input lengths are: {'input1': 2, 'input_2': 1}. "
"Please adjust the input value in your input data.",
),
(
{
"input1": "value1",
},
{
"input1": "value1",
},
"The input for aggregation is incorrect. "
"The input 'input1' appears in both aggregated flow input and aggregated reference input. "
"Please remove one of them and try the operation again.",
),
],
)
def test_validate_aggregation_inputs_error(self, aggregated_flow_inputs, aggregation_inputs, error_message):
with pytest.raises(InvalidAggregationInput) as e:
FlowValidator._validate_aggregation_inputs(aggregated_flow_inputs, aggregation_inputs)
assert str(e.value) == error_message
@pytest.mark.parametrize(
"flow_folder",
["simple_flow_with_python_tool_and_aggregate"],
)
def test_ensure_outputs_valid_with_aggregation(self, flow_folder):
flow = get_flow_from_folder(flow_folder)
assert flow.outputs["content"] is not None
assert flow.outputs["aggregate_content"] is not None
flow.outputs = FlowValidator._ensure_outputs_valid(flow)
print(flow.outputs)
assert flow.outputs["content"] is not None
assert flow.outputs.get("aggregate_content") is None
@pytest.mark.parametrize(
"flow_folder, inputs, index, error_type, error_message",
[
(
"flow_with_list_input",
{"key": "['hello']"},
None,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'key' was "
"interpreted as JSON string since its type is 'list'. However, the value "
"'['hello']' is invalid for JSON parsing. Error details: (JSONDecodeError) "
"Expecting value: line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
(
"flow_with_list_input",
{"key": "['hello']"},
0,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'key' in line 0 of input data was "
"interpreted as JSON string since its type is 'list'. However, the value "
"'['hello']' is invalid for JSON parsing. Error details: (JSONDecodeError) "
"Expecting value: line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
],
)
def test_resolve_flow_inputs_type_json_error_for_list_type(
self, flow_folder, inputs, index, error_type, error_message
):
flow = get_flow_from_folder(flow_folder)
with pytest.raises(error_type) as exe_info:
FlowValidator.resolve_flow_inputs_type(flow, inputs, idx=index)
assert error_message == exe_info.value.message
@pytest.mark.parametrize(
"inputs, expected_result",
[({"test_input": ["1", "2"]}, {"test_input": [1, 2]})],
)
def test_resolve_aggregated_flow_inputs_type(self, inputs, expected_result):
flow = Flow(
id="fakeId",
name=None,
nodes=[],
inputs={"test_input": FlowInputDefinition(type=ValueType.INT)},
outputs=None,
tools=[],
)
result = FlowValidator.resolve_aggregated_flow_inputs_type(flow, inputs)
assert result == expected_result
@pytest.mark.parametrize(
"inputs, expected_message",
[
(
{"test_input": ["1", "str"]},
(
"The input for flow is incorrect. The value for flow input 'test_input' in line 1 of input data "
"does not match the expected type 'int'. "
"Please change flow input type or adjust the input value in your input data."
),
)
],
)
def test_resolve_aggregated_flow_inputs_type_error(self, inputs, expected_message):
flow = Flow(
id="fakeId",
name=None,
nodes=[],
inputs={"test_input": FlowInputDefinition(type=ValueType.INT)},
outputs=None,
tools=[],
)
with pytest.raises(InputTypeError) as ex:
FlowValidator.resolve_aggregated_flow_inputs_type(flow, inputs)
assert expected_message == str(ex.value)
@pytest.mark.parametrize(
"input, type, expected_result",
[
("1", ValueType.INT, 1),
('["1", "2"]', ValueType.LIST, ["1", "2"]),
],
)
def test_parse_input_value(self, input, type, expected_result):
input_key = "test_input"
result = FlowValidator._parse_input_value(input_key, input, type)
assert result == expected_result
@pytest.mark.parametrize(
"input, type, index, error_type, expected_message",
[
(
"str",
ValueType.INT,
None,
InputTypeError,
(
"The input for flow is incorrect. The value for flow input 'my_input' does not match the expected "
"type 'int'. Please change flow input type or adjust the input value in your input data."
),
),
(
"['1', '2']",
ValueType.LIST,
None,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'my_input' was interpreted as JSON "
"string since its type is 'list'. However, the value '['1', '2']' is invalid for JSON parsing. "
"Error details: (JSONDecodeError) Expecting value: line 1 column 2 (char 1). "
"Please make sure your inputs are properly formatted."
),
),
(
"str",
ValueType.INT,
10,
InputTypeError,
(
"The input for flow is incorrect. The value for flow input 'my_input' in line 10 of "
"input data does not match the expected type 'int'. "
"Please change flow input type or adjust the input value in your input data."
),
),
(
"['1', '2']",
ValueType.LIST,
10,
InputParseError,
(
"Failed to parse the flow input. The value for flow input 'my_input' in line 10 of input data "
"was interpreted as JSON string since its type is 'list'. However, the value '['1', '2']' is "
"invalid for JSON parsing. Error details: (JSONDecodeError) Expecting value: "
"line 1 column 2 (char 1). Please make sure your inputs are properly formatted."
),
),
],
)
def test_parse_input_value_error(self, input, type, index, error_type, expected_message):
input_key = "my_input"
with pytest.raises(error_type) as ex:
FlowValidator._parse_input_value(input_key, input, type, index)
assert expected_message == str(ex.value)
| promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_validator.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/executor/test_flow_validator.py",
"repo_id": "promptflow",
"token_count": 5912
} | 54 |
import os
import sys
import timeit
from typing import Callable
from unittest import mock
import pytest
from promptflow._cli._user_agent import USER_AGENT as CLI_USER_AGENT # noqa: E402
from promptflow._sdk._telemetry import log_activity
from promptflow._sdk._utils import ClientUserAgentUtil
from sdk_cli_azure_test.recording_utilities import is_replay
FLOWS_DIR = "./tests/test_configs/flows"
DATAS_DIR = "./tests/test_configs/datas"
def mock_log_activity(*args, **kwargs):
custom_message = "github run: https://github.com/microsoft/promptflow/actions/runs/{0}".format(
os.environ.get("GITHUB_RUN_ID")
)
if len(args) == 4:
if args[3] is not None:
args[3]["custom_message"] = custom_message
else:
args = list(args)
args[3] = {"custom_message": custom_message}
elif "custom_dimensions" in kwargs and kwargs["custom_dimensions"] is not None:
kwargs["custom_dimensions"]["custom_message"] = custom_message
else:
kwargs["custom_dimensions"] = {"custom_message": custom_message}
return log_activity(*args, **kwargs)
def run_cli_command(cmd, time_limit=3600):
from promptflow._cli._pf_azure.entry import main
sys.argv = list(cmd)
st = timeit.default_timer()
with mock.patch.object(ClientUserAgentUtil, "get_user_agent") as get_user_agent_fun, mock.patch(
"promptflow._sdk._telemetry.activity.log_activity", side_effect=mock_log_activity
), mock.patch("promptflow._cli._pf_azure.entry.log_activity", side_effect=mock_log_activity):
# Client side will modify user agent only through ClientUserAgentUtil to avoid impacting executor/runtime.
get_user_agent_fun.return_value = f"{CLI_USER_AGENT} perf_monitor/1.0"
user_agent = ClientUserAgentUtil.get_user_agent()
assert user_agent == f"{CLI_USER_AGENT} perf_monitor/1.0"
main()
ed = timeit.default_timer()
print(f"{cmd}, \nTotal time: {ed - st}s")
if is_replay():
assert ed - st < time_limit, f"The time limit is {time_limit}s, but it took {ed - st}s."
@pytest.fixture
def operation_scope_args(subscription_id: str, resource_group_name: str, workspace_name: str):
return [
"--subscription",
subscription_id,
"--resource-group",
resource_group_name,
"--workspace-name",
workspace_name,
]
@pytest.mark.perf_monitor_test
@pytest.mark.usefixtures(
"mock_get_azure_pf_client",
"mock_set_headers_with_user_aml_token",
"single_worker_thread_pool",
"vcr_recording",
)
class TestAzureCliPerf:
def test_pfazure_run_create(self, operation_scope_args, randstr: Callable[[str], str], time_limit=15):
name = randstr("name")
run_cli_command(
cmd=(
"pfazure",
"run",
"create",
"--flow",
f"{FLOWS_DIR}/print_input_flow",
"--data",
f"{DATAS_DIR}/print_input_flow.jsonl",
"--name",
name,
*operation_scope_args,
),
time_limit=time_limit,
)
def test_pfazure_run_update(self, operation_scope_args, time_limit=15):
run_cli_command(
cmd=(
"pfazure",
"run",
"update",
"--name",
"test_run",
"--set",
"display_name=test_run",
"description='test_description'",
"tags.key1=value1",
*operation_scope_args,
),
time_limit=time_limit,
)
def test_run_restore(self, operation_scope_args, time_limit=15):
run_cli_command(
cmd=(
"pfazure",
"run",
"restore",
"--name",
"test_run",
*operation_scope_args,
),
time_limit=time_limit,
)
| promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_azure_cli_perf.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_azure_cli_perf.py",
"repo_id": "promptflow",
"token_count": 1950
} | 55 |
import copy
import pytest
def build_from_data_and_assert(data, expected):
from azure.ai.ml._restclient.v2023_06_01_preview.models import WorkspaceConnectionPropertiesV2BasicResource
from promptflow.azure.operations._arm_connection_operations import ArmConnectionOperations
data = copy.deepcopy(data)
obj = WorkspaceConnectionPropertiesV2BasicResource.deserialize(data)
assert ArmConnectionOperations.build_connection_dict_from_rest_object("mock", obj) == expected
@pytest.mark.unittest
def test_build_azure_openai_connection_from_rest_object():
# Test on ApiKey type with AzureOpenAI category
data = {
"id": "mock_id",
"name": "azure_open_ai_connection",
"type": "Microsoft.MachineLearningServices/workspaces/connections",
"properties": {
"authType": "ApiKey",
"credentials": {"key": "***"},
"category": "AzureOpenAI",
"target": "<api-base>",
"metadata": {
"azureml.flow.connection_type": "AzureOpenAI",
"azureml.flow.module": "promptflow.connections",
"apiType": "azure",
"ApiVersion": "2023-07-01-preview",
"ResourceId": "mock_id",
},
},
}
expected = {
"type": "AzureOpenAIConnection",
"module": "promptflow.connections",
"value": {
"api_base": "<api-base>",
"api_key": "***",
"api_type": "azure",
"api_version": "2023-07-01-preview",
"resource_id": "mock_id",
},
}
build_from_data_and_assert(data, expected)
@pytest.mark.unittest
def test_build_default_azure_openai_connection_missing_metadata():
# Test on ApiKey type with AzureOpenAI category
data = {
"id": "mock_id",
"name": "azure_open_ai_connection",
"type": "Microsoft.MachineLearningServices/workspaces/connections",
"properties": {
"authType": "ApiKey",
"credentials": {"key": "***"},
"category": "AzureOpenAI",
"target": "<api-base>",
"metadata": {
# Missing ApiType and ApiVersion
# "ApiType": "azure",
# "ApiVersion": "2023-07-01-preview",
},
},
}
expected = {
"type": "AzureOpenAIConnection",
"module": "promptflow.connections",
"value": {
"api_base": "<api-base>",
"api_key": "***",
# Assert below keys are filtered out
# "api_type": None,
# "api_version": None,
},
}
build_from_data_and_assert(data, expected)
@pytest.mark.unittest
def test_build_custom_keys_connection_from_rest_object():
# Test on CustomKeys type with CustomConnection category
data = {
"id": "mock_id",
"name": "custom_connection",
"type": "Microsoft.MachineLearningServices/workspaces/connections",
"properties": {
"authType": "CustomKeys",
"credentials": {"keys": {"my_key1": "***", "my_key2": "***"}},
"category": "CustomKeys",
"target": "<api-base>",
"metadata": {
"azureml.flow.connection_type": "Custom",
"azureml.flow.module": "promptflow.connections",
"general_key": "general_value",
},
},
}
expected = {
"type": "CustomConnection",
"module": "promptflow.connections",
"value": {"my_key1": "***", "my_key2": "***", "general_key": "general_value"},
"secret_keys": ["my_key1", "my_key2"],
}
build_from_data_and_assert(data, expected)
@pytest.mark.unittest
def test_build_cognitive_search_connection_from_rest_object():
# Test on ApiKey type with CognitiveSearch category
data = {
"tags": None,
"location": None,
"id": "mock_id",
"name": "test",
"type": "Microsoft.MachineLearningServices/workspaces/connections",
"properties": {
"authType": "ApiKey",
"credentials": {"key": "***"},
"category": "CognitiveSearch",
"expiryTime": None,
"target": "mock_target",
"metadata": {
"azureml.flow.connection_type": "CognitiveSearch",
"azureml.flow.module": "promptflow.connections",
"ApiVersion": "2023-07-01-Preview",
},
},
}
expected = {
"type": "CognitiveSearchConnection",
"module": "promptflow.connections",
"value": {"api_key": "***", "api_base": "mock_target", "api_version": "2023-07-01-Preview"},
}
build_from_data_and_assert(data, expected)
@pytest.mark.unittest
def test_build_cognitive_service_category_connection_from_rest_object():
# Test on ApiKey type with CognitiveService category
data = {
"id": "mock_id",
"name": "ACS_Connection",
"type": "Microsoft.MachineLearningServices/workspaces/connections",
"properties": {
"authType": "ApiKey",
"credentials": {"key": "***"},
"category": "CognitiveService",
"target": "mock_target",
"metadata": {
"azureml.flow.connection_type": "AzureContentSafety",
"azureml.flow.module": "promptflow.connections",
"Kind": "Content Safety",
"ApiVersion": "2023-04-30-preview",
},
},
}
expected = {
"type": "AzureContentSafetyConnection",
"module": "promptflow.connections",
"value": {"api_key": "***", "endpoint": "mock_target", "api_version": "2023-04-30-preview"},
}
build_from_data_and_assert(data, expected)
# Test category + kind as connection type
del data["properties"]["metadata"]["azureml.flow.connection_type"]
build_from_data_and_assert(data, expected)
@pytest.mark.unittest
def test_build_connection_missing_metadata():
data = {
"id": "mock_id",
"name": "ACS_Connection",
"type": "Microsoft.MachineLearningServices/workspaces/connections",
"properties": {
"authType": "ApiKey",
"credentials": {"key": "***"},
"category": "CognitiveService",
"target": "mock_target",
"metadata": {
"ApiVersion": "2023-04-30-preview",
},
},
}
with pytest.raises(Exception) as e:
build_from_data_and_assert(data, {})
assert "is not recognized in PromptFlow" in str(e.value)
@pytest.mark.unittest
def test_build_connection_unknown_category():
data = {
"id": "mock_id",
"name": "ACS_Connection",
"type": "Microsoft.MachineLearningServices/workspaces/connections",
"properties": {
"authType": "ApiKey",
"credentials": {"key": "***"},
"category": "Unknown",
"target": "mock_target",
"metadata": {
"azureml.flow.connection_type": "AzureContentSafety",
"azureml.flow.module": "promptflow.connections",
"Kind": "Content Safety",
"ApiVersion": "2023-04-30-preview",
},
},
}
with pytest.raises(Exception) as e:
build_from_data_and_assert(data, {})
assert "Unknown connection mock category Unknown" in str(e.value)
| promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_arm_connection_build.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_arm_connection_build.py",
"repo_id": "promptflow",
"token_count": 3536
} | 56 |
import importlib.util
import json
import sys
from pathlib import Path
from unittest.mock import patch
import pytest
from promptflow import ToolProvider, tool
from promptflow._core.tool_meta_generator import ToolValidationError
from promptflow._sdk._pf_client import PFClient
from promptflow.entities import DynamicList, InputSetting
from promptflow.exceptions import UserErrorException
PROMOTFLOW_ROOT = Path(__file__) / "../../../.."
TEST_ROOT = Path(__file__).parent.parent.parent
TOOL_ROOT = TEST_ROOT / "test_configs/tools"
_client = PFClient()
@pytest.mark.e2etest
class TestTool:
def get_tool_meta(self, tool_path):
module_name = f"test_tool.{Path(tool_path).stem}"
# Load the module from the file path
spec = importlib.util.spec_from_file_location(module_name, tool_path)
module = importlib.util.module_from_spec(spec)
# Execute the module's code to populate its namespace
spec.loader.exec_module(module)
tools_meta, _ = _client.tools._generate_tool_meta(module)
return tools_meta
def test_python_tool_meta(self):
tool_path = TOOL_ROOT / "python_tool.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.python_tool.PythonTool.python_tool": {
"class_name": "PythonTool",
"function": "python_tool",
"inputs": {"connection": {"type": ["AzureOpenAIConnection"]}, "input1": {"type": ["string"]}},
"module": "test_tool.python_tool",
"name": "PythonTool.python_tool",
"type": "python",
},
"test_tool.python_tool.my_python_tool": {
"function": "my_python_tool",
"inputs": {"input1": {"type": ["string"]}},
"module": "test_tool.python_tool",
"name": "python_tool",
"type": "python",
},
"test_tool.python_tool.my_python_tool_without_name": {
"function": "my_python_tool_without_name",
"inputs": {"input1": {"type": ["string"]}},
"module": "test_tool.python_tool",
"name": "my_python_tool_without_name",
"type": "python",
},
}
assert tool_meta == expect_tool_meta
def test_llm_tool_meta(self):
tool_path = TOOL_ROOT / "custom_llm_tool.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.custom_llm_tool.my_tool": {
"name": "My Custom LLM Tool",
"type": "custom_llm",
"inputs": {"connection": {"type": ["CustomConnection"]}},
"description": "This is a tool to demonstrate the custom_llm tool type",
"module": "test_tool.custom_llm_tool",
"function": "my_tool",
"enable_kwargs": True,
},
"test_tool.custom_llm_tool.TestCustomLLMTool.tool_func": {
"name": "My Custom LLM Tool",
"type": "custom_llm",
"inputs": {"connection": {"type": ["AzureOpenAIConnection"]}, "api": {"type": ["string"]}},
"description": "This is a tool to demonstrate the custom_llm tool type",
"module": "test_tool.custom_llm_tool",
"class_name": "TestCustomLLMTool",
"function": "tool_func",
"enable_kwargs": True,
},
}
assert tool_meta == expect_tool_meta
def test_invalid_tool_type(self):
with pytest.raises(UserErrorException) as exception:
@tool(name="invalid_tool_type", type="invalid_type")
def invalid_tool_type():
pass
assert exception.value.message == "Tool type invalid_type is not supported yet."
def test_tool_with_custom_connection(self):
tool_path = TOOL_ROOT / "tool_with_custom_connection.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.tool_with_custom_connection.MyTool.my_tool": {
"name": "My Second Tool",
"type": "python",
"inputs": {"connection": {"type": ["CustomConnection"]}, "input_text": {"type": ["string"]}},
"description": "This is my second tool",
"module": "test_tool.tool_with_custom_connection",
"class_name": "MyTool",
"function": "my_tool",
}
}
assert tool_meta == expect_tool_meta
tool_path = TOOL_ROOT / "tool_with_custom_strong_type_connection.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.tool_with_custom_strong_type_connection.my_tool": {
"name": "Tool With Custom Strong Type Connection",
"type": "python",
"inputs": {
"connection": {"type": ["CustomConnection"], "custom_type": ["MyCustomConnection"]},
"input_text": {"type": ["string"]},
},
"description": "This is my tool with custom strong type connection.",
"module": "test_tool.tool_with_custom_strong_type_connection",
"function": "my_tool",
}
}
assert tool_meta == expect_tool_meta
def test_tool_with_input_settings(self):
tool_path = TOOL_ROOT / "tool_with_dynamic_list_input.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.tool_with_dynamic_list_input.my_tool": {
"description": "This is my tool with dynamic list input",
"function": "my_tool",
"inputs": {
"endpoint_name": {
"dynamic_list": {
"func_kwargs": [
{
"default": "",
"name": "prefix",
"optional": True,
"reference": "${inputs.input_prefix}",
"type": ["string"],
}
],
"func_path": "test_tool.tool_with_dynamic_list_input.list_endpoint_names",
},
"type": ["string"],
},
"input_prefix": {"type": ["string"]},
"input_text": {
"allow_manual_entry": True,
"dynamic_list": {
"func_kwargs": [
{
"default": "",
"name": "prefix",
"optional": True,
"reference": "${inputs.input_prefix}",
"type": ["string"],
},
{"default": 10, "name": "size", "optional": True, "type": ["int"]},
],
"func_path": "test_tool.tool_with_dynamic_list_input.my_list_func",
},
"is_multi_select": True,
"type": ["list"],
},
},
"module": "test_tool.tool_with_dynamic_list_input",
"name": "My Tool with Dynamic List Input",
"type": "python",
}
}
assert tool_meta == expect_tool_meta
tool_path = TOOL_ROOT / "tool_with_enabled_by_value.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.tool_with_enabled_by_value.my_tool": {
"name": "My Tool with Enabled By Value",
"type": "python",
"inputs": {
"user_type": {"type": ["string"], "enum": ["student", "teacher"]},
"student_id": {"type": ["string"], "enabled_by": "user_type", "enabled_by_value": ["student"]},
"teacher_id": {"type": ["string"], "enabled_by": "user_type", "enabled_by_value": ["teacher"]},
},
"description": "This is my tool with enabled by value",
"module": "test_tool.tool_with_enabled_by_value",
"function": "my_tool",
}
}
assert tool_meta == expect_tool_meta
def test_dynamic_list_with_invalid_reference(self):
def my_list_func(prefix: str, size: int = 10):
pass
# value in reference doesn't exist in tool inputs
invalid_dynamic_list_setting = DynamicList(function=my_list_func, input_mapping={"prefix": "invalid_input"})
input_settings = {
"input_text": InputSetting(
dynamic_list=invalid_dynamic_list_setting, allow_manual_entry=True, is_multi_select=True
)
}
@tool(
name="My Tool with Dynamic List Input",
description="This is my tool with dynamic list input",
input_settings=input_settings,
)
def my_tool(input_text: list, input_prefix: str) -> str:
return f"Hello {input_prefix} {','.join(input_text)}"
with pytest.raises(ToolValidationError) as exception:
_client.tools.validate(my_tool, raise_error=True)
assert "Cannot find invalid_input in the tool inputs." in exception.value.message
# invalid dynamic func input
invalid_dynamic_list_setting = DynamicList(
function=my_list_func, input_mapping={"invalid_input": "input_prefix"}
)
input_settings = {
"input_text": InputSetting(
dynamic_list=invalid_dynamic_list_setting, allow_manual_entry=True, is_multi_select=True
)
}
@tool(
name="My Tool with Dynamic List Input",
description="This is my tool with dynamic list input",
input_settings=input_settings,
)
def my_tool(input_text: list, input_prefix: str) -> str:
return f"Hello {input_prefix} {','.join(input_text)}"
with pytest.raises(ToolValidationError) as exception:
_client.tools.validate(my_tool, raise_error=True)
assert "Cannot find invalid_input in the inputs of dynamic_list func" in exception.value.message
# check required inputs of dynamic list func
invalid_dynamic_list_setting = DynamicList(function=my_list_func, input_mapping={"size": "input_prefix"})
input_settings = {
"input_text": InputSetting(
dynamic_list=invalid_dynamic_list_setting,
)
}
@tool(
name="My Tool with Dynamic List Input",
description="This is my tool with dynamic list input",
input_settings=input_settings,
)
def my_tool(input_text: list, input_prefix: str) -> str:
return f"Hello {input_prefix} {','.join(input_text)}"
with pytest.raises(ToolValidationError) as exception:
_client.tools.validate(my_tool, raise_error=True)
assert "Missing required input(s) of dynamic_list function: ['prefix']" in exception.value.message
def test_enabled_by_with_invalid_input(self):
# value in enabled_by_value doesn't exist in tool inputs
input1_settings = InputSetting(enabled_by="invalid_input")
@tool(name="enabled_by_with_invalid_input", input_settings={"input1": input1_settings})
def enabled_by_with_invalid_input(input1: str, input2: str):
pass
with pytest.raises(ToolValidationError) as exception:
_client.tools.validate(enabled_by_with_invalid_input, raise_error=True)
assert 'Cannot find the input \\"invalid_input\\"' in exception.value.message
def test_tool_with_file_path_input(self):
tool_path = TOOL_ROOT / "tool_with_file_path_input.py"
tool_meta = self.get_tool_meta(tool_path)
expect_tool_meta = {
"test_tool.tool_with_file_path_input.my_tool": {
"name": "Tool with FilePath Input",
"type": "python",
"inputs": {"input_file": {"type": ["file_path"]}, "input_text": {"type": ["string"]}},
"description": "This is a tool to demonstrate the usage of FilePath input",
"module": "test_tool.tool_with_file_path_input",
"function": "my_tool",
}
}
assert expect_tool_meta == tool_meta
def test_tool_with_generated_by_input(self):
tool_path = TOOL_ROOT / "tool_with_generated_by_input.py"
tool_meta = self.get_tool_meta(tool_path)
with open(TOOL_ROOT / "expected_generated_by_meta.json", "r") as f:
expect_tool_meta = json.load(f)
assert expect_tool_meta == tool_meta
def test_validate_tool_script(self):
tool_script_path = TOOL_ROOT / "custom_llm_tool.py"
result = _client.tools.validate(tool_script_path)
assert result.passed
tool_script_path = TOOL_ROOT / "tool_with_dynamic_list_input.py"
result = _client.tools.validate(tool_script_path)
assert result.passed
tool_script_path = TOOL_ROOT / "invalid_tool.py"
result = _client.tools.validate(tool_script_path)
assert len(result._errors) == 4
assert "1 is not of type 'string'" in result.error_messages["invalid_schema_type"]
assert (
"Cannot provide both `icon` and `icon_light` or `icon_dark`." in result.error_messages["invalid_tool_icon"]
)
assert (
'Cannot find the input "invalid_input" for the enabled_by of teacher_id.'
in result.error_messages["invalid_input_settings"]
)
assert (
'Cannot find the input "invalid_input" for the enabled_by of student_id.'
in result.error_messages["invalid_input_settings"]
)
assert all(str(tool_script_path) == item.location for item in result._errors)
with pytest.raises(ToolValidationError):
_client.tools.validate(TOOL_ROOT / "invalid_tool.py", raise_error=True)
def test_validate_tool_func(self):
def load_module_by_path(source):
module_name = Path(source).stem
spec = importlib.util.spec_from_file_location(module_name, source)
module = importlib.util.module_from_spec(spec)
# Load the module's code
spec.loader.exec_module(module)
return module
tool_script_path = TOOL_ROOT / "custom_llm_tool.py"
module = load_module_by_path(tool_script_path)
tool_func = getattr(module, "my_tool")
result = _client.tools.validate(tool_func)
assert result.passed
tool_script_path = TOOL_ROOT / "invalid_tool.py"
module = load_module_by_path(tool_script_path)
tool_func = getattr(module, "invalid_schema_type")
result = _client.tools.validate(tool_func)
assert "invalid_schema_type" in result.error_messages
assert "1 is not of type 'string'" in result.error_messages["invalid_schema_type"]
assert "invalid_schema_type" == result._errors[0].function_name
assert str(tool_script_path) == result._errors[0].location
with pytest.raises(ToolValidationError):
_client.tools.validate(tool_func, raise_error=True)
def test_validate_package_tool(self):
package_tool_path = TOOL_ROOT / "tool_package"
sys.path.append(str(package_tool_path.resolve()))
import tool_package
with patch("promptflow._sdk.operations._tool_operations.ToolOperations._is_package_tool", return_value=True):
result = _client.tools.validate(tool_package)
assert len(result._errors) == 4
assert "1 is not of type 'string'" in result.error_messages["invalid_schema_type"]
assert (
"Cannot provide both `icon` and `icon_light` or `icon_dark`." in result.error_messages["invalid_tool_icon"]
)
assert (
'Cannot find the input "invalid_input" for the enabled_by of teacher_id.'
in result.error_messages["invalid_input_settings"]
)
assert (
'Cannot find the input "invalid_input" for the enabled_by of student_id.'
in result.error_messages["invalid_input_settings"]
)
def test_validate_tool_class(self):
from promptflow.tools.serpapi import SerpAPI
result = _client.tools.validate(SerpAPI)
assert result.passed
class InvalidToolClass(ToolProvider):
def __init__(self):
super().__init__()
@tool(name="My Custom Tool")
def tool_func(self, api: str):
pass
@tool(name=1)
def invalid_tool_func(self, api: str):
pass
result = _client.tools.validate(InvalidToolClass)
assert not result.passed
assert result._kwargs["total_count"] == 2
assert result._kwargs["invalid_count"] == 1
assert len(result._errors) == 1
assert "1 is not of type 'string'" in result._errors[0].message
| promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_tool.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_tool.py",
"repo_id": "promptflow",
"token_count": 8463
} | 57 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
from promptflow import PFClient
from promptflow._sdk._utils import ClientUserAgentUtil
@pytest.mark.sdk_test
@pytest.mark.e2etest
class TestPFClient:
def test_pf_client_user_agent(self):
PFClient()
assert "promptflow-sdk" in ClientUserAgentUtil.get_user_agent()
assert "promptflow/" not in ClientUserAgentUtil.get_user_agent()
| promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_pf_client.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_pf_client.py",
"repo_id": "promptflow",
"token_count": 166
} | 58 |
{
"subscription_id": "",
"resource_group": "",
"workspace_name": ""
} | promptflow/src/promptflow/tests/test_configs/configs/mock_flow_empty_config/.azureml/config.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/configs/mock_flow_empty_config/.azureml/config.json",
"repo_id": "promptflow",
"token_count": 37
} | 59 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/WeaviateConnection.schema.json
name: my_weaviate_connection
type: weaviate
api_key: "<to-be-replaced>"
api_base: "endpoint"
| promptflow/src/promptflow/tests/test_configs/connections/weaviate_connection.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/connections/weaviate_connection.yaml",
"repo_id": "promptflow",
"token_count": 73
} | 60 |
path: ./entry.py
entry: my_flow | promptflow/src/promptflow/tests/test_configs/eager_flows/flow_with_dataclass_output/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/flow_with_dataclass_output/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 12
} | 61 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
def my_flow(input_val) -> str:
"""Simple flow with yaml."""
print(f"Hello world! {input_val}")
return f"Hello world! {input_val}"
| promptflow/src/promptflow/tests/test_configs/eager_flows/simple_with_req/entry.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/simple_with_req/entry.py",
"repo_id": "promptflow",
"token_count": 81
} | 62 |
inputs:
text:
type: string
outputs:
text:
type: string
reference: ${node_a.output}
nodes:
- name: node_a
type: python
source:
type: code
path: node_a.py
inputs:
input1: ${inputs.text}
- name: node_b
type: python
source:
type: code
path: node_b.py
inputs: {}
activate:
when: ${node_a.output}
is: hello world
| promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/activate_with_no_inputs/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 162
} | 63 |
from promptflow import tool
@tool
def test(text: str):
return text + "hello world!"
| promptflow/src/promptflow/tests/test_configs/flows/all_nodes_bypassed/test.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/all_nodes_bypassed/test.py",
"repo_id": "promptflow",
"token_count": 29
} | 64 |
{"text": "Hello World!"}
{"text": "Hello PromptFlow!"}
| promptflow/src/promptflow/tests/test_configs/flows/basic-with-connection/data.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/basic-with-connection/data.jsonl",
"repo_id": "promptflow",
"token_count": 19
} | 65 |
from openai import AsyncOpenAI
from promptflow import tool
from promptflow.connections import OpenAIConnection
@tool
async def get_or_create_thread(conn: OpenAIConnection, thread_id: str):
if thread_id:
return thread_id
cli = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
thread = await cli.beta.threads.create()
return thread.id
| promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_or_create_thread.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_or_create_thread.py",
"repo_id": "promptflow",
"token_count": 136
} | 66 |
{
"grade.completed": 3,
"calculate_accuracy.completed": 1,
"aggregation_assert.completed": 1
} | promptflow/src/promptflow/tests/test_configs/flows/classification_accuracy_evaluation/expected_status_summary.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/classification_accuracy_evaluation/expected_status_summary.json",
"repo_id": "promptflow",
"token_count": 46
} | 67 |
system:
You are a helpful assistant.
user:
When an incident occurs, there are 3 ways to deal with it; please choose one.
1. {{first_method}}
2. {{second_method}}
3. {{third_method}} | promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/investigation_steps.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/investigation_steps.jinja2",
"repo_id": "promptflow",
"token_count": 55
} | 68 |
{"question": "What's promptflow1?"}
{"question": "What's promptflow2?"}
{"question": "What's promptflow3?"} | promptflow/src/promptflow/tests/test_configs/flows/csharp_flow/inputs.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/csharp_flow/inputs.jsonl",
"repo_id": "promptflow",
"token_count": 35
} | 69 |
from promptflow import tool
from promptflow.contracts.multimedia import Image
@tool
def merge_images(image_list: list, image_dict: list):
res = set()
for item in image_list[0]:
res.add(item)
for _, v in image_dict[0].items():
res.add(v)
assert all(isinstance(item, Image) for item in res)
return list(res)
| promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_composite_image/merge_images.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_composite_image/merge_images.py",
"repo_id": "promptflow",
"token_count": 134
} | 70 |
import os
from promptflow import tool
@tool
def get_env_var(key: str):
from tensorflow import __version__
print(__version__)
print(os.environ.get(key))
# get from env var
return {"value": os.environ.get(key)}
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_additional_include_req/print_env.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_additional_include_req/print_env.py",
"repo_id": "promptflow",
"token_count": 89
} | 71 |
ignored_folder
*.ignored | promptflow/src/promptflow/tests/test_configs/flows/flow_with_ignore_file/.amlignore/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_ignore_file/.amlignore",
"repo_id": "promptflow",
"token_count": 8
} | 72 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
text:
type: string
default: Hello 日本語
outputs:
output:
type: string
reference: ${hello_prompt.output}
nodes:
- name: hello_prompt
type: prompt
source:
type: code
path: hello.jinja2
inputs:
text: ${inputs.text} | promptflow/src/promptflow/tests/test_configs/flows/flow_with_non_english_input/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_non_english_input/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 144
} | 73 |
inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${echo_my_prompt.output}
nodes:
- inputs:
input1: ${inputs.text}
name: echo_my_prompt
type: python
source:
type: code
path: hello.py
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_sys_inject/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_sys_inject/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 105
} | 74 |
inputs:
topic:
type: string
default: hello world
is_chat_input: false
stream:
type: bool
default: false
is_chat_input: false
outputs:
joke:
type: string
reference: ${echo.output}
nodes:
- name: echo
type: python
source:
type: code
path: echo.py
inputs:
input: ${joke.output}
use_variants: false
- name: joke
type: llm
source:
type: code
path: joke.jinja2
inputs:
deployment_name: gpt-35-turbo
temperature: 1
top_p: 1
max_tokens: 256
presence_penalty: 0
frequency_penalty: 0
stream: ${inputs.stream}
topic: ${inputs.topic}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: chat
module: promptflow.tools.aoai
use_variants: false
| promptflow/src/promptflow/tests/test_configs/flows/llm_tool/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/llm_tool/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 313
} | 75 |
from promptflow import tool
@tool
def mod_two(number: int):
if number % 2 != 0:
raise Exception("cannot mod 2!")
return {"value": number}
| promptflow/src/promptflow/tests/test_configs/flows/mod-n/two/mod_two.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/mod-n/two/mod_two.py",
"repo_id": "promptflow",
"token_count": 58
} | 76 |
{
"prompt": "What is the capital of the United States of America?"
}
| promptflow/src/promptflow/tests/test_configs/flows/openai_completion_api_flow/samples.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/openai_completion_api_flow/samples.json",
"repo_id": "promptflow",
"token_count": 24
} | 77 |