promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/summarize_text_content.jinja2
Please summarize the following text in one paragraph. 100 words. Do not add any information that is not in the text.

Text: {{text}}

Summary:
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/flow.dag.yaml
inputs:
  url:
    type: string
    default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
outputs:
  category:
    type: string
    reference: ${convert_to_dict.output.category}
  evidence:
    type: string
    reference: ${convert_to_dict.output.evidence}
nodes:
- name: fetch_text_content_from_url
  type: python
  source:
    type: code
    path: fetch_text_content_from_url.py
  inputs:
    url: ${inputs.url}
- name: summarize_text_content
  type: llm
  source:
    type: code
    path: summarize_text_content.jinja2
  inputs:
    deployment_name: gpt-35-turbo
    suffix: ''
    max_tokens: '128'
    temperature: '0.2'
    top_p: '1.0'
    logprobs: ''
    echo: 'False'
    stop: ''
    presence_penalty: '0'
    frequency_penalty: '0'
    best_of: '1'
    logit_bias: ''
    text: ${fetch_text_content_from_url.output}
  provider: AzureOpenAI
  connection: azure_open_ai_connection
  api: completion
  module: promptflow.tools.aoai
  use_variants: true
- name: prepare_examples
  type: python
  source:
    type: code
    path: prepare_examples.py
  inputs: {}
- name: classify_with_llm
  type: llm
  source:
    type: code
    path: classify_with_llm.jinja2
  inputs:
    deployment_name: gpt-35-turbo
    suffix: ''
    max_tokens: '128'
    temperature: '0.2'
    top_p: '1.0'
    logprobs: ''
    echo: 'False'
    stop: ''
    presence_penalty: '0'
    frequency_penalty: '0'
    best_of: '1'
    logit_bias: ''
    url: ${inputs.url}
    examples: ${prepare_examples.output}
    text_content: ${summarize_text_content.output}
  provider: AzureOpenAI
  connection: azure_open_ai_connection
  api: completion
  module: promptflow.tools.aoai
- name: convert_to_dict
  type: python
  source:
    type: code
    path: convert_to_dict.py
  inputs:
    input_str: ${classify_with_llm.output}
node_variants:
  summarize_text_content:
    default_variant_id: variant_1
    variants:
      variant_0:
        node:
          type: llm
          source:
            type: code
            path: summarize_text_content.jinja2
          inputs:
            deployment_name: gpt-35-turbo
            suffix: ''
            max_tokens: '128'
            temperature: '0.2'
            top_p: '1.0'
            logprobs: ''
            echo: 'False'
            stop: ''
            presence_penalty: '0'
            frequency_penalty: '0'
            best_of: '1'
            logit_bias: ''
            text: ${fetch_text_content_from_url.output}
          provider: AzureOpenAI
          connection: azure_open_ai_connection
          api: completion
          module: promptflow.tools.aoai
      variant_1:
        node:
          type: llm
          source:
            type: code
            path: summarize_text_content__variant_1.jinja2
          inputs:
            deployment_name: gpt-35-turbo
            suffix: ''
            max_tokens: '256'
            temperature: '0.2'
            top_p: '1.0'
            logprobs: ''
            echo: 'False'
            stop: ''
            presence_penalty: '0'
            frequency_penalty: '0'
            best_of: '1'
            logit_bias: ''
            text: ${fetch_text_content_from_url.output}
          provider: AzureOpenAI
          connection: azure_open_ai_connection
          api: completion
          module: promptflow.tools.aoai
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/prepare_examples.py
from promptflow import tool


@tool
def prepare_examples():
    return [
        {
            "url": "https://play.google.com/store/apps/details?id=com.spotify.music",
            "text_content": "Spotify is a free music and podcast streaming app with millions of songs, albums, and original podcasts. It also offers audiobooks, so users can enjoy thousands of stories. It has a variety of features such as creating and sharing music playlists, discovering new music, and listening to popular and exclusive podcasts. It also has a Premium subscription option which allows users to download and listen offline, and access ad-free music. It is available on all devices and has a variety of genres and artists to choose from.",
            "category": "App",
            "evidence": "Both",
        },
        {
            "url": "https://www.youtube.com/channel/UC_x5XG1OV2P6uZZ5FSM9Ttw",
            "text_content": "NFL Sunday Ticket is a service offered by Google LLC that allows users to watch NFL games on YouTube. It is available in 2023 and is subject to the terms and privacy policy of Google LLC. It is also subject to YouTube's terms of use and any applicable laws.",
            "category": "Channel",
            "evidence": "URL",
        },
        {
            "url": "https://arxiv.org/abs/2303.04671",
            "text_content": "Visual ChatGPT is a system that enables users to interact with ChatGPT by sending and receiving not only languages but also images, providing complex visual questions or visual editing instructions, and providing feedback and asking for corrected results. It incorporates different Visual Foundation Models and is publicly available. Experiments show that Visual ChatGPT opens the door to investigating the visual roles of ChatGPT with the help of Visual Foundation Models.",
            "category": "Academic",
            "evidence": "Text content",
        },
        {
            "url": "https://ab.politiaromana.ro/",
            "text_content": "There is no content available for this text.",
            "category": "None",
            "evidence": "None",
        },
    ]
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/samples.json
[
  {
    "line_number": 0,
    "variant_id": "variant_0",
    "groundtruth": "App",
    "prediction": "App"
  }
]
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_v2/.promptflow/flow.tools.json
{
  "package": {},
  "code": {
    "fetch_text_content_from_url.py": {
      "type": "python",
      "inputs": {
        "url": {"type": ["string"]}
      },
      "function": "fetch_text_content_from_url"
    },
    "summarize_text_content.jinja2": {
      "type": "llm",
      "inputs": {
        "text": {"type": ["string"]}
      },
      "description": "Summarize webpage content into a short paragraph."
    },
    "summarize_text_content__variant_1.jinja2": {
      "type": "llm",
      "inputs": {
        "text": {"type": ["string"]}
      }
    },
    "prepare_examples.py": {
      "type": "python",
      "function": "prepare_examples"
    },
    "classify_with_llm.jinja2": {
      "type": "llm",
      "inputs": {
        "url": {"type": ["string"]},
        "examples": {"type": ["string"]},
        "text_content": {"type": ["string"]}
      },
      "description": "Multi-class classification of a given url and text content."
    },
    "convert_to_dict.py": {
      "type": "python",
      "inputs": {
        "input_str": {"type": ["string"]}
      },
      "function": "convert_to_dict"
    }
  }
}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_trace_async/flow.dag.yaml
inputs:
  user_id:
    type: int
    default: 1
outputs:
  output:
    type: string
    reference: ${greetings.output.greeting}
nodes:
- name: greetings
  type: python
  source:
    type: code
    path: greetings.py
  inputs:
    user_id: ${inputs.user_id}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_trace_async/greetings.py
import asyncio

from promptflow import tool, trace


@trace
async def is_valid_name(name):
    await asyncio.sleep(0.5)
    return len(name) > 0


@trace
async def get_user_name(user_id):
    await asyncio.sleep(0.5)
    user_name = f"User {user_id}"
    if not await is_valid_name(user_name):
        raise ValueError(f"Invalid user name: {user_name}")
    return user_name


@trace
async def format_greeting(user_name):
    await asyncio.sleep(0.5)
    return f"Hello, {user_name}!"


@tool
async def greetings(user_id):
    user_name = await get_user_name(user_id)
    greeting = await format_greeting(user_name)
    print(greeting)
    return {"greeting": greeting}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/assistant_definition.yaml
model: mock_model
instructions: mock_instructions
tools:
- type: function
  tool_type: python
  source:
    type: code
    path: echo.py
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/echo.py
from promptflow import tool


@tool
def echo(message: str):
    """This tool is used to echo the message back.

    :param message: The message to echo.
    :type message: str
    """
    return message
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/test_assistant_definition.py
from promptflow import tool
from promptflow.contracts.types import AssistantDefinition


@tool
def test_assistant_definition(message: str, assistant_definition: AssistantDefinition):
    assert assistant_definition.model == "mock_model"
    assert assistant_definition.instructions == "mock_instructions"

    invoker = assistant_definition.init_tool_invoker()
    openai_definition = invoker.to_openai_tools()
    assert len(openai_definition) == 1
    assert openai_definition[0]["function"]["description"] == "This tool is used to echo the message back."
    assert openai_definition[0]["function"]["parameters"]["properties"] == {
        "message": {"description": "The message to echo.", "type": "string"}
    }
    assert openai_definition[0]["function"]["parameters"]["required"] == ["message"]
    assert invoker.invoke_tool("echo", {"message": message}) == "Hello World!"

    return assistant_definition.serialize()
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/flow.dag.yaml
inputs:
  message:
    type: string
    default: Hello World!
outputs:
  output:
    type: object
    reference: ${test_assistant_definition.output}
nodes:
- name: test_assistant_definition
  type: python
  source:
    type: code
    path: test_assistant_definition.py
  inputs:
    message: ${inputs.message}
    assistant_definition: assistant_definition.yaml
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/Dockerfile
# syntax=docker/dockerfile:1
FROM docker.io/continuumio/miniconda3:latest

WORKDIR /

COPY ./flow /flow

# create conda environment
RUN conda create -n promptflow-serve python=3.9.16 pip=23.0.1 -q -y && \
    conda run -n promptflow-serve \
    pip install -r /flow/requirements_txt && \
    conda run -n promptflow-serve pip install keyrings.alt && \
    conda run -n promptflow-serve pip install gunicorn==20.1.0 && \
    conda run -n promptflow-serve pip cache purge && \
    conda clean -a -y

RUN apt-get update && apt-get install -y runit

EXPOSE 8080

COPY ./connections/* /connections/

# reset runsvdir
RUN rm -rf /var/runit
COPY ./runit /var/runit
# grant permission
RUN chmod -R +x /var/runit

COPY ./start.sh /
CMD ["bash", "./start.sh"]
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/start.sh
#!/bin/bash

# stop services created by runsv and propagate SIGINT, SIGTERM to child jobs
sv_stop() {
    echo "$(date -uIns) - Stopping all runsv services"
    for s in $(ls -d /var/runit/*); do
        sv stop $s
    done
}

# register SIGINT, SIGTERM handler
trap sv_stop SIGINT SIGTERM

# start services in background and wait all child jobs
runsvdir /var/runit &
wait
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/README.md
Exported Dockerfile & its dependencies are located in the same folder. The structure is as below:
- flow: the folder contains all the flow files
  - ...
- connections: the folder contains yaml files to create all related connections
  - ...
- runit: the folder contains all the runit scripts
  - ...
- Dockerfile: the dockerfile to build the image
- start.sh: the script used in `CMD` of `Dockerfile` to start the service
- settings.json: a json file to store the settings of the docker image
- README.md: the readme file to describe how to use the dockerfile

Please refer to [official doc](https://microsoft.github.io/promptflow/how-to-guides/deploy-and-export-a-flow.html#export-a-flow) for more details about how to use the exported dockerfile and scripts.
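For reference, a minimal sketch of building and serving this exported image (the image tag is illustrative, not part of the export; the port and the environment variable come from the Dockerfile's `EXPOSE 8080` and settings.json):

```bash
# build the image from this folder (tag name is an assumption)
docker build . -t promptflow-serve-example

# run it, passing the secret referenced by settings.json as an environment variable
docker run -p 8080:8080 \
    -e CUSTOM_CONNECTION_AZURE_OPENAI_API_KEY=<your-api-key> \
    promptflow-serve-example
```

The container's runit service creates the connection from /connections and starts gunicorn on port 8080, so the flow endpoint should become reachable at http://localhost:8080.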
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/settings.json
{
  "CUSTOM_CONNECTION_AZURE_OPENAI_API_KEY": ""
}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/connections/custom_connection.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomConnection.schema.json
type: custom
name: custom_connection
configs:
  CHAT_DEPLOYMENT_NAME: gpt-35-turbo
  AZURE_OPENAI_API_BASE: https://gpt-test-eus.openai.azure.com/
secrets:
  AZURE_OPENAI_API_KEY: ${env:CUSTOM_CONNECTION_AZURE_OPENAI_API_KEY}
module: promptflow.connections
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/runit/promptflow-serve/run
#!/bin/bash

CONDA_ENV_PATH="$(conda info --base)/envs/promptflow-serve"
export PATH="$CONDA_ENV_PATH/bin:$PATH"

ls
ls /connections
pf connection create --file /connections/custom_connection.yaml
echo "start promptflow serving with worker_num: 8, worker_threads: 1"
cd /flow
gunicorn -w 8 --threads 1 -b "0.0.0.0:8080" --timeout 300 "promptflow._sdk._serving.app:create_app()"
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/runit/promptflow-serve/finish
#!/bin/bash

echo "$(date -uIns) - promptflow-serve/finish $@"

# stop all gunicorn processes
echo "$(date -uIns) - Stopping all Gunicorn processes"
pkill gunicorn
while pgrep gunicorn >/dev/null; do
    echo "$(date -uIns) - Gunicorn process is still running, waiting for 1s"
    sleep 1
done
echo "$(date -uIns) - Stopped all Gunicorn processes"
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/setup.sh
echo Hello Promptflow!
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/requirements_txt
keyrings.alt
promptflow-tools
promptflow
langchain
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/user_intent_few_shot.jinja2
You are given a list of orders with item_numbers from a customer and a statement from the customer. It is your job to identify the intent that the customer has with their statement. Possible intents can be: "product return", "product exchange", "general question", "product question", "other".

If the intent is product related ("product return", "product exchange", "product question"), then you should also provide the order id and item that the customer is referring to in their statement.

For instance, if you are given the following list of orders:

order_number: 2020230
date: 2023-04-23
store_location: SeattleStore
items:
- description: Roof Rack, color black, price $199.99
  item_number: 101010
- description: Running Shoes, size 10, color blue, price $99.99
  item_number: 202020

And you are given the following customer statement:
- I am having issues with the jogging shoes I bought.

Then you should answer in valid yaml format with the fields intent, order_number, description, and item_number like so:

intent: product question
order_number: 2020230
description: Running Shoes, size 10, color blue, price $99.99
item_number: 202020

Here is the actual problem you need to solve:

In triple backticks below is the customer information and a list of orders.
```
{{customer_info}}
```

In triple backticks below is the chat history with customer statements and replies from the customer service agent:
```
{{chat_history}}
```

What is the customer's `intent:` here? "product return", "product exchange", "general question", "product question" or "other"? Reply with only the intent string.
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/intent.py
import os

import pip


def extract_intent(chat_prompt: str):
    from langchain.chat_models import AzureChatOpenAI
    from langchain.schema import HumanMessage

    if "AZURE_OPENAI_API_KEY" not in os.environ:
        # load environment variables from .env file
        try:
            from dotenv import load_dotenv
        except ImportError:
            # This can be removed if user using custom image.
            pip.main(["install", "python-dotenv"])
            from dotenv import load_dotenv

        load_dotenv()

    chat = AzureChatOpenAI(
        deployment_name=os.environ["CHAT_DEPLOYMENT_NAME"],
        openai_api_key=os.environ["AZURE_OPENAI_API_KEY"],
        openai_api_base=os.environ["AZURE_OPENAI_API_BASE"],
        openai_api_type="azure",
        openai_api_version="2023-03-15-preview",
        temperature=0,
    )
    reply_message = chat([HumanMessage(content=chat_prompt)])
    return reply_message.content


def generate_prompt(customer_info: str, history: list, user_prompt_template: str):
    from langchain.prompts.chat import ChatPromptTemplate, HumanMessagePromptTemplate
    from langchain.prompts.prompt import PromptTemplate

    chat_history_text = "\n".join(
        [message["role"] + ": " + message["content"] for message in history]
    )
    prompt_template = PromptTemplate.from_template(user_prompt_template)
    chat_prompt_template = ChatPromptTemplate.from_messages(
        [HumanMessagePromptTemplate(prompt=prompt_template)]
    )
    return chat_prompt_template.format_prompt(
        customer_info=customer_info, chat_history=chat_history_text
    ).to_string()


if __name__ == "__main__":
    import json

    with open("./data/denormalized-flat.jsonl", "r") as f:
        data = [json.loads(line) for line in f.readlines()]

    # only ten samples
    data = data[:10]

    # load template from file
    with open("user_intent_zero_shot.md", "r") as f:
        user_prompt_template = f.read()

    # each test
    for item in data:
        chat_prompt = generate_prompt(item["customer_info"], item["history"], user_prompt_template)
        reply = extract_intent(chat_prompt)
        print("=====================================")
        # print("Customer info: ", item["customer_info"])
        # print("+++++++++++++++++++++++++++++++++++++")
        print("Chat history: ", item["history"])
        print("+++++++++++++++++++++++++++++++++++++")
        print(reply)
        print("+++++++++++++++++++++++++++++++++++++")
        print(f"Ground Truth: {item['intent']}")
        print("=====================================")
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/flow.dag.yaml
inputs:
  customer_info:
    type: string
  chat_history:
    type: string
outputs:
  output:
    type: string
    reference: ${extract_intent.output}
nodes:
- name: chat_prompt
  type: prompt
  source:
    type: code
    path: user_intent_zero_shot.jinja2
  inputs:
    # Please check the generated prompt inputs
    customer_info: ${inputs.customer_info}
    chat_history: ${inputs.chat_history}
- name: extract_intent
  type: python
  source:
    type: code
    path: extract_intent_tool.py
  inputs:
    chat_prompt: ${chat_prompt.output}
    connection: custom_connection
environment:
  python_requirements_txt: requirements_txt
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/.amlignore
*.ipynb
.venv/
.data/
.env
.vscode/
outputs/
connection.json
.gitignore
README.md
eval_cli.md
data/
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/user_intent_zero_shot.jinja2
You are given a list of orders with item_numbers from a customer and a statement from the customer. It is your job to identify the intent that the customer has with their statement. Possible intents can be: "product return", "product exchange", "general question", "product question", "other".

In triple backticks below is the customer information and a list of orders.
```
{{customer_info}}
```

In triple backticks below is the chat history with customer statements and replies from the customer service agent:
```
{{chat_history}}
```

What is the customer's `intent:` here? "product return", "product exchange", "general question", "product question" or "other"? Reply with only the intent string.
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/export/linux/flow/extract_intent_tool.py
import os

from promptflow import tool
from promptflow.connections import CustomConnection

from intent import extract_intent


@tool
def extract_intent_tool(chat_prompt, connection: CustomConnection) -> str:
    # set environment variables
    for key, value in dict(connection).items():
        os.environ[key] = value

    # call the entry function
    return extract_intent(
        chat_prompt=chat_prompt,
    )
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_invalid_default_value/pick_an_image.py
import random

from promptflow.contracts.multimedia import Image
from promptflow import tool


@tool
def pick_an_image(image_1: Image, image_2: Image) -> Image:
    if random.choice([True, False]):
        return image_1
    else:
        return image_2
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_invalid_default_value/flow.dag.yaml
inputs:
  image:
    type: image
    default: ""
outputs:
  output:
    type: image
    reference: ${python_node_2.output}
nodes:
- name: python_node
  type: python
  source:
    type: code
    path: pick_an_image.py
  inputs:
    image_1: ${inputs.image}
    image_2: logo_2.png
- name: python_node_2
  type: python
  source:
    type: code
    path: pick_an_image.py
  inputs:
    image_1: ${python_node.output}
    image_2: logo_2.png
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/csharp_flow/flow.dag.yaml
language: csharp
inputs:
  question:
    type: string
    default: what is promptflow?
outputs:
  answer:
    type: string
    reference: ${get_answer.output}
nodes:
- name: get_answer
  type: csharp
  source:
    type: package
    tool: (Basic)Basic.Flow.HelloWorld
  inputs:
    question: ${inputs.question}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/csharp_flow/inputs.jsonl
{"question": "What's promptflow1?"}
{"question": "What's promptflow2?"}
{"question": "What's promptflow3?"}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow-with-nan-inf/flow.dag.yaml
inputs:
  number:
    type: int
outputs:
  output:
    type: object
    reference: ${nan_inf.output}
nodes:
- name: nan_inf
  type: python
  source:
    type: code
    path: nan_inf.py
  inputs:
    number: ${inputs.number}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow-with-nan-inf/nan_inf.py
from promptflow import tool


@tool
def nan_inf(number: int):
    print(number)
    return {"nan": float("nan"), "inf": float("inf")}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_exception/show_answer.py
from promptflow import tool


@tool
def show_answer(chat_answer: str):
    raise Exception("mock exception")
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_exception/chat.jinja2
system:
You are a helpful assistant.

{% for item in chat_history %}
user:
{{item.inputs.question}}
assistant:
{{item.outputs.answer}}
{% endfor %}

user:
{{question}}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_exception/flow.dag.yaml
inputs:
  chat_history:
    type: list
  question:
    type: string
    is_chat_input: true
    default: What is ChatGPT?
outputs:
  answer:
    type: string
    reference: ${show_answer.output}
    is_chat_output: true
nodes:
- name: chat
  type: llm
  source:
    type: code
    path: chat.jinja2
  inputs:
    deployment_name: gpt-35-turbo
    max_tokens: "256"
    temperature: "0.7"
    chat_history: ${inputs.chat_history}
    question: ${inputs.question}
  api: chat
  provider: AzureOpenAI
  connection: azure_open_ai_connection
- name: show_answer
  type: python
  source:
    type: code
    path: show_answer.py
  inputs:
    chat_answer: ${chat.output}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_special_character/script_with_special_character.py
from promptflow import tool


@tool
def print_special_character(input1: str) -> str:
    # Add special character to test if file read is working.
    return "https://www.bing.com//"
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_special_character/script_with_special_character.meta.json
{
  "name": "script_with_special_character",
  "type": "python",
  "inputs": {
    "input1": {"type": ["string"]}
  },
  "source": "script_with_special_character.py",
  "function": "print_special_character"
}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/async_tools/async_passthrough.py
import asyncio

from promptflow import tool


@tool
async def passthrough_str_and_wait(input1: str, wait_seconds=3) -> str:
    assert isinstance(input1, str), f"input1 should be a string, got {input1}"
    print(f"Wait for {wait_seconds} seconds in async function")
    for i in range(wait_seconds):
        print(i)
        await asyncio.sleep(1)
    return input1
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/async_tools/flow.dag.yaml
inputs:
  input_str:
    type: string
    default: Hello
outputs:
  output1:
    type: string
    reference: ${async_passthrough1.output}
  output2:
    type: string
    reference: ${async_passthrough2.output}
nodes:
- name: async_passthrough
  type: python
  source:
    type: code
    path: async_passthrough.py
  inputs:
    input1: ${inputs.input_str}
    wait_seconds: 3
- name: async_passthrough1
  type: python
  source:
    type: code
    path: async_passthrough.py
  inputs:
    input1: ${async_passthrough.output}
    wait_seconds: 3
- name: async_passthrough2
  type: python
  source:
    type: code
    path: async_passthrough.py
  inputs:
    input1: ${async_passthrough.output}
    wait_seconds: 3
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/sync_fail.py
from promptflow import tool


def raise_exception(s):
    msg = f"In raise_exception: {s}"
    raise Exception(msg)


@tool
def raise_an_exception(s: str):
    try:
        raise_exception(s)
    except Exception as e:
        raise Exception(f"In tool raise_an_exception: {s}") from e
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/flow.dag.yaml
inputs:
  text:
    type: string
    default: dummy_input
outputs:
  output_prompt:
    type: string
    reference: ${sync_fail.output}
nodes:
- name: sync_fail
  type: python
  source:
    type: code
    path: sync_fail.py
  inputs:
    s: ${inputs.text}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/unordered_nodes/flow.dag.yaml
name: node_wrong_order
inputs:
  text:
    type: string
outputs:
  result:
    type: string
    reference: ${third_node}
nodes:
- name: third_node
  type: python
  source:
    type: code
    path: test.py
  inputs:
    text: ${second_node}
- name: first_node
  type: python
  source:
    type: code
    path: test.py
  inputs:
    text: ${inputs.text}
- name: second_node
  type: python
  source:
    type: code
    path: test.py
  inputs:
    text: ${first_node}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image_without_default/pick_an_image.py
import random

from promptflow.contracts.multimedia import Image
from promptflow import tool


@tool
def pick_an_image(image_1: Image, image_2: Image) -> Image:
    if random.choice([True, False]):
        return image_1
    else:
        return image_2
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image_without_default/flow.dag.yaml
inputs:
  image_1:
    type: image
  image_2:
    type: image
outputs:
  output:
    type: image
    reference: ${python_node.output}
nodes:
- name: python_node
  type: python
  source:
    type: code
    path: pick_an_image.py
  inputs:
    image_1: ${inputs.image_1}
    image_2: ${inputs.image_2}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_defined_chat_history/show_answer.py
from promptflow import tool


@tool
def show_answer(chat_answer: str):
    print("print:", chat_answer)
    return chat_answer
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_defined_chat_history/chat.jinja2
system:
You are a helpful assistant.

{% for item in chat_history %}
user:
{{item.inputs.question}}
assistant:
{{item.outputs.answer}}
{% endfor %}

user:
{{question}}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_defined_chat_history/flow.dag.yaml
inputs:
  user_chat_history:
    type: list
    is_chat_history: true
  question:
    type: string
    is_chat_input: true
    default: What is ChatGPT?
outputs:
  answer:
    type: string
    reference: ${show_answer.output}
    is_chat_output: true
nodes:
- name: chat_node
  type: llm
  source:
    type: code
    path: chat.jinja2
  inputs:
    deployment_name: gpt-35-turbo
    max_tokens: "256"
    temperature: "0.7"
    chat_history: ${inputs.user_chat_history}
    question: ${inputs.question}
  api: chat
  provider: AzureOpenAI
  connection: azure_open_ai_connection
- name: show_answer
  type: python
  source:
    type: code
    path: show_answer.py
  inputs:
    chat_answer: ${chat_node.output}
node_variants: {}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_input_dir/details.jsonl
{"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
{"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
{"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_script_tool_with_custom_strong_type_connection/data.jsonl
{"text": "Hello World!"}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_script_tool_with_custom_strong_type_connection/my_script_tool.py
from promptflow import tool
from promptflow.connections import CustomStrongTypeConnection
from promptflow.contracts.types import Secret


class MyCustomConnection(CustomStrongTypeConnection):
    """My custom strong type connection.

    :param api_key: The api key.
    :type api_key: Secret
    :param api_url: The api url.
    :type api_url: String
    """

    api_key: Secret
    api_url: str = "This is a fake api url."


@tool
def my_tool(connection: MyCustomConnection, input_param: str) -> str:
    # Replace with your tool code.
    # Use custom strong type connection like: connection.api_key, connection.api_url
    return f"connection_value is MyCustomConnection: {str(isinstance(connection, MyCustomConnection))}"
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_script_tool_with_custom_strong_type_connection/flow.dag.yaml
inputs:
  text:
    type: string
    default: this is an input
outputs:
  out:
    type: string
    reference: ${my_script_tool.output}
nodes:
- name: my_script_tool
  type: python
  source:
    type: code
    path: my_script_tool.py
  inputs:
    connection: custom_connection_2
    input_param: ${inputs.text}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/data.jsonl
{"groundtruth": "Tomorrow's weather will be sunny.", "prediction": "The weather will be sunny tomorrow."}
{"groundtruth": "Hello,", "prediction": "World."}
{"groundtruth": "Promptflow is a super easy-to-use tool, right?", "prediction": "Yes!"}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/line_process.py
from promptflow import tool


@tool
def line_process(groundtruth: str, prediction: str):
    processed_result = groundtruth + prediction
    return processed_result
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/aggregate.py
from typing import List

from promptflow import tool


@tool
def aggregate(processed_results: List[str]):
    aggregated_results = processed_results
    # raise error to test aggregation node failed
    num = 1 / 0
    return aggregated_results
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/expected_status_summary.json
{
  "line_process.completed": 3,
  "aggregate.failed": 1
}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/flow.dag.yaml
id: template_eval_flow
name: Template Evaluation Flow
inputs:
  groundtruth:
    type: string
    is_chat_input: false
  prediction:
    type: string
    is_chat_input: false
outputs:
  results:
    type: string
    reference: ${line_process.output}
nodes:
- name: line_process
  type: python
  source:
    type: code
    path: line_process.py
  inputs:
    groundtruth: ${inputs.groundtruth}
    prediction: ${inputs.prediction}
  use_variants: false
- name: aggregate
  type: python
  source:
    type: code
    path: aggregate.py
  inputs:
    processed_results: ${line_process.output}
  aggregation: true
  use_variants: false
node_variants: {}
environment:
  python_requirements_txt: requirements.txt
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/samples.json
[
  {
    "groundtruth": "Tomorrow's weather will be sunny.",
    "prediction": "The weather will be sunny tomorrow."
  },
  {
    "groundtruth": "Hello,",
    "prediction": "World."
  },
  {
    "groundtruth": "Promptflow is a super easy-to-use tool, right?",
    "prediction": "Yes!"
  }
]
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/assistant_definition.yaml
model: gpt-4-1106-preview
instructions: You are a helpful assistant.
tools:
- type: code_interpreter
- type: function
  source:
    type: code
    path: get_calorie_by_jogging.py
  tool_type: python
- type: function
  source:
    type: code
    path: get_calorie_by_swimming.py
  tool_type: python
- type: function
  source:
    type: code
    path: get_current_city.py
  tool_type: python
- type: function
  source:
    type: code
    path: get_temperature.py
  tool_type: python
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_or_create_thread.py
from openai import AsyncOpenAI

from promptflow import tool
from promptflow.connections import OpenAIConnection


@tool
async def get_or_create_thread(conn: OpenAIConnection, thread_id: str):
    if thread_id:
        return thread_id
    cli = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
    thread = await cli.beta.threads.create()
    return thread.id
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/data.jsonl
{"chat_history":[], "question": "If I am going to run with 1.5 hours this morning, how many calories will I burn?", "assistant_id": "asst_yWhdFYoCS1UatnRRQZGY85aL", "thread_id": ""}
{"chat_history":[], "question": "I'm going to swim in Guangzhou city today for 30 min, how much calories will I burn?", "assistant_id": "asst_yWhdFYoCS1UatnRRQZGY85aL", "thread_id": ""}
{"chat_history":[], "question": "I'm going to run slowly on local street today, how much calories will I burn?", "assistant_id": "asst_yWhdFYoCS1UatnRRQZGY85aL", "thread_id": ""}
{"chat_history":[], "question": "If I am going to run 1.5 hours under 24 degrees Celsius, how many calories will I burn", "assistant_id": "asst_yWhdFYoCS1UatnRRQZGY85aL", "thread_id": ""}
{"chat_history":[], "question": "I'm going to biking for 2 hours duration today, how much calories will I burn?", "assistant_id": "asst_yWhdFYoCS1UatnRRQZGY85aL", "thread_id": ""}
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_calorie_by_jogging.py
import random
import time

from promptflow import tool


@tool
def get_calorie_by_jogging(duration: float, temperature: float):
    """Estimate the calories burned by jogging based on duration and temperature.

    :param duration: the length of the jogging in hours.
    :type duration: float
    :param temperature: the environment temperature in degrees Celsius.
    :type temperature: float
    """
    print(
        f"Figure out the calories burned by jogging, with temperature of {temperature} degrees Celsius, "
        f"and duration of {duration} hours."
    )

    # Sleep a random duration between 0.2 and 1 second for tracing purpose
    time.sleep(random.uniform(0.2, 1))

    return random.randint(50, 100)
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_current_city.py
import random
import time

from promptflow import tool


@tool
def get_current_city():
    """Get current city."""
    # Sleep a random duration between 0.2 and 1 second for tracing purpose
    time.sleep(random.uniform(0.2, 1))
    return random.choice(["Beijing", "Shanghai"])
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_temperature.py
import random
import time

from promptflow import tool


@tool
def get_temperature(city: str, unit: str = "c"):
    """Estimate the current temperature of a given city.

    :param city: city to get the estimated temperature for.
    :type city: str
    :param unit: the unit of the temperature, either 'c' for Celsius or 'f' for Fahrenheit.
                 Defaults to Celsius ('c').
    :type unit: str
    """
    # Sleep a random duration between 0.2 and 1 second for tracing purpose
    time.sleep(random.uniform(0.2, 1))
    return random.uniform(0, 35)
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/add_message_and_run.py
import asyncio
import json

from openai import AsyncOpenAI
from openai.types.beta.threads import MessageContentImageFile, MessageContentText

from promptflow import tool, trace
from promptflow.connections import OpenAIConnection
from promptflow.contracts.multimedia import Image
from promptflow.contracts.types import AssistantDefinition
from promptflow.exceptions import SystemErrorException
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker

URL_PREFIX = "https://platform.openai.com/files/"
RUN_STATUS_POLLING_INTERVAL_IN_MILSEC = 1000


@tool
async def add_message_and_run(
    conn: OpenAIConnection,
    assistant_id: str,
    thread_id: str,
    message: list,
    assistant_definition: AssistantDefinition,
    download_images: bool,
):
    cli = await get_openai_api_client(conn)
    invoker = await get_assistant_tool_invoker(assistant_definition)
    # Check if assistant id is valid. If not, create a new assistant.
    # Note: tool registration at run creation, rather than at assistant creation.
    if not assistant_id:
        assistant = await create_assistant(cli, assistant_definition)
        assistant_id = assistant.id

    await add_message(cli, message, thread_id)

    run = await start_run(cli, assistant_id, thread_id, assistant_definition, invoker)

    await wait_for_run_complete(cli, thread_id, invoker, run)

    messages = await get_message(cli, thread_id)

    file_id_references = await get_openai_file_references(messages.data[0].content, download_images, conn)
    return {"content": to_pf_content(messages.data[0].content), "file_id_references": file_id_references}


@trace
async def get_openai_api_client(conn: OpenAIConnection):
    cli = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
    return cli


@trace
async def get_assistant_tool_invoker(assistant_definition: AssistantDefinition):
    invoker = AssistantToolInvoker.init(assistant_definition.tools)
    return invoker


@trace
async def create_assistant(cli: AsyncOpenAI, assistant_definition: AssistantDefinition):
    assistant = await cli.beta.assistants.create(
        instructions=assistant_definition.instructions, model=assistant_definition.model
    )
    print(f"Created assistant: {assistant.id}")
    return assistant


@trace
async def add_message(cli: AsyncOpenAI, message: list, thread_id: str):
    content = extract_text_from_message(message)
    file_ids = await extract_file_ids_from_message(cli, message)
    msg = await cli.beta.threads.messages.create(thread_id=thread_id, role="user", content=content, file_ids=file_ids)
    print(f"Created message message_id: {msg.id}, thread_id: {thread_id}")
    return msg


@trace
async def start_run(
    cli: AsyncOpenAI,
    assistant_id: str,
    thread_id: str,
    assistant_definition: AssistantDefinition,
    invoker: AssistantToolInvoker,
):
    tools = invoker.to_openai_tools()
    run = await cli.beta.threads.runs.create(
        assistant_id=assistant_id,
        thread_id=thread_id,
        model=assistant_definition.model,
        instructions=assistant_definition.instructions,
        tools=tools,
    )
    print(f"Assistant_id: {assistant_id}, thread_id: {thread_id}, run_id: {run.id}")
    return run


async def wait_for_status_check():
    await asyncio.sleep(RUN_STATUS_POLLING_INTERVAL_IN_MILSEC / 1000.0)


async def get_run_status(cli: AsyncOpenAI, thread_id: str, run_id: str):
    run = await cli.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
    print(f"Run status: {run.status}")
    return run


@trace
async def get_tool_calls_outputs(invoker: AssistantToolInvoker, run):
    tool_calls = run.required_action.submit_tool_outputs.tool_calls
    tool_outputs = []
    for tool_call in tool_calls:
        tool_name = tool_call.function.name
        tool_args = json.loads(tool_call.function.arguments)
        print(f"Invoking tool: {tool_call.function.name} with args: {tool_args}")
        output = invoker.invoke_tool(tool_name, tool_args)
        tool_outputs.append(
            {
                "tool_call_id": tool_call.id,
                "output": str(output),
            }
        )
        print(f"Tool output: {str(output)}")
    return tool_outputs


@trace
async def submit_tool_calls_outputs(cli: AsyncOpenAI, thread_id: str, run_id: str, tool_outputs: list):
    await cli.beta.threads.runs.submit_tool_outputs(thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs)
    print(f"Submitted all required responses for run: {run_id}")


@trace
async def require_actions(cli: AsyncOpenAI, thread_id: str, run, invoker: AssistantToolInvoker):
    tool_outputs = await get_tool_calls_outputs(invoker, run)
    await submit_tool_calls_outputs(cli, thread_id, run.id, tool_outputs)


@trace
async def wait_for_run_complete(cli: AsyncOpenAI, thread_id: str, invoker: AssistantToolInvoker, run):
    while run.status != "completed":
        await wait_for_status_check()
        run = await get_run_status(cli, thread_id, run.id)
        if run.status == "requires_action":
            await require_actions(cli, thread_id, run, invoker)
        elif run.status == "in_progress" or run.status == "completed":
            continue
        else:
            raise Exception(f"The assistant tool runs in '{run.status}' status. Message: {run.last_error.message}")


@trace
async def get_run_steps(cli: AsyncOpenAI, thread_id: str, run_id: str):
    run_steps = await cli.beta.threads.runs.steps.list(thread_id=thread_id, run_id=run_id)
    print("step details: \n")
    for step_data in run_steps.data:
        print(step_data.step_details)


@trace
async def get_message(cli: AsyncOpenAI, thread_id: str):
    messages = await cli.beta.threads.messages.list(thread_id=thread_id)
    return messages


def extract_text_from_message(message: list):
    content = []
    for m in message:
        if isinstance(m, str):
            content.append(m)
            continue
        message_type = m.get("type", "")
        if message_type == "text" and "text" in m:
            content.append(m["text"])
    return "\n".join(content)


async def extract_file_ids_from_message(cli: AsyncOpenAI, message: list):
    file_ids = []
    for m in message:
        if isinstance(m, str):
            continue
        message_type = m.get("type", "")
        if message_type == "file_path" and "file_path" in m:
            path = m["file_path"].get("path", "")
            if path:
                file = await cli.files.create(file=open(path, "rb"), purpose="assistants")
                file_ids.append(file.id)
    return file_ids


async def get_openai_file_references(content: list, download_image: bool, conn: OpenAIConnection):
    file_id_references = {}
    for item in content:
        if isinstance(item, MessageContentImageFile):
            file_id = item.image_file.file_id
            if download_image:
                file_id_references[file_id] = {
                    "content": await download_openai_image(file_id, conn),
                    "url": URL_PREFIX + file_id,
                }
            else:
                file_id_references[file_id] = {"url": URL_PREFIX + file_id}
        elif isinstance(item, MessageContentText):
            for annotation in item.text.annotations:
                if annotation.type == "file_path":
                    file_id = annotation.file_path.file_id
                    file_id_references[file_id] = {"url": URL_PREFIX + file_id}
                elif annotation.type == "file_citation":
                    file_id = annotation.file_citation.file_id
                    file_id_references[file_id] = {"url": URL_PREFIX + file_id}
        else:
            raise Exception(f"Unsupported content type: '{type(item)}'.")
    return file_id_references


def to_pf_content(content: list):
    pf_content = []
    for item in content:
        if isinstance(item, MessageContentImageFile):
            file_id = item.image_file.file_id
            pf_content.append({"type": "image_file", "image_file": {"file_id": file_id}})
        elif isinstance(item, MessageContentText):
            text_dict = {"type": "text", "text": {"value": item.text.value, "annotations": []}}
            for annotation in item.text.annotations:
                annotation_dict = {
                    "type": "file_path",
                    "text": annotation.text,
                    "start_index": annotation.start_index,
                    "end_index": annotation.end_index,
                }
                if annotation.type == "file_path":
                    annotation_dict["file_path"] = {"file_id": annotation.file_path.file_id}
                elif annotation.type == "file_citation":
                    annotation_dict["file_citation"] = {"file_id": annotation.file_citation.file_id}
                text_dict["text"]["annotations"].append(annotation_dict)
            pf_content.append(text_dict)
        else:
            raise SystemErrorException(f"Unsupported content type: {type(item)}")
    return pf_content


async def download_openai_image(file_id: str, conn: OpenAIConnection):
    cli = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
    image_data = await cli.files.content(file_id)
    return Image(image_data.read())
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/get_calorie_by_swimming.py
import random
import time

from promptflow import tool


@tool
def get_calorie_by_swimming(duration: float, temperature: float):
    """Estimate the calories burned by swimming based on duration and temperature.

    :param duration: the length of the swimming in hours.
    :type duration: float
    :param temperature: the environment temperature in degrees Celsius.
    :type temperature: float
    """
    print(
        f"Figure out the calories burned by swimming, with temperature of {temperature} degrees Celsius, "
        f"and duration of {duration} hours."
    )

    # Sleep a random duration between 0.2 and 1 second for tracing purpose
    time.sleep(random.uniform(0.2, 1))

    return random.randint(100, 200)
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/README.md
# Chat with Calorie Assistant

This sample demonstrates chatting with a PromptFlow Assistant tool that facilitates calorie calculations by considering your location, the duration of your exercise, and the type of sport. Currently, it supports two types of sports: jogging and swimming.

Tools used in this flow:
- `add_message_and_run` tool, assistant tool, provisioned with below inner functions:
  - `get_current_city()`: get current city
  - `get_temperature(city)`: get temperature of the city
  - `get_calorie_by_jogging(duration, temperature)`: calculate calories for jogging exercise
  - `get_calorie_by_swimming(duration, temperature)`: calculate calories for swimming exercise

## Prerequisites

Install promptflow sdk and other dependencies in this folder:

```sh
pip install -r requirements.txt
```

## What you will learn

In this flow, you will understand how assistant tools within PromptFlow are triggered by user prompts. The assistant tool decides which internal functions or tools to invoke based on the input provided. Your responsibility involves implementing each of these tools and registering them in the `assistant_definition`. Additionally, be aware that the tools may have dependencies on each other, affecting the order and manner of their invocation.

## Getting started

### 1. Create assistant connection (openai)

Go to the "Prompt flow" "Connections" tab. Click on the "Create" button, select one of the assistant tool supported connection types and fill in the configurations. Currently, only the "Open AI" connection type is supported for the assistant tool. Please refer to [OpenAI](https://platform.openai.com/) for more details.

```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/openai.yml --set api_key=<your_api_key>
```

Note in [flow.dag.yaml](flow.dag.yaml) we are using connection named `open_ai_connection`.

```bash
# show registered connection
pf connection show --name open_ai_connection
```

### 2. Create or get assistant/thread

Navigate to the OpenAI Assistant page and create an assistant if you haven't already. Once created, click on the 'Test' button to enter the assistant's playground. Make sure to note down the assistant_id.

**[Optional]** Start a chat session to create a thread automatically. Keep track of the thread_id.

### 3. Run the flow

```bash
# run chat flow with default question in flow.dag.yaml
pf flow test --flow . --interactive --multi-modal --user-agent "prompt-flow-extension/1.8.0 (win32; x64) VSCode/1.85.1"
```
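Beyond the interactive test, a batch run over the bundled `data.jsonl` could look like the sketch below (assuming the flow inputs map to the jsonl columns by name, as they do here):

```bash
# submit a batch run against every line of data.jsonl and stream the results
pf run create --flow . --data ./data.jsonl --stream
```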
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/flow.dag.yaml
environment:
  python_requirements_txt: requirements.txt
version: 2
inputs:
  chat_history:
    type: list
    is_chat_history: true
    default: []
  question:
    type: string
    is_chat_input: true
    default: I am going to swim today for 30 min in Guangzhou city, how much calories will I burn?
  assistant_id:
    type: string
    default: ""
  thread_id:
    type: string
    default: ""
outputs:
  answer:
    type: string
    reference: ${assistant.output}
    is_chat_output: true
  thread_id:
    type: string
    reference: ${get_or_create_thread.output}
nodes:
- name: get_or_create_thread
  type: python
  source:
    type: code
    path: get_or_create_thread.py
  inputs:
    conn: chw-manager-OpenAI
    thread_id: ${inputs.thread_id}
- name: assistant
  type: python
  source:
    type: code
    path: add_message_and_run.py
  inputs:
    conn: chw-manager-OpenAI
    message: ${inputs.question}
    assistant_id: ${inputs.assistant_id}
    thread_id: ${get_or_create_thread.output}
    download_images: true
    assistant_definition: assistant_definition.yaml
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/default_input/test_print_input.py
from promptflow import tool


@tool
def test_print_input(input_str: str, input_bool: bool, input_list: list, input_dict: dict):
    assert not input_bool
    assert input_list == []
    assert input_dict == {}
    print(input_str)
    return input_str
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/default_input/test_print_aggregation.py
from typing import List

from promptflow import tool


@tool
def test_print_input(input_str: List[str], input_bool: List[bool], input_list: List[List], input_dict: List[dict]):
    assert not input_bool[0]
    assert input_list[0] == []
    assert input_dict[0] == {}
    print(input_str)
    return input_str
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/default_input/flow.dag.yaml
inputs:
  input_str:
    type: string
    default: input value from default
  input_bool:
    type: bool
    default: False
  input_list:
    type: list
    default: []
  input_dict:
    type: object
    default: {}
outputs:
  output:
    type: string
    reference: ${test_print_input.output}
nodes:
- name: test_print_input
  type: python
  source:
    type: code
    path: test_print_input.py
  inputs:
    input_str: ${inputs.input_str}
    input_bool: ${inputs.input_bool}
    input_list: ${inputs.input_list}
    input_dict: ${inputs.input_dict}
- name: aggregate_node
  type: python
  source:
    type: code
    path: test_print_aggregation.py
  inputs:
    input_str: ${inputs.input_str}
    input_bool: ${inputs.input_bool}
    input_list: ${inputs.input_list}
    input_dict: ${inputs.input_dict}
  aggregation: true
  use_variants: false
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/default_input/samples.json
[
  {
    "text": "text_1"
  }
]
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/assistant_definition.yaml
model: gpt-4-1106-preview
instructions: You are a helpful assistant.
tools:
- type: code_interpreter
- type: function
  source:
    type: code
    path: get_stock_eod_price.py
  tool_type: python
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/get_or_create_thread.py
from openai import AsyncOpenAI

from promptflow import tool
from promptflow.connections import OpenAIConnection


@tool
async def get_or_create_thread(conn: OpenAIConnection, thread_id: str):
    if thread_id:
        return thread_id
    cli = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
    thread = await cli.beta.threads.create()
    return thread.id
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/requirements.txt
promptflow
promptflow-tools
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/add_message_and_run.py
import asyncio
import json

from openai import AsyncOpenAI
from openai.types.beta.threads import MessageContentImageFile, MessageContentText

from promptflow import tool, trace
from promptflow.connections import OpenAIConnection
from promptflow.contracts.multimedia import Image
from promptflow.contracts.types import AssistantDefinition
from promptflow.exceptions import SystemErrorException
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker

URL_PREFIX = "https://platform.openai.com/files/"
RUN_STATUS_POLLING_INTERVAL_IN_MILLISEC = 1000


@tool
async def add_message_and_run(
    conn: OpenAIConnection,
    assistant_id: str,
    thread_id: str,
    message: list,
    assistant_definition: AssistantDefinition,
    download_images: bool,
):
    cli = await get_openai_api_client(conn)
    invoker = await get_assistant_tool_invoker(assistant_definition)
    # Check if the assistant id is valid. If not, create a new assistant.
    # Note: tools are registered at run creation, rather than at assistant creation.
    if not assistant_id:
        assistant = await create_assistant(cli, assistant_definition)
        assistant_id = assistant.id

    await add_message(cli, message, thread_id)
    run = await start_run(cli, assistant_id, thread_id, assistant_definition, invoker)
    await wait_for_run_complete(cli, thread_id, invoker, run)

    messages = await get_message(cli, thread_id)
    file_id_references = await get_openai_file_references(messages.data[0].content, download_images, conn)
    return {"content": to_pf_content(messages.data[0].content), "file_id_references": file_id_references}


@trace
async def get_openai_api_client(conn: OpenAIConnection):
    cli = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
    return cli


@trace
async def get_assistant_tool_invoker(assistant_definition: AssistantDefinition):
    invoker = AssistantToolInvoker.init(assistant_definition.tools)
    return invoker


@trace
async def create_assistant(cli: AsyncOpenAI, assistant_definition: AssistantDefinition):
    assistant = await cli.beta.assistants.create(
        instructions=assistant_definition.instructions, model=assistant_definition.model
    )
    print(f"Created assistant: {assistant.id}")
    return assistant


@trace
async def add_message(cli: AsyncOpenAI, message: list, thread_id: str):
    content = extract_text_from_message(message)
    file_ids = await extract_file_ids_from_message(cli, message)
    msg = await cli.beta.threads.messages.create(thread_id=thread_id, role="user", content=content, file_ids=file_ids)
    print(f"Created message, message_id: {msg.id}, thread_id: {thread_id}")
    return msg


@trace
async def start_run(
    cli: AsyncOpenAI,
    assistant_id: str,
    thread_id: str,
    assistant_definition: AssistantDefinition,
    invoker: AssistantToolInvoker,
):
    tools = invoker.to_openai_tools()
    run = await cli.beta.threads.runs.create(
        assistant_id=assistant_id,
        thread_id=thread_id,
        model=assistant_definition.model,
        instructions=assistant_definition.instructions,
        tools=tools,
    )
    print(f"Assistant_id: {assistant_id}, thread_id: {thread_id}, run_id: {run.id}")
    return run


async def wait_for_status_check():
    await asyncio.sleep(RUN_STATUS_POLLING_INTERVAL_IN_MILLISEC / 1000.0)


async def get_run_status(cli: AsyncOpenAI, thread_id: str, run_id: str):
    run = await cli.beta.threads.runs.retrieve(thread_id=thread_id, run_id=run_id)
    print(f"Run status: {run.status}")
    return run


@trace
async def get_tool_calls_outputs(invoker: AssistantToolInvoker, run):
    tool_calls = run.required_action.submit_tool_outputs.tool_calls
    tool_outputs = []
    for tool_call in tool_calls:
        tool_name = tool_call.function.name
        tool_args = json.loads(tool_call.function.arguments)
        print(f"Invoking tool: {tool_call.function.name} with args: {tool_args}")
        output = invoker.invoke_tool(tool_name, tool_args)
        tool_outputs.append(
            {
                "tool_call_id": tool_call.id,
                "output": str(output),
            }
        )
        print(f"Tool output: {str(output)}")
    return tool_outputs


@trace
async def submit_tool_calls_outputs(cli: AsyncOpenAI, thread_id: str, run_id: str, tool_outputs: list):
    await cli.beta.threads.runs.submit_tool_outputs(thread_id=thread_id, run_id=run_id, tool_outputs=tool_outputs)
    print(f"Submitted all required responses for run: {run_id}")


@trace
async def require_actions(cli: AsyncOpenAI, thread_id: str, run, invoker: AssistantToolInvoker):
    tool_outputs = await get_tool_calls_outputs(invoker, run)
    await submit_tool_calls_outputs(cli, thread_id, run.id, tool_outputs)


@trace
async def wait_for_run_complete(cli: AsyncOpenAI, thread_id: str, invoker: AssistantToolInvoker, run):
    # Poll the run until it completes, answering required tool calls as they come in.
    while run.status != "completed":
        await wait_for_status_check()
        run = await get_run_status(cli, thread_id, run.id)
        if run.status == "requires_action":
            await require_actions(cli, thread_id, run, invoker)
        elif run.status in ("in_progress", "completed"):
            continue
        else:
            raise Exception(f"The assistant tool runs in '{run.status}' status. Message: {run.last_error.message}")


@trace
async def get_run_steps(cli: AsyncOpenAI, thread_id: str, run_id: str):
    run_steps = await cli.beta.threads.runs.steps.list(thread_id=thread_id, run_id=run_id)
    print("step details: \n")
    for step_data in run_steps.data:
        print(step_data.step_details)


@trace
async def get_message(cli: AsyncOpenAI, thread_id: str):
    messages = await cli.beta.threads.messages.list(thread_id=thread_id)
    return messages


def extract_text_from_message(message: list):
    content = []
    for m in message:
        if isinstance(m, str):
            content.append(m)
            continue
        message_type = m.get("type", "")
        if message_type == "text" and "text" in m:
            content.append(m["text"])
    return "\n".join(content)


async def extract_file_ids_from_message(cli: AsyncOpenAI, message: list):
    file_ids = []
    for m in message:
        if isinstance(m, str):
            continue
        message_type = m.get("type", "")
        if message_type == "file_path" and "file_path" in m:
            path = m["file_path"].get("path", "")
            if path:
                with open(path, "rb") as f:
                    file = await cli.files.create(file=f, purpose="assistants")
                file_ids.append(file.id)
    return file_ids


async def get_openai_file_references(content: list, download_image: bool, conn: OpenAIConnection):
    file_id_references = {}
    for item in content:
        if isinstance(item, MessageContentImageFile):
            file_id = item.image_file.file_id
            if download_image:
                file_id_references[file_id] = {
                    "content": await download_openai_image(file_id, conn),
                    "url": URL_PREFIX + file_id,
                }
            else:
                file_id_references[file_id] = {"url": URL_PREFIX + file_id}
        elif isinstance(item, MessageContentText):
            for annotation in item.text.annotations:
                if annotation.type == "file_path":
                    file_id = annotation.file_path.file_id
                    file_id_references[file_id] = {"url": URL_PREFIX + file_id}
                elif annotation.type == "file_citation":
                    file_id = annotation.file_citation.file_id
                    file_id_references[file_id] = {"url": URL_PREFIX + file_id}
        else:
            raise Exception(f"Unsupported content type: '{type(item)}'.")
    return file_id_references


def to_pf_content(content: list):
    pf_content = []
    for item in content:
        if isinstance(item, MessageContentImageFile):
            file_id = item.image_file.file_id
            pf_content.append({"type": "image_file", "image_file": {"file_id": file_id}})
        elif isinstance(item, MessageContentText):
            text_dict = {"type": "text", "text": {"value": item.text.value, "annotations": []}}
            for annotation in item.text.annotations:
                annotation_dict = {
                    "type": annotation.type,
                    "text": annotation.text,
                    "start_index": annotation.start_index,
                    "end_index": annotation.end_index,
                }
                if annotation.type == "file_path":
                    annotation_dict["file_path"] = {"file_id": annotation.file_path.file_id}
                elif annotation.type == "file_citation":
                    annotation_dict["file_citation"] = {"file_id": annotation.file_citation.file_id}
                text_dict["text"]["annotations"].append(annotation_dict)
            pf_content.append(text_dict)
        else:
            raise SystemErrorException(f"Unsupported content type: {type(item)}")
    return pf_content


async def download_openai_image(file_id: str, conn: OpenAIConnection):
    cli = AsyncOpenAI(api_key=conn.api_key, organization=conn.organization)
    image_data = await cli.files.content(file_id)
    return Image(image_data.read())
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/README.md
# Stock EOD Price Analyzer

This sample demonstrates how the PromptFlow Assistant tool helps with time series data (stock EOD price) retrieval, plotting, and consolidation.

Tools used in this flow:
- `get_or_create_thread` tool, a python tool, used to provide assistant thread information if absent
- `add_message_and_run` tool, an assistant tool, provisioned with the inner function below:
  - `get_stock_eod_price`: get the stock EOD price based on date and company name

## Prerequisites

Install promptflow sdk and other dependencies in this folder:

```bash
pip install -r requirements.txt
```

## What you will learn

In this flow, you will understand how assistant tools within PromptFlow are triggered by user prompts. The assistant tool decides which internal functions or tools to invoke based on the input provided. Your responsibility involves implementing each of these tools and registering them in the `assistant_definition` (see the sketch at the end of this README). Additionally, be aware that the tools may have dependencies on each other, affecting the order and manner of their invocation.

## Getting started

### 1. Create assistant connection (openai)

Go to the "Prompt flow" "Connections" tab. Click the "Create" button, select one of the connection types supported by the LLM tool, and fill in the configurations.

Currently, only the "Open AI" connection type is supported for the assistant tool. Please refer to [OpenAI](https://platform.openai.com/) for more details.

```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key>
```

Note in [flow.dag.yaml](flow.dag.yaml) we are using connection named `open_ai_connection`.

```bash
# show registered connection
pf connection show --name open_ai_connection
```

### 2. Create or get assistant/thread

Navigate to the OpenAI Assistant page and create an assistant if you haven't already. Once created, click on the 'Test' button to enter the assistant's playground. Make sure to note down the assistant_id.

**[Optional]** Start a chat session to create a thread automatically. Keep track of the thread_id.

### 3. Run the flow

```bash
# run chat flow with default question in flow.dag.yaml
pf flow test --flow .
```
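
### Example: an inner function tool (sketch)

The following is a minimal sketch of what an inner function looks like; the real implementation used by this flow lives in [get_stock_eod_price.py](get_stock_eod_price.py), and the function must also be listed in the `assistant_definition` file before the assistant can call it. The body here is a placeholder, not the actual pricing logic.

```python
from promptflow import tool


@tool
def get_stock_eod_price(date: str, company: str):
    """Get the stock end-of-day price by date and company name."""
    # Placeholder: look up or compute the EOD price for `company` on `date`.
    return 0.0
```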
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/flow.dag.yaml
version: 2
inputs:
  assistant_input:
    type: list
    default:
    - type: text
      text: The provided file contains end-of-day (EOD) stock prices for companies A and B across various dates in March. However, it does not include the EOD stock prices for Company C.
    - type: file_path
      file_path:
        path: ./stock_price.csv
    - type: text
      text: Please draw a line chart with the stock price of the companies A, B and C and return a CSV file with the data.
  assistant_id:
    type: string
    default: asst_eHO2rwEYqGH3pzzHHov2kBCG
  thread_id:
    type: string
    default: ""
outputs:
  assistant_output:
    type: string
    reference: ${add_message_and_run.output}
  thread_id:
    type: string
    reference: ${get_or_create_thread.output}
nodes:
- name: get_or_create_thread
  type: python
  source:
    type: code
    path: get_or_create_thread.py
  inputs:
    conn: chw_openai
    thread_id: ${inputs.thread_id}
- name: add_message_and_run
  type: python
  source:
    type: code
    path: add_message_and_run.py
  inputs:
    conn: chw_openai
    message: ${inputs.assistant_input}
    assistant_id: ${inputs.assistant_id}
    thread_id: ${get_or_create_thread.output}
    assistant_definition: assistant_definition.yaml
    download_images: true
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/get_stock_eod_price.py
import random
import time

from promptflow import tool


@tool
def get_stock_eod_price(date: str, company: str):
    """Get the stock end-of-day price by date and company name.

    :param date: the date of the stock price. e.g. 2021-01-01
    :type date: str
    :param company: the company name like A, B, C
    :type company: str
    """
    print(f"Try to get the stock end of day price by date {date} and company {company}.")
    # Sleep a random number between 0.2s and 1s for tracing purpose
    time.sleep(random.uniform(0.2, 1))
    return random.uniform(110, 130)
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/stock_price.csv
Date,A,B
2023-03-15,100.25,110.50
2023-03-16,102.75,114.35
2023-03-17,101.60,120.10
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_stream_tools/echo_input.py
from promptflow import tool


@tool
def my_python_tool(input: str) -> str:
    yield "Echo: "
    for word in input.split():
        yield word + " "
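

# Usage sketch (illustrative only): because the tool is a generator, callers
# can consume the streamed chunks incrementally, e.g.:
#
#     for chunk in my_python_tool("Hello World!"):
#         print(chunk, end="")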
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_stream_tools/flow.dag.yaml
inputs:
  text:
    type: string
outputs:
  output_echo:
    type: string
    reference: ${echo_my_input.output}
nodes:
- name: echo_my_input
  type: python
  source:
    type: code
    path: echo_input.py
  inputs:
    input: ${inputs.text}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_stream_tools/inputs.json
{ "text": "Hello World!" }
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/classify_with_llm.jinja2
system:
Your task is to classify a given url into one of the following types: Movie, App, Academic, Channel, Profile, PDF or None based on the text content information. The classification will be based on the url, the webpage text content summary, or both.

user:
Here are a few examples:
{% for ex in examples %}
URL: {{ex.url}}
Text content: {{ex.text_content}}
OUTPUT:
{"category": "{{ex.category}}", "evidence": "{{ex.evidence}}"}
{% endfor %}

For a given URL: {{url}}, and text content: {{text_content}}.
Classify above url to complete the category and indicate evidence.
OUTPUT:
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/fetch_text_content_from_url.py
import bs4
import requests

from promptflow import tool


@tool
def fetch_text_content_from_url(url: str):
    # Send a request to the URL
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            soup.prettify()
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: {response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/convert_to_dict.py
from promptflow import tool


@tool
def convert_to_dict(input_str: str):
    raise Exception("mock exception")
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/summarize_text_content__variant_1.jinja2
system:
Please summarize some keywords of this paragraph and give some details of each keyword. Do not add any information that is not in the text.

user:
Text: {{text}}
Summary:
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/summarize_text_content.jinja2
system:
Please summarize the following text in one paragraph. 100 words. Do not add any information that is not in the text.

user:
Text: {{text}}
Summary:
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/flow.dag.yaml
inputs:
  url:
    type: string
    default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
outputs:
  category:
    type: string
    reference: ${convert_to_dict.output.category}
  evidence:
    type: string
    reference: ${convert_to_dict.output.evidence}
nodes:
- name: fetch_text_content_from_url
  type: python
  source:
    type: code
    path: fetch_text_content_from_url.py
  inputs:
    url: ${inputs.url}
- name: summarize_text_content
  type: llm
  use_variants: true
- name: prepare_examples
  type: python
  source:
    type: code
    path: prepare_examples.py
  inputs: {}
- name: classify_with_llm
  type: llm
  source:
    type: code
    path: classify_with_llm.jinja2
  inputs:
    deployment_name: gpt-35-turbo
    suffix: ''
    max_tokens: '128'
    temperature: '0.1'
    top_p: '1.0'
    logprobs: ''
    echo: 'False'
    stop: ''
    presence_penalty: '0'
    frequency_penalty: '0'
    best_of: '1'
    logit_bias: ''
    url: ${inputs.url}
    examples: ${prepare_examples.output}
    text_content: ${summarize_text_content.output}
  provider: AzureOpenAI
  connection: azure_open_ai_connection
  api: chat
  module: promptflow.tools.aoai
- name: convert_to_dict
  type: python
  source:
    type: code
    path: convert_to_dict.py
  inputs:
    input_str: ${classify_with_llm.output}
node_variants:
  summarize_text_content:
    default_variant_id: variant_1
    variants:
      variant_0:
        node:
          type: llm
          source:
            type: code
            path: summarize_text_content.jinja2
          inputs:
            deployment_name: gpt-35-turbo
            suffix: ''
            max_tokens: '128'
            temperature: '0.2'
            top_p: '1.0'
            logprobs: ''
            echo: 'False'
            stop: ''
            presence_penalty: '0'
            frequency_penalty: '0'
            best_of: '1'
            logit_bias: ''
            text: ${fetch_text_content_from_url.output}
          provider: AzureOpenAI
          connection: azure_open_ai_connection
          api: chat
          module: promptflow.tools.aoai
      variant_1:
        node:
          type: llm
          source:
            type: code
            path: summarize_text_content__variant_1.jinja2
          inputs:
            deployment_name: gpt-35-turbo
            suffix: ''
            max_tokens: '256'
            temperature: '0.3'
            top_p: '1.0'
            logprobs: ''
            echo: 'False'
            stop: ''
            presence_penalty: '0'
            frequency_penalty: '0'
            best_of: '1'
            logit_bias: ''
            text: ${fetch_text_content_from_url.output}
          provider: AzureOpenAI
          connection: azure_open_ai_connection
          api: chat
          module: promptflow.tools.aoai
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/prepare_examples.py
from promptflow import tool


@tool
def prepare_examples():
    return [
        {
            "url": "https://play.google.com/store/apps/details?id=com.spotify.music",
            "text_content": "Spotify is a free music and podcast streaming app with millions of songs, albums, and original podcasts. It also offers audiobooks, so users can enjoy thousands of stories. It has a variety of features such as creating and sharing music playlists, discovering new music, and listening to popular and exclusive podcasts. It also has a Premium subscription option which allows users to download and listen offline, and access ad-free music. It is available on all devices and has a variety of genres and artists to choose from.",
            "category": "App",
            "evidence": "Both",
        },
        {
            "url": "https://www.youtube.com/channel/UC_x5XG1OV2P6uZZ5FSM9Ttw",
            "text_content": "NFL Sunday Ticket is a service offered by Google LLC that allows users to watch NFL games on YouTube. It is available in 2023 and is subject to the terms and privacy policy of Google LLC. It is also subject to YouTube's terms of use and any applicable laws.",
            "category": "Channel",
            "evidence": "URL",
        },
        {
            "url": "https://arxiv.org/abs/2303.04671",
            "text_content": "Visual ChatGPT is a system that enables users to interact with ChatGPT by sending and receiving not only languages but also images, providing complex visual questions or visual editing instructions, and providing feedback and asking for corrected results. It incorporates different Visual Foundation Models and is publicly available. Experiments show that Visual ChatGPT opens the door to investigating the visual roles of ChatGPT with the help of Visual Foundation Models.",
            "category": "Academic",
            "evidence": "Text content",
        },
        {
            "url": "https://ab.politiaromana.ro/",
            "text_content": "There is no content available for this text.",
            "category": "None",
            "evidence": "None",
        },
    ]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/samples.json
[
  {
    "url": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h"
  },
  {
    "url": "https://www.microsoft.com/en-us/windows/"
  }
]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import/fail.py
from aaa import bbb # noqa: F401
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import/flow.dag.yaml
inputs:
  text:
    type: string
outputs:
  output:
    type: string
    reference: ${node1.output}
nodes:
- name: node1
  type: python
  source:
    type: code
    path: dummy_utils/main.py
  inputs:
    x: ${inputs.text}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import/dummy_utils/main.meta.json
{ "name": "main", "type": "python", "inputs": { "x": { "type": [ "string" ] } }, "source": "dummy_utils/main.py", "function": "main" }
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import/dummy_utils/main.py
from promptflow import tool

from dummy_utils.util_tool import passthrough


@tool
def main(x: str):
    return passthrough(x)
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/script_with_import/dummy_utils/util_tool.py
from promptflow import tool


@tool
def passthrough(x: str):
    return x
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_ignore_file/fetch_text_content_from_url.py
import bs4
import requests

from promptflow import tool


@tool
def fetch_text_content_from_url(url: str):
    # Send a request to the URL
    try:
        # time.sleep(130)
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            soup.prettify()
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: "
                f"{response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_ignore_file/flow.dag.yaml
id: web_classification
inputs:
  url:
    default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
    is_chat_input: false
    type: string
nodes:
- inputs:
    url: ${inputs.url}
  name: fetch_text_content_from_url
  reduce: false
  source:
    path: fetch_text_content_from_url.py
    type: code
  type: python
outputs:
  text:
    evaluation_only: false
    is_chat_output: false
    reference: ${fetch_text_content_from_url.output}
    type: string
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_ignore_file/.amlignore
ignored_folder
*.ignored
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_composite_image/merge_images.py
from promptflow import tool
from promptflow.contracts.multimedia import Image


@tool
def merge_images(image_list: list, image_dict: list):
    res = set()
    for item in image_list[0]:
        res.add(item)
    for _, v in image_dict[0].items():
        res.add(v)
    assert all(isinstance(item, Image) for item in res)
    return list(res)
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_composite_image/passthrough_list.py
from promptflow import tool
from promptflow.contracts.multimedia import Image


@tool
def passthrough_list(image_list: list, image_dict: dict):
    assert all(isinstance(item, Image) for item in image_list)
    return image_list
0