Dataset schema:
  repo_id: string (length 15 to 132)
  file_path: string (length 34 to 176)
  content: string (length 2 to 3.52M)
  __index_level_0__: int64 (0 to 0)
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/all_depedencies_bypassed_with_activate_met/pass_through.py
from promptflow import tool


@tool
def pass_through(input1: str="Execution") -> str:
    return input1
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/all_depedencies_bypassed_with_activate_met/expected_result.json
[
  {
    "expected_node_count": 2,
    "expected_outputs": {
      "output": "Execution"
    },
    "expected_bypassed_nodes": ["nodeA"]
  }
]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/all_depedencies_bypassed_with_activate_met/flow.dag.yaml
inputs:
  text:
    type: string
    default: hi
outputs:
  output:
    type: string
    reference: ${nodeB.output}
nodes:
- name: nodeA
  type: python
  source:
    type: code
    path: pass_through.py
  inputs:
    input1: ${inputs.text}
  activate:
    when: ${inputs.text}
    is: world
- name: nodeB
  type: python
  source:
    type: code
    path: pass_through.py
  inputs:
    input1: ${nodeA.output}
  activate:
    when: ${inputs.text}
    is: hi
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/pick_an_image.py
import random

from promptflow.contracts.multimedia import Image
from promptflow import tool


@tool
def pick_an_image(image_1: Image, image_2: Image) -> Image:
    if random.choice([True, False]):
        return image_1
    else:
        return image_2
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/inputs.jsonl
{"image": {"data:image/png;path":"logo.jpg"}}
{"image": {"data:image/png;path":"logo_2.png"}}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/flow.dag.yaml
inputs:
  image:
    type: image
    default: logo.jpg
outputs:
  output:
    type: image
    reference: ${python_node_2.output}
nodes:
- name: python_node
  type: python
  source:
    type: code
    path: pick_an_image.py
  inputs:
    image_1: ${inputs.image}
    image_2: logo_2.png
- name: python_node_2
  type: python
  source:
    type: code
    path: pick_an_image.py
  inputs:
    image_1: ${python_node.output}
    image_2: logo_2.png
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/image_inputs/inputs.jsonl
{"image": {"data:image/png;path":"logo_1.png"}}
{"image": {"data:image/png;path":"logo_2.png"}}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_ignore_file/fetch_text_content_from_url.py
import bs4
import requests

from promptflow import tool


@tool
def fetch_text_content_from_url(url: str):
    # Send a request to the URL
    try:
        # time.sleep(130)
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            soup.prettify()
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: "
                f"{response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_ignore_file/.amlignore
ignored_folder
*.ignored
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_ignore_file/flow.dag.yaml
id: web_classification
inputs:
  url:
    default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
    is_chat_input: false
    type: string
nodes:
- inputs:
    url: ${inputs.url}
  name: fetch_text_content_from_url
  reduce: false
  source:
    path: fetch_text_content_from_url.py
    type: code
  type: python
outputs:
  text:
    evaluation_only: false
    is_chat_output: false
    reference: ${fetch_text_content_from_url.output}
    type: string
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/custom_connection_flow/flow.dag.yaml
inputs:
  key:
    type: string
outputs:
  output:
    type: string
    reference: ${print_env.output.value}
nodes:
- name: print_env
  type: python
  source:
    type: code
    path: print_env.py
  inputs:
    key: ${inputs.key}
    connection: custom_connection
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/custom_connection_flow/print_env.py
import os

from promptflow import tool
from promptflow.connections import CustomConnection


@tool
def get_env_var(key: str, connection: CustomConnection):
    # get from env var
    return {"value": os.environ.get(key)}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/custom_connection_flow
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/custom_connection_flow/.promptflow/flow.tools.json
{
  "package": {},
  "code": {
    "print_env.py": {
      "type": "python",
      "inputs": {
        "key": {
          "type": ["string"]
        }
      },
      "function": "get_env_var"
    }
  }
}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_script_tool_with_custom_strong_type_connection/data.jsonl
{"text": "Hello World!"}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_script_tool_with_custom_strong_type_connection/my_script_tool.py
from promptflow import tool
from promptflow.connections import CustomStrongTypeConnection, CustomConnection
from promptflow.contracts.types import Secret


class MyCustomConnection(CustomStrongTypeConnection):
    """My custom strong type connection.

    :param api_key: The api key.
    :type api_key: String
    :param api_base: The api base.
    :type api_base: String
    """
    api_key: Secret
    api_url: str = "This is a fake api url."


@tool
def my_tool(connection: MyCustomConnection, input_param: str) -> str:
    # Replace with your tool code.
    # Use custom strong type connection like: connection.api_key, connection.api_url
    return f"connection_value is MyCustomConnection: {str(isinstance(connection, MyCustomConnection))}"
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_script_tool_with_custom_strong_type_connection/flow.dag.yaml
inputs:
  text:
    type: string
    default: this is an input
outputs:
  out:
    type: string
    reference: ${my_script_tool.output}
nodes:
- name: my_script_tool
  type: python
  source:
    type: code
    path: my_script_tool.py
  inputs:
    connection: custom_connection_2
    input_param: ${inputs.text}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/unordered_nodes_with_skip/flow.dag.yaml
name: node_wrong_order
inputs:
  text:
    type: string
  skip:
    type: bool
outputs:
  result:
    type: string
    reference: ${third_node}
nodes:
- name: third_node
  type: python
  source:
    type: code
    path: test.py
  inputs:
    text: ${second_node}
- name: first_node
  type: python
  source:
    type: code
    path: test.py
  inputs:
    text: ${inputs.text}
- name: second_node
  type: python
  source:
    type: code
    path: test.py
  inputs:
    text: ${first_node}
  skip:
    when: ${inputs.skip}
    is: true
    return: ${inputs.text}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_aggregation/passthrough.py
from promptflow import tool


@tool
def passthrough(input: str):
    return input
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_aggregation/accuracy.py
from promptflow import tool, log_metric
from typing import List


@tool
def accuracy(answer: List[str], groundtruth: List[str]):
    assert isinstance(answer, list)
    correct = 0
    for a, g in zip(answer, groundtruth):
        if a == g:
            correct += 1
    accuracy = float(correct) / len(answer)
    log_metric("accuracy", accuracy)
    return accuracy
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_aggregation/flow.dag.yaml
inputs:
  text:
    type: string
    default: "play"
outputs:
  answer:
    type: string
    reference: ${passthrough.output}
nodes:
- name: passthrough
  type: python
  source:
    type: code
    path: passthrough.py
  inputs:
    input: ${inputs.text}
- name: accuracy
  type: python
  source:
    type: code
    path: accuracy.py
  inputs:
    answer: ${passthrough.output}
    groundtruth: ${inputs.text}
  aggregation: True
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/samples.json
[
  {"idx": 1, "mod": 3, "mod_2": 5},
  {"idx": 2, "mod": 3, "mod_2": 5},
  {"idx": 3, "mod": 3, "mod_2": 5},
  {"idx": 4, "mod": 3, "mod_2": 5},
  {"idx": 5, "mod": 3, "mod_2": 5},
  {"idx": 6, "mod": 3, "mod_2": 5},
  {"idx": 7, "mod": 3, "mod_2": 5},
  {"idx": 8, "mod": 3, "mod_2": 5},
  {"idx": 9, "mod": 3, "mod_2": 5},
  {"idx": 10, "mod": 3, "mod_2": 5}
]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/my_python_tool_with_failed_line.py
from promptflow import tool


@tool
def my_python_tool_with_failed_line(idx: int, mod) -> int:
    if idx % mod == 0:
        raise Exception("Failed")
    return idx
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/expected_status_summary.json
{
  "my_python_tool_with_failed_line_1.completed": 7,
  "my_python_tool_with_failed_line_1.failed": 3,
  "my_python_tool_with_failed_line_2.completed": 5,
  "my_python_tool_with_failed_line_2.failed": 2
}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/flow.dag.yaml
inputs:
  idx:
    type: int
  mod:
    type: int
  mod_2:
    type: int
outputs:
  output:
    type: int
    reference: ${my_python_tool_with_failed_line_2.output}
nodes:
- name: my_python_tool_with_failed_line_1
  type: python
  source:
    type: code
    path: my_python_tool_with_failed_line.py
  inputs:
    idx: ${inputs.idx}
    mod: ${inputs.mod}
- name: my_python_tool_with_failed_line_2
  type: python
  source:
    type: code
    path: my_python_tool_with_failed_line.py
  inputs:
    idx: ${my_python_tool_with_failed_line_1.output}
    mod: ${inputs.mod_2}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/inputs/data.jsonl
{"mod": 2, "mod_2": 5}
{"mod": 2, "mod_2": 5}
{"mod": 2, "mod_2": 5}
{"mod": 2, "mod_2": 5}
{"mod": 2, "mod_2": 5}
{"mod": 2, "mod_2": 5}
{"mod": 2, "mod_2": 5}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/inputs/output.jsonl
{"idx": 1, "line_number": 0}
{"idx": 2, "line_number": 1}
{"idx": 4, "line_number": 3}
{"idx": 5, "line_number": 4}
{"idx": 7, "line_number": 6}
{"idx": 8, "line_number": 7}
{"idx": 10, "line_number": 9}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/failed_flow/hello.py
import os

import openai
from dotenv import load_dotenv
from promptflow import tool

# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signature per need


def to_bool(value) -> bool:
    return str(value).lower() == "true"


@tool
def my_python_tool(input1: str) -> str:
    return 'hello '
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/failed_flow/flow.dag.yaml
inputs:
  text:
    type: string
outputs:
  output_prompt:
    type: string
    reference: ${echo_my_prompt.output}
nodes:
- inputs:
    text: ${inputs.text}
  name: echo_my_prompt
  type: python
  source:
    type: code
    path: hello.py
node_variants: {}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_fetch_url/fetch_text_content_from_url.py
import bs4
import requests

from promptflow import tool


@tool
def fetch_text_content_from_url(url: str):
    # Send a request to the URL
    try:
        # time.sleep(130)
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
            "Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            soup.prettify()
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: "
                f"{response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_fetch_url/flow.dag.yaml
id: web_classification
inputs:
  url:
    default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
    is_chat_input: false
    type: string
nodes:
- inputs:
    url: ${inputs.url}
  name: fetch_text_content_from_url
  reduce: false
  source:
    path: fetch_text_content_from_url.py
    type: code
  type: python
outputs:
  text:
    evaluation_only: false
    is_chat_output: false
    reference: ${fetch_text_content_from_url.output}
    type: string
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/print_env_var/flow.dag.yaml
inputs:
  key:
    type: string
outputs:
  output:
    type: string
    reference: ${print_env.output.value}
nodes:
- name: print_env
  type: python
  source:
    type: code
    path: print_env.py
  inputs:
    key: ${inputs.key}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/print_env_var/print_env.py
import os

from promptflow import tool


@tool
def get_env_var(key: str):
    print(os.environ.get(key))
    # get from env var
    return {"value": os.environ.get(key)}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/print_env_var
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/print_env_var/.promptflow/flow.tools.json
{
  "package": {},
  "code": {
    "print_env.py": {
      "type": "python",
      "inputs": {
        "key": {
          "type": ["string"]
        }
      },
      "function": "get_env_var"
    }
  }
}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/concurrent_execution_flow/inputs.json
{
  "input1": "False",
  "input2": "False",
  "input3": "False",
  "input4": "False"
}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/concurrent_execution_flow/wait_short.py
import threading
from time import sleep

from promptflow import tool


@tool
def wait(**kwargs) -> int:
    if kwargs["throw_exception"]:
        raise Exception("test exception")
    for i in range(10):
        print(f"Thread {threading.get_ident()} write test log number {i}")
        sleep(2)
    return 0
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/concurrent_execution_flow/wait_long.py
from time import sleep

from promptflow import tool


@tool
def wait(**args) -> int:
    sleep(5)
    return str(args)
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/concurrent_execution_flow/flow.dag.yaml
name: TestPythonToolLongWaitTime
inputs:
  input1:
    type: bool
  input2:
    type: bool
  input3:
    type: bool
  input4:
    type: bool
outputs:
  output:
    type: int
    reference: ${wait_long_1.output}
nodes:
- name: wait_1
  type: python
  source:
    type: code
    path: wait_short.py
  inputs:
    throw_exception: ${inputs.input1}
- name: wait_2
  type: python
  source:
    type: code
    path: wait_short.py
  inputs:
    throw_exception: ${inputs.input2}
- name: wait_3
  type: python
  source:
    type: code
    path: wait_short.py
  inputs:
    throw_exception: ${inputs.input3}
- name: wait_4
  type: python
  source:
    type: code
    path: wait_short.py
  inputs:
    throw_exception: ${inputs.input4}
- name: wait_long_1
  type: python
  source:
    type: code
    path: wait_long.py
  inputs:
    text_1: ${wait_1.output}
    text_2: ${wait_2.output}
    text_3: ${wait_3.output}
    text_4: ${wait_4.output}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_requirements_txt/requirements.txt
langchain
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_requirements_txt/flow.dag.yaml
inputs:
  key:
    type: string
outputs:
  output:
    type: string
    reference: ${print_env.output.value}
nodes:
- name: print_env
  type: python
  source:
    type: code
    path: print_env.py
  inputs:
    key: ${inputs.key}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_requirements_txt/print_env.py
import os

from promptflow import tool


@tool
def get_env_var(key: str):
    from langchain import __version__

    print(__version__)
    print(os.environ.get(key))
    # get from env var
    return {"value": os.environ.get(key)}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/all_nodes_bypassed/inputs.json
{ "text": "bypass" }
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/all_nodes_bypassed/flow.dag.yaml
name: all_nodes_bypassed
inputs:
  text:
    type: string
outputs:
  result:
    type: string
    reference: ${third_node.output}
nodes:
- name: first_node
  type: python
  source:
    type: code
    path: test.py
  inputs:
    text: ${inputs.text}
  activate:
    when: ${inputs.text}
    is: "hello"
- name: second_node
  type: python
  source:
    type: code
    path: test.py
  inputs:
    text: ${first_node.output}
- name: third_node
  type: python
  source:
    type: code
    path: test.py
  inputs:
    text: ${second_node.output}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/all_nodes_bypassed/test.py
from promptflow import tool


@tool
def test(text: str):
    return text + "hello world!"
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/generator_nodes/echo.py
from promptflow import tool


@tool
def echo(text):
    """yield the input string."""
    echo_text = "Echo - " + text
    for word in echo_text.split():
        yield word
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/generator_nodes/flow.dag.yaml
inputs:
  text:
    type: string
outputs:
  answer:
    type: string
    reference: ${echo_generator.output}
nodes:
- name: echo_generator
  type: python
  source:
    type: code
    path: echo.py
  inputs:
    text: ${inputs.text}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_trace_async/greetings.py
import asyncio
from time import sleep

from promptflow import tool, trace


@trace
async def is_valid_name(name):
    await asyncio.sleep(0.5)
    return len(name) > 0


@trace
async def get_user_name(user_id):
    await asyncio.sleep(0.5)
    user_name = f"User {user_id}"
    if not await is_valid_name(user_name):
        raise ValueError(f"Invalid user name: {user_name}")
    return user_name


@trace
async def format_greeting(user_name):
    await asyncio.sleep(0.5)
    return f"Hello, {user_name}!"


@tool
async def greetings(user_id):
    user_name = await get_user_name(user_id)
    greeting = await format_greeting(user_name)
    print(greeting)
    return {"greeting": greeting}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_trace_async/flow.dag.yaml
inputs:
  user_id:
    type: int
    default: 1
outputs:
  output:
    type: string
    reference: ${greetings.output.greeting}
nodes:
- name: greetings
  type: python
  source:
    type: code
    path: greetings.py
  inputs:
    user_id: ${inputs.user_id}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input/get_dict_val.py
from promptflow import tool


@tool
def get_dict_val(key):
    # get from env var
    print(key)
    if not isinstance(key, dict):
        raise TypeError(f"key must be a dict, got {type(key)}")
    return {"value": f"{key}: {type(key)}", "origin_value": key}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input/flow.dag.yaml
inputs:
  key:
    type: object
outputs:
  output:
    type: string
    reference: ${get_dict_val.output.value}
nodes:
- name: get_dict_val
  type: python
  source:
    type: code
    path: get_dict_val.py
  inputs:
    key: ${inputs.key}
- name: print_val
  type: python
  source:
    type: code
    path: print_val.py
  inputs:
    val: ${get_dict_val.output.value}
    origin_val: ${get_dict_val.output.origin_value}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input/print_val.py
from promptflow import tool


@tool
def print_val(val, origin_val):
    print(val)
    print(origin_val)
    if not isinstance(origin_val, dict):
        raise TypeError(f"key must be a dict, got {type(origin_val)}")
    return val
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input/.promptflow/flow.tools.json
{
  "package": {},
  "code": {
    "print_val.py": {
      "name": "print_val.py",
      "type": "python",
      "inputs": {
        "key": {
          "type": ["object"]
        }
      },
      "source": "print_val.py",
      "function": "get_val"
    }
  }
}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_default_history/chat.jinja2
system:
You are a helpful assistant.

{% for item in chat_history %}
user:
{{item.inputs.question}}
assistant:
{{item.outputs.answer}}
{% endfor %}

user:
{{question}}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_default_history/flow.dag.yaml
inputs:
  chat_history:
    type: list
    is_chat_history: true
    default:
    - inputs:
        question: hi
      outputs:
        answer: hi
    - inputs:
        question: who are you
      outputs:
        answer: who are you
  question:
    type: string
    is_chat_input: true
    default: What is ChatGPT?
outputs:
  answer:
    type: string
    reference: ${chat_node.output}
    is_chat_output: true
nodes:
- inputs:
    deployment_name: gpt-35-turbo
    max_tokens: "256"
    temperature: "0.7"
    chat_history: ${inputs.chat_history}
    question: ${inputs.question}
  name: chat_node
  type: llm
  source:
    type: code
    path: chat.jinja2
  api: chat
  provider: AzureOpenAI
  connection: azure_open_ai_connection
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/no_inputs_outputs/say_hello.py
from promptflow import tool


@tool
def stringify_num():
    print("hello world")
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/no_inputs_outputs/flow.dag.yaml
outputs:
nodes:
- name: say_hello
  type: python
  source:
    type: code
    path: say_hello.py
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/inputs.json
[
  {
    "incident_id": 1,
    "incident_content": "Incident 418856448 : Stale App Deployment for App promptflow"
  },
  {
    "incident_id": 3,
    "incident_content": "Incident 418856448 : Stale App Deployment for App promptflow"
  },
  {
    "incident_id": 0,
    "incident_content": "Incident 418856448 : Stale App Deployment for App promptflow"
  }
]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/investigation_method.py
from promptflow import tool


@tool
def choose_investigation_method(method1="Skip job info extractor", method2="Skip incident info extractor"):
    method = {}
    if method1:
        method["first"] = method1
    if method2:
        method["second"] = method2
    return method
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/icm_retriever.py
from promptflow import tool


@tool
def icm_retriever(content: str) -> str:
    return "ICM: " + content
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/expected_result.json
[
  {
    "expected_node_count": 9,
    "expected_outputs": {
      "investigation_method": {
        "first": "Skip job info extractor",
        "second": "Execute incident info extractor"
      }
    },
    "expected_bypassed_nodes": ["job_info_extractor", "icm_retriever"]
  },
  {
    "expected_node_count": 9,
    "expected_outputs": {
      "investigation_method": {
        "first": "Execute job info extractor",
        "second": "Skip incident info extractor"
      }
    },
    "expected_bypassed_nodes": ["incident_info_extractor", "icm_retriever", "kql_tsg_retriever", "tsg_retriever", "investigation_steps", "retriever_summary"]
  },
  {
    "expected_node_count": 9,
    "expected_outputs": {
      "investigation_method": {
        "first": "Skip job info extractor",
        "second": "Execute incident info extractor"
      }
    },
    "expected_bypassed_nodes": ["job_info_extractor", "kql_tsg_retriever", "tsg_retriever"]
  }
]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/retriever_summary.py
from promptflow import tool


@tool
def retriever_summary(summary) -> str:
    print(f"Summary: {summary}")
    return "Execute incident info extractor"
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/kql_tsg_retriever.py
from promptflow import tool


@tool
def kql_retriever(content: str) -> str:
    return "KQL: " + content
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/tsg_retriever.py
from promptflow import tool


@tool
def tsg_retriever(content: str) -> str:
    return "TSG: " + content
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/incident_id_extractor.py
from promptflow import tool


@tool
def extract_incident_id(incident_content: str, incident_id: int):
    if incident_id >= 0 and incident_id < 3:
        return {
            "has_incident_id": True,
            "incident_id": incident_id,
            "incident_content": incident_content
        }
    return {
        "has_incident_id": False,
        "incident_id": incident_id,
        "incident_content": incident_content
    }
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/expected_status_summary.json
{
  "incident_id_extractor.completed": 3,
  "job_info_extractor.completed": 1,
  "job_info_extractor.bypassed": 2,
  "incident_info_extractor.completed": 2,
  "incident_info_extractor.bypassed": 1,
  "icm_retriever.completed": 1,
  "icm_retriever.bypassed": 2,
  "tsg_retriever.completed": 1,
  "tsg_retriever.bypassed": 2,
  "kql_tsg_retriever.completed": 1,
  "kql_tsg_retriever.bypassed": 2,
  "investigation_steps.completed": 2,
  "investigation_steps.bypassed": 1,
  "retriever_summary.completed": 2,
  "retriever_summary.bypassed": 1,
  "investigation_method.completed": 3
}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/investigation_steps.jinja2
system:
You are a helpful assistant.

user:
When an incident occurs, there have 3 ways to deal with it, please choose one.
1. {{first_method}}
2. {{second_method}}
3. {{third_method}}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/flow.dag.yaml
id: template_standard_flow
name: Template Standard Flow
inputs:
  incident_content:
    type: string
  incident_id:
    type: int
outputs:
  investigation_method:
    type: string
    reference: ${investigation_method.output}
nodes:
- name: incident_id_extractor
  type: python
  source:
    type: code
    path: incident_id_extractor.py
  inputs:
    incident_content: ${inputs.incident_content}
    incident_id: ${inputs.incident_id}
- name: job_info_extractor
  type: python
  source:
    type: code
    path: job_info_extractor.py
  inputs:
    incident_content: ${incident_id_extractor.output.incident_content}
  activate:
    when: ${incident_id_extractor.output.has_incident_id}
    is: false
- name: incident_info_extractor
  type: python
  source:
    type: code
    path: incident_info_extractor.py
  inputs:
    incident: ${incident_id_extractor.output}
  activate:
    when: ${incident_id_extractor.output.has_incident_id}
    is: true
- name: tsg_retriever
  type: python
  source:
    type: code
    path: tsg_retriever.py
  inputs:
    content: ${incident_info_extractor.output.incident_content}
  activate:
    when: ${incident_info_extractor.output.retriever}
    is: tsg
- name: icm_retriever
  type: python
  source:
    type: code
    path: icm_retriever.py
  inputs:
    content: ${incident_info_extractor.output.incident_content}
  activate:
    when: ${incident_info_extractor.output.retriever}
    is: icm
- name: kql_tsg_retriever
  type: python
  source:
    type: code
    path: kql_tsg_retriever.py
  inputs:
    content: ${incident_info_extractor.output.incident_content}
  activate:
    when: ${incident_info_extractor.output.retriever}
    is: tsg
- name: investigation_steps
  type: llm
  source:
    type: code
    path: investigation_steps.jinja2
  inputs:
    deployment_name: gpt-35-turbo
    temperature: 0.7
    top_p: 1
    stop: ""
    max_tokens: 256
    presence_penalty: 0
    frequency_penalty: 0
    logit_bias: ""
    first_method: ${icm_retriever.output}
    second_method: ${tsg_retriever.output}
    third_method: ${kql_tsg_retriever.output}
  provider: AzureOpenAI
  connection: azure_open_ai_connection
  api: chat
  module: promptflow.tools.aoai
- name: retriever_summary
  type: python
  source:
    type: code
    path: retriever_summary.py
  inputs:
    summary: ${investigation_steps.output}
- name: investigation_method
  type: python
  source:
    type: code
    path: investigation_method.py
  inputs:
    method1: ${job_info_extractor.output}
    method2: ${retriever_summary.output}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/incident_info_extractor.py
from promptflow import tool


@tool
def extract_incident_info(incident: dict) -> str:
    retriever_type = ["icm", "tsg", "kql"]
    return {
        "retriever": retriever_type[incident["incident_id"]],
        "incident_content": incident["incident_content"]
    }
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/job_info_extractor.py
from promptflow import tool


@tool
def extract_job_info(incident_content: str) -> str:
    print(f"Incident: {incident_content}")
    return "Execute job info extractor"
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/inputs.json
[
  { "case": "double", "value": 1 },
  { "case": "double", "value": 2 },
  { "case": "square", "value": 3 },
  { "case": "square", "value": 4 }
]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/double.py
from promptflow import tool


@tool
def double(input: int) -> int:
    return 2*input
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/aggregation_node.py
from promptflow import tool
from promptflow import log_metric


@tool
def average(input: list):
    avg, cnt = 0, 0
    for num in input:
        if num != None:
            avg += num
            cnt += 1
    if len(input) > 0:
        avg = avg/cnt
    log_metric("average", avg)
    return avg
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/expected_result.json
[
  {
    "expected_node_count": 3,
    "expected_outputs": {
      "output": {
        "double": 2,
        "square": ""
      }
    },
    "expected_bypassed_nodes": ["square"]
  },
  {
    "expected_node_count": 3,
    "expected_outputs": {
      "output": {
        "double": 4,
        "square": ""
      }
    },
    "expected_bypassed_nodes": ["square"]
  },
  {
    "expected_node_count": 3,
    "expected_outputs": {
      "output": {
        "double": null,
        "square": 9
      }
    },
    "expected_bypassed_nodes": ["double"]
  },
  {
    "expected_node_count": 3,
    "expected_outputs": {
      "output": {
        "double": null,
        "square": 16
      }
    },
    "expected_bypassed_nodes": ["double"]
  }
]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/square.py
from promptflow import tool


@tool
def square(input: int) -> int:
    return input*input
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/expected_status_summary.json
{
  "square.bypassed": 2,
  "double.completed": 2,
  "collect_node.completed": 4,
  "double.bypassed": 2,
  "square.completed": 2,
  "aggregation_double.completed": 1,
  "aggregation_square.completed": 1
}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/collect_node.py
from promptflow import tool


@tool
def collect(input1, input2: str="") -> str:
    return {'double': input1, 'square': input2}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/flow.dag.yaml
inputs:
  case:
    type: string
    default: double
    is_chat_input: false
  value:
    type: int
    default: 1
outputs:
  output:
    type: string
    reference: ${collect_node.output}
    evaluation_only: false
    is_chat_output: false
nodes:
- name: double
  type: python
  source:
    type: code
    path: double.py
  inputs:
    input: ${inputs.value}
  activate:
    when: ${inputs.case}
    is: double
  aggregation: false
- name: square
  type: python
  source:
    type: code
    path: square.py
  inputs:
    input: ${inputs.value}
  activate:
    when: ${inputs.case}
    is: square
  aggregation: false
- name: aggregation_double
  type: python
  source:
    type: code
    path: aggregation_node.py
  inputs:
    input: ${double.output}
  aggregation: true
- name: aggregation_square
  type: python
  source:
    type: code
    path: aggregation_node.py
  inputs:
    input: ${square.output}
  aggregation: true
- name: collect_node
  type: python
  source:
    type: code
    path: collect_node.py
  inputs:
    input1: ${double.output}
    input2: ${square.output}
  aggregation: false
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/llm_tool/echo.py
from promptflow import tool


@tool
def echo(input: str) -> str:
    return input
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/llm_tool/flow.dag.yaml
inputs:
  topic:
    type: string
    default: hello world
    is_chat_input: false
  stream:
    type: bool
    default: false
    is_chat_input: false
outputs:
  joke:
    type: string
    reference: ${echo.output}
nodes:
- name: echo
  type: python
  source:
    type: code
    path: echo.py
  inputs:
    input: ${joke.output}
  use_variants: false
- name: joke
  type: llm
  source:
    type: code
    path: joke.jinja2
  inputs:
    deployment_name: gpt-35-turbo
    temperature: 1
    top_p: 1
    max_tokens: 256
    presence_penalty: 0
    frequency_penalty: 0
    stream: ${inputs.stream}
    topic: ${inputs.topic}
  provider: AzureOpenAI
  connection: azure_open_ai_connection
  api: chat
  module: promptflow.tools.aoai
  use_variants: false
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/llm_tool/joke.jinja2
{# Prompt is a jinja2 template that generates prompt for LLM #}
system:
You are a bot can tell good jokes

user:
A joke about {{topic}} please
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_non_english_input/data.jsonl
{"text": "Hello 123 日本語"}
{"text": "World 123 日本語"}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_non_english_input/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  text:
    type: string
    default: Hello 日本語
outputs:
  output:
    type: string
    reference: ${hello_prompt.output}
nodes:
- name: hello_prompt
  type: prompt
  source:
    type: code
    path: hello.jinja2
  inputs:
    text: ${inputs.text}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/flow_with_non_english_input/hello.jinja2
{{text}}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_ten_inputs/data.jsonl
{"input": "atom", "index": 0}
{"input": "atom", "index": 6}
{"input": "atom", "index": 12}
{"input": "atom", "index": 18}
{"input": "atom", "index": 24}
{"input": "atom", "index": 30}
{"input": "atom", "index": 36}
{"input": "atom", "index": 42}
{"input": "atom", "index": 48}
{"input": "atom", "index": 54}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_ten_inputs/samples.json
[
  { "input": "atom", "index": 0 },
  { "input": "atom", "index": 6 },
  { "input": "atom", "index": 12 },
  { "input": "atom", "index": 18 },
  { "input": "atom", "index": 24 },
  { "input": "atom", "index": 30 },
  { "input": "atom", "index": 36 },
  { "input": "atom", "index": 42 },
  { "input": "atom", "index": 48 },
  { "input": "atom", "index": 54 }
]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_ten_inputs/python_node.py
from promptflow import tool
import time


@tool
def python_node(input: str, index: int) -> str:
    time.sleep(index + 5)
    return input
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_ten_inputs/flow.dag.yaml
id: template_standard_flow
name: Template Standard Flow
inputs:
  input:
    type: string
    is_chat_input: false
  index:
    type: int
    is_chat_input: false
outputs:
  output:
    type: string
    reference: ${python_node.output}
nodes:
- name: python_node
  type: python
  source:
    type: code
    path: python_node.py
  inputs:
    index: ${inputs.index}
    input: ${inputs.input}
  use_variants: false
node_variants: {}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/samples.json
[
  {
    "url": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h"
  },
  {
    "url": "https://www.microsoft.com/en-us/windows/"
  }
]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/convert_to_dict.py
import json

from promptflow import tool


@tool
def convert_to_dict(input_str: str):
    try:
        return json.loads(input_str)
    except Exception as e:
        print("input is not valid, error: {}".format(e))
        return {"category": "None", "evidence": "None"}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/fetch_text_content_from_url.py
import bs4
import requests

from promptflow import tool


@tool
def fetch_text_content_from_url(url: str):
    # Send a request to the URL
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            soup.prettify()
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: {response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/classify_with_llm.jinja2
system:
Your task is to classify a given url into one of the following types: Movie, App, Academic, Channel, Profile, PDF or None based on the text content information. The classification will be based on the url, the webpage text content summary, or both.

user:
Here are a few examples:
{% for ex in examples %}
URL: {{ex.url}}
Text content: {{ex.text_content}}
OUTPUT:
{"category": "{{ex.category}}", "evidence": "{{ex.evidence}}"}
{% endfor %}

For a given URL : {{url}}, and text content: {{text_content}}.
Classify above url to complete the category and indicate evidence.
OUTPUT:
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/summarize_text_content__variant_1.jinja2
system:
Please summarize some keywords of this paragraph and have some details of each keywords. Do not add any information that is not in the text.

user:
Text: {{text}}
Summary:
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/prepare_examples.py
from promptflow import tool


@tool
def prepare_examples():
    return [
        {
            "url": "https://play.google.com/store/apps/details?id=com.spotify.music",
            "text_content": "Spotify is a free music and podcast streaming app with millions of songs, albums, and original podcasts. It also offers audiobooks, so users can enjoy thousands of stories. It has a variety of features such as creating and sharing music playlists, discovering new music, and listening to popular and exclusive podcasts. It also has a Premium subscription option which allows users to download and listen offline, and access ad-free music. It is available on all devices and has a variety of genres and artists to choose from.",
            "category": "App",
            "evidence": "Both",
        },
        {
            "url": "https://www.youtube.com/channel/UC_x5XG1OV2P6uZZ5FSM9Ttw",
            "text_content": "NFL Sunday Ticket is a service offered by Google LLC that allows users to watch NFL games on YouTube. It is available in 2023 and is subject to the terms and privacy policy of Google LLC. It is also subject to YouTube's terms of use and any applicable laws.",
            "category": "Channel",
            "evidence": "URL",
        },
        {
            "url": "https://arxiv.org/abs/2303.04671",
            "text_content": "Visual ChatGPT is a system that enables users to interact with ChatGPT by sending and receiving not only languages but also images, providing complex visual questions or visual editing instructions, and providing feedback and asking for corrected results. It incorporates different Visual Foundation Models and is publicly available. Experiments show that Visual ChatGPT opens the door to investigating the visual roles of ChatGPT with the help of Visual Foundation Models.",
            "category": "Academic",
            "evidence": "Text content",
        },
        {
            "url": "https://ab.politiaromana.ro/",
            "text_content": "There is no content available for this text.",
            "category": "None",
            "evidence": "None",
        },
    ]
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/flow.dag.yaml
inputs:
  url:
    type: string
    default: https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h
outputs:
  category:
    type: string
    reference: ${convert_to_dict.output.category}
  evidence:
    type: string
    reference: ${convert_to_dict.output.evidence}
nodes:
- name: fetch_text_content_from_url
  type: python
  source:
    type: code
    path: fetch_text_content_from_url.py
  inputs:
    url: ${inputs.url}
- name: summarize_text_content
  type: llm
  source:
    type: code
    path: summarize_text_content__variant_1.jinja2
  inputs:
    deployment_name: gpt-35-turbo
    suffix: ''
    max_tokens: '256'
    temperature: '0.2'
    top_p: '1.0'
    logprobs: ''
    echo: 'False'
    stop: ''
    presence_penalty: '0'
    frequency_penalty: '0'
    best_of: '1'
    logit_bias: ''
    text: ${fetch_text_content_from_url.output}
  provider: AzureOpenAI
  connection: azure_open_ai_connection
  api: chat
  module: promptflow.tools.aoai
- name: prepare_examples
  type: python
  source:
    type: code
    path: prepare_examples.py
  inputs: {}
- name: classify_with_llm
  type: llm
  source:
    type: code
    path: classify_with_llm.jinja2
  inputs:
    deployment_name: gpt-35-turbo
    suffix: ''
    max_tokens: '128'
    temperature: '0.2'
    top_p: '1.0'
    logprobs: ''
    echo: 'False'
    stop: ''
    presence_penalty: '0'
    frequency_penalty: '0'
    best_of: '1'
    logit_bias: ''
    url: ${inputs.url}
    examples: ${prepare_examples.output}
    text_content: ${summarize_text_content.output}
  provider: AzureOpenAI
  connection: azure_open_ai_connection
  api: chat
  module: promptflow.tools.aoai
- name: convert_to_dict
  type: python
  source:
    type: code
    path: convert_to_dict.py
  inputs:
    input_str: ${classify_with_llm.output}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/summarize_text_content.jinja2
system:
Please summarize the following text in one paragraph. 100 words. Do not add any information that is not in the text.

user:
Text: {{text}}
Summary:
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow/flow.tools.json
{
  "package": {},
  "code": {
    "fetch_text_content_from_url.py": {
      "type": "python",
      "inputs": {
        "url": {
          "type": ["string"]
        }
      },
      "function": "fetch_text_content_from_url"
    },
    "summarize_text_content.jinja2": {
      "type": "llm",
      "inputs": {
        "text": {
          "type": ["string"]
        }
      },
      "description": "Summarize webpage content into a short paragraph."
    },
    "summarize_text_content__variant_1.jinja2": {
      "type": "llm",
      "inputs": {
        "text": {
          "type": ["string"]
        }
      }
    },
    "prepare_examples.py": {
      "type": "python",
      "function": "prepare_examples"
    },
    "classify_with_llm.jinja2": {
      "type": "llm",
      "inputs": {
        "url": {
          "type": ["string"]
        },
        "examples": {
          "type": ["string"]
        },
        "text_content": {
          "type": ["string"]
        }
      },
      "description": "Multi-class classification of a given url and text content."
    },
    "convert_to_dict.py": {
      "type": "python",
      "inputs": {
        "input_str": {
          "type": ["string"]
        }
      },
      "function": "convert_to_dict"
    }
  }
}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow/lkg_sources/convert_to_dict.py
import json

from promptflow import tool


@tool
def convert_to_dict(input_str: str):
    try:
        return json.loads(input_str)
    except Exception as e:
        print("input is not valid, error: {}".format(e))
        return {"category": "None", "evidence": "None"}
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow/lkg_sources/fetch_text_content_from_url.py
import bs4
import requests

from promptflow import tool


@tool
def fetch_text_content_from_url(url: str):
    # Send a request to the URL
    try:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.35"
        }
        response = requests.get(url, headers=headers)
        if response.status_code == 200:
            # Parse the HTML content using BeautifulSoup
            soup = bs4.BeautifulSoup(response.text, "html.parser")
            soup.prettify()
            return soup.get_text()[:2000]
        else:
            msg = (
                f"Get url failed with status code {response.status_code}.\nURL: {url}\nResponse: {response.text[:100]}"
            )
            print(msg)
            return "No available content"
    except Exception as e:
        print("Get url failed with error: {}".format(e))
        return "No available content"
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow/lkg_sources/classify_with_llm.jinja2
Your task is to classify a given url into one of the following types: Movie, App, Academic, Channel, Profile, PDF or None based on the text content information. The classification will be based on the url, the webpage text content summary, or both.

Here are a few examples:
{% for ex in examples %}
URL: {{ex.url}}
Text content: {{ex.text_content}}
OUTPUT:
{"category": "{{ex.category}}", "evidence": "{{ex.evidence}}"}
{% endfor %}

For a given URL : {{url}}, and text content: {{text_content}}.
Classify above url to complete the category and indicate evidence.
OUTPUT:
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow/lkg_sources/summarize_text_content__variant_1.jinja2
Please summarize some keywords of this paragraph and have some details of each keywords. Do not add any information that is not in the text.

Text: {{text}}
Summary:
0
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow
promptflow_repo/promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/.promptflow/lkg_sources/prepare_examples.py
from promptflow import tool


@tool
def prepare_examples():
    return [
        {
            "url": "https://play.google.com/store/apps/details?id=com.spotify.music",
            "text_content": "Spotify is a free music and podcast streaming app with millions of songs, albums, and original podcasts. It also offers audiobooks, so users can enjoy thousands of stories. It has a variety of features such as creating and sharing music playlists, discovering new music, and listening to popular and exclusive podcasts. It also has a Premium subscription option which allows users to download and listen offline, and access ad-free music. It is available on all devices and has a variety of genres and artists to choose from.",
            "category": "App",
            "evidence": "Both",
        },
        {
            "url": "https://www.youtube.com/channel/UC_x5XG1OV2P6uZZ5FSM9Ttw",
            "text_content": "NFL Sunday Ticket is a service offered by Google LLC that allows users to watch NFL games on YouTube. It is available in 2023 and is subject to the terms and privacy policy of Google LLC. It is also subject to YouTube's terms of use and any applicable laws.",
            "category": "Channel",
            "evidence": "URL",
        },
        {
            "url": "https://arxiv.org/abs/2303.04671",
            "text_content": "Visual ChatGPT is a system that enables users to interact with ChatGPT by sending and receiving not only languages but also images, providing complex visual questions or visual editing instructions, and providing feedback and asking for corrected results. It incorporates different Visual Foundation Models and is publicly available. Experiments show that Visual ChatGPT opens the door to investigating the visual roles of ChatGPT with the help of Visual Foundation Models.",
            "category": "Academic",
            "evidence": "Text content",
        },
        {
            "url": "https://ab.politiaromana.ro/",
            "text_content": "There is no content available for this text.",
            "category": "None",
            "evidence": "None",
        },
    ]
0