repo_id
stringlengths
15
132
file_path
stringlengths
34
176
content
stringlengths
2
3.52M
__index_level_0__
int64
0
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/utils.py
import json


class AttrDict(dict):
    """A dict whose keys can also be read as attributes (d.key == d["key"]).

    Used by tests to mimic SDK response objects (e.g. ``resp.choices[0].text``)
    without constructing real client types.
    """

    # NOTE: the redundant pass-through __init__ was removed; dict.__init__
    # already accepts the same (*args, **kwargs) signature.

    def __getattr__(self, item):
        # __getattr__ is only invoked after normal attribute lookup fails,
        # so real dict attributes/methods are never shadowed.
        if item in self:
            return self.__getitem__(item)
        # Fall back to the default machinery so missing names still raise
        # AttributeError (not KeyError), as attribute access callers expect.
        return super().__getattribute__(item)


def is_json_serializable(data, function_name):
    """Raise TypeError if ``data`` cannot be serialized with json.dumps.

    ``function_name`` is only used to build the error message. Returns None
    on success.
    """
    try:
        json.dumps(data)
    except TypeError:
        raise TypeError(f"{function_name} output is not JSON serializable!")


def verify_url_exists(endpoint_url: str) -> bool:
    """Probe ``endpoint_url`` without credentials and report whether it is up.

    Returns True when the server answers 403 (endpoint exists but the
    unauthenticated request is rejected), False when the URL cannot be
    reached at all. A fully successful (2xx) response is unexpected for an
    auth-protected endpoint, so it deliberately raises instead of returning.
    """
    import urllib.request
    # HTTPError/URLError live in urllib.error; importing HTTPError from
    # urllib.request only worked via an incidental re-export.
    from urllib.error import HTTPError, URLError
    try:
        urllib.request.urlopen(
            urllib.request.Request(endpoint_url),
            timeout=50)
    except HTTPError as e:
        # verify that the connection is not authorized, anything else would mean the endpoint is failed
        # (HTTPError subclasses URLError, so it must be caught first)
        return e.code == 403
    except URLError:
        # Endpoint does not exist - skip the test
        return False
    raise Exception("Task Succeeded unexpectedly.")
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/test_common.py
# Unit tests for the shared chat helpers in promptflow.tools.common:
# function-spec validation, role-prompt parsing, image-reference collection,
# template preprocessing and chat-input conversion.
import pytest
from promptflow.contracts.multimedia import Image
from promptflow.tools.common import ChatAPIInvalidFunctions, validate_functions, process_function_call, \
    parse_chat, find_referenced_image_set, preprocess_template_string, convert_to_chat_list, ChatInputList


class TestCommon:
    """Tests for promptflow.tools.common helper functions (no live LLM calls)."""

    @pytest.mark.parametrize(
        "functions, error_message",
        [
            ([], "functions cannot be an empty list"),
            (["str"], "is not a dict. Here is a valid function example"),
            ([{"name": "func1"}], "does not have 'parameters' property"),
            ([{"name": "func1", "parameters": "param1"}], "should be described as a JSON Schema object"),
            ([{"name": "func1", "parameters": {"type": "int", "properties": {}}}],
             "parameters 'type' should be 'object'"),
            ([{"name": "func1", "parameters": {"type": "object", "properties": []}}],
             "should be described as a JSON Schema object"),
        ],
    )
    def test_chat_api_invalid_functions(self, functions, error_message):
        # Every malformed function spec must raise ChatAPIInvalidFunctions with
        # a descriptive message and the expected error-code hierarchy.
        error_codes = "UserError/ToolValidationError/ChatAPIInvalidFunctions"
        with pytest.raises(ChatAPIInvalidFunctions) as exc_info:
            validate_functions(functions)
        assert error_message in exc_info.value.message
        assert exc_info.value.error_codes == error_codes.split("/")

    @pytest.mark.parametrize(
        "function_call, error_message",
        [
            ("123", "function_call parameter '123' must be a dict"),
            ({"name1": "get_current_weather"},
             'function_call parameter {"name1": "get_current_weather"} must '
             'contain "name" field'),
        ],
    )
    def test_chat_api_invalid_function_call(self, function_call, error_message):
        # function_call must be a dict containing a "name" key; anything else
        # raises ChatAPIInvalidFunctions.
        error_codes = "UserError/ToolValidationError/ChatAPIInvalidFunctions"
        with pytest.raises(ChatAPIInvalidFunctions) as exc_info:
            process_function_call(function_call)
        assert error_message in exc_info.value.message
        assert exc_info.value.error_codes == error_codes.split("/")

    @pytest.mark.parametrize(
        "chat_str, images, expected_result",
        [
            ("system:\nthis is my function:\ndef hello", None, [
                {'role': 'system', 'content': 'this is my function:\ndef hello'}]),
            ("#system:\nthis is my ##function:\ndef hello", None, [
                {'role': 'system', 'content': 'this is my ##function:\ndef hello'}]),
            (" \n system:\nthis is my function:\ndef hello", None, [
                {'role': 'system', 'content': 'this is my function:\ndef hello'}]),
            (" \n # system:\nthis is my function:\ndef hello", None, [
                {'role': 'system', 'content': 'this is my function:\ndef hello'}]),
            ("user:\nhi\nassistant:\nanswer\nfunction:\nname:\nn\ncontent:\nc", None, [
                {'role': 'user', 'content': 'hi'},
                {'role': 'assistant', 'content': 'answer'},
                {'role': 'function', 'name': 'n', 'content': 'c'}]),
            ("#user :\nhi\n #assistant:\nanswer\n# function:\n##name:\nn\n##content:\nc", None, [
                {'role': 'user', 'content': 'hi'},
                {'role': 'assistant', 'content': 'answer'},
                {'role': 'function', 'name': 'n', 'content': 'c'}]),
            ("\nsystem:\nfirst\n\nsystem:\nsecond", None, [
                {'role': 'system', 'content': 'first'}, {'role': 'system', 'content': 'second'}]),
            ("\n#system:\nfirst\n\n#system:\nsecond", None, [
                {'role': 'system', 'content': 'first'}, {'role': 'system', 'content': 'second'}]),
            ("\n#system:\nfirst\n#assistant:\n#user:\nsecond", None, [
                {'role': 'system', 'content': 'first'},
                {'role': 'assistant', 'content': ''},
                {'role': 'user', 'content': 'second'}
            ]),
            # todo: enable this test case after we support image_url officially
            # ("#user:\ntell me about the images\nImage(1edf82c2)\nImage(9b65b0f4)", [
            #     Image("image1".encode()), Image("image2".encode(), "image/png", "https://image_url")], [
            #     {'role': 'user', 'content': [
            #         {'type': 'text', 'text': 'tell me about the images'},
            #         {'type': 'image_url', 'image_url': {'url': 'data:image/*;base64,aW1hZ2Ux'}},
            #         {'type': 'image_url', 'image_url': 'https://image_url'}]},
            # ])
        ]
    )
    def test_success_parse_role_prompt(self, chat_str, images, expected_result):
        # parse_chat must split role-tagged prompt text into message dicts,
        # tolerating '#' markdown prefixes, leading whitespace, repeated roles
        # and empty message bodies.
        actual_result = parse_chat(chat_str, images)
        assert actual_result == expected_result

    @pytest.mark.parametrize(
        "chat_str, expected_result",
        [
            ("\n#system:\n##name:\nAI \n content:\nfirst\n\n#user:\nsecond", [
                {'role': 'system', 'name': 'AI', 'content': 'first'},
                {'role': 'user', 'content': 'second'}]),
            ("\nuser:\nname:\n\nperson\n content:\n", [
                {'role': 'user', 'name': 'person', 'content': ''}]),
            # A "name:" without a following "content:" section is treated as
            # plain content, not as a name field.
            ("\nsystem:\nname:\n\n content:\nfirst", [
                {'role': 'system', 'content': 'name:\n\n content:\nfirst'}]),
            ("\nsystem:\nname:\n\n", [
                {'role': 'system', 'content': 'name:'}])
        ]
    )
    def test_parse_chat_with_name_in_role_prompt(self, chat_str, expected_result):
        # Optional "name:"/"content:" sub-sections inside a role block become
        # the message's "name" and "content" fields.
        actual_result = parse_chat(chat_str)
        assert actual_result == expected_result

    @pytest.mark.parametrize(
        "kwargs, expected_result",
        [
            ({}, set()),
            # Images are collected from top-level values; non-image values ignored.
            ({"image_1": Image("image1".encode()), "image_2": Image("image2".encode()), "t1": "text"}, {
                Image("image1".encode()), Image("image2".encode())
            }),
            # ...and from inside lists...
            ({"images": [Image("image1".encode()), Image("image2".encode())]}, {
                Image("image1".encode()), Image("image2".encode())
            }),
            # ...with duplicates collapsed (same bytes -> same Image)...
            ({"image_1": Image("image1".encode()), "image_2": Image("image1".encode())}, {
                Image("image1".encode())
            }),
            # ...and from nested dicts.
            ({"images": {"image_1": Image("image1".encode()), "image_2": Image("image2".encode())}}, {
                Image("image1".encode()), Image("image2".encode())
            })
        ]
    )
    def test_find_referenced_image_set(self, kwargs, expected_result):
        actual_result = find_referenced_image_set(kwargs)
        assert actual_result == expected_result

    @pytest.mark.parametrize(
        "input_string, expected_output",
        [
            # Markdown image references are unwrapped to bare template vars on
            # their own lines; other text is untouched.
            ("![image]({{img1}})", "\n{{img1}}\n"),
            ("![image]({{img1}})![image]({{img2}})", "\n{{img1}}\n\n{{img2}}\n"),
            ("No image here", "No image here"),
            ("![image]({{img1}}) Some text ![image]({{img2}})", "\n{{img1}}\n Some text \n{{img2}}\n"),
        ],
    )
    def test_preprocess_template_string(self, input_string, expected_output):
        actual_result = preprocess_template_string(input_string)
        assert actual_result == expected_output

    @pytest.mark.parametrize(
        "input_data, expected_output",
        [
            ({}, {}),
            ({"key": "value"}, {"key": "value"}),
            # Lists (including nested lists inside dict values) are wrapped in
            # ChatInputList recursively; scalars pass through unchanged.
            (["item1", "item2"], ChatInputList(["item1", "item2"])),
            ({"key": ["item1", "item2"]}, {"key": ChatInputList(["item1", "item2"])}),
            (["item1", ["nested_item1", "nested_item2"]],
             ChatInputList(["item1", ChatInputList(["nested_item1", "nested_item2"])])),
        ],
    )
    def test_convert_to_chat_list(self, input_data, expected_output):
        actual_result = convert_to_chat_list(input_data)
        assert actual_result == expected_output
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/test_aoai.py
# Integration tests for the Azure OpenAI chat/completion tools. Most tests
# call the live service via fixtures from conftest.py; test_aoai_parameters is
# the exception and runs fully mocked.
from unittest.mock import patch

import pytest
import json

from promptflow.connections import AzureOpenAIConnection
from promptflow.tools.aoai import chat, completion
from promptflow.tools.exception import WrappedOpenAIError
from tests.utils import AttrDict


@pytest.mark.usefixtures("use_secrets_config_file")
class TestAOAI:
    """End-to-end tests for promptflow.tools.aoai chat/completion."""

    def test_aoai_completion(self, aoai_provider):
        prompt_template = "please complete this sentence: world war II "
        # test whether tool can handle param "stop" with value empty list
        # as openai raises "[] is not valid under any of the given schemas - 'stop'"
        aoai_provider.completion(
            prompt=prompt_template, deployment_name="gpt-35-turbo-instruct", stop=[], logit_bias={}
        )

    def test_aoai_stream_completion(self, aoai_provider):
        prompt_template = "please complete this sentence: world war II "
        # test whether tool can handle param "stop" with value empty list in stream mode
        # as openai raises "[] is not valid under any of the given schemas - 'stop'"
        aoai_provider.completion(
            prompt=prompt_template, deployment_name="gpt-35-turbo-instruct", stop=[], logit_bias={},
            stream=True
        )

    def test_aoai_chat(self, aoai_provider, example_prompt_template, chat_history):
        # max_tokens passed as a string to exercise the tool's int coercion.
        result = aoai_provider.chat(
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo",
            max_tokens="32",
            temperature=0,
            user_input="Fill in more details about trend 2.",
            chat_history=chat_history,
        )
        assert "additional details" in result.lower()

    def test_aoai_chat_api(self, azure_open_ai_connection, example_prompt_template, chat_history):
        # NOTE(review): "inF" is not parseable as an int — presumably this
        # exercises lenient max_tokens handling; confirm against the tool code.
        result = chat(
            connection=azure_open_ai_connection,
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo",
            max_tokens="inF",
            temperature=0,
            user_input="Write a slogan for product X",
            chat_history=chat_history,
        )
        assert "Product X".lower() in result.lower()

    @pytest.mark.parametrize(
        "function_call",
        [
            "auto",
            {"name": "get_current_weather"},
        ],
    )
    def test_aoai_chat_with_function(
            self, azure_open_ai_connection, example_prompt_template, chat_history, functions, function_call):
        # Both "auto" and an explicit function choice should yield a
        # function_call for get_current_weather.
        result = chat(
            connection=azure_open_ai_connection,
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo",
            max_tokens="inF",
            temperature=0,
            user_input="What is the weather in Boston?",
            chat_history=chat_history,
            functions=functions,
            function_call=function_call
        )
        assert "function_call" in result
        assert result["function_call"]["name"] == "get_current_weather"

    def test_aoai_chat_with_name_in_roles(
            self, azure_open_ai_connection, example_prompt_template_with_name_in_roles, chat_history, functions):
        # Prompt template uses per-role "name:" sections (function results).
        result = chat(
            connection=azure_open_ai_connection,
            prompt=example_prompt_template_with_name_in_roles,
            deployment_name="gpt-35-turbo",
            max_tokens="inF",
            temperature=0,
            functions=functions,
            name="get_location",
            result=json.dumps({"location": "Austin"}),
            question="What is the weather in Boston?",
            prev_question="Where is Boston?"
        )
        assert "function_call" in result
        assert result["function_call"]["name"] == "get_current_weather"

    def test_aoai_chat_message_with_no_content(self, aoai_provider):
        # missing colon after role name. Sometimes following prompt may result in empty content.
        prompt = (
            "user:\n what is your name\nassistant\nAs an AI language model developed by"
            " OpenAI, I do not have a name. You can call me OpenAI or AI assistant. "
            "How can I assist you today?"
        )
        # assert chat tool can handle.
        aoai_provider.chat(prompt=prompt, deployment_name="gpt-35-turbo")
        # empty content after role name:\n
        prompt = "user:\n"
        aoai_provider.chat(prompt=prompt, deployment_name="gpt-35-turbo")

    def test_aoai_stream_chat(self, aoai_provider, example_prompt_template, chat_history):
        result = aoai_provider.chat(
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo",
            max_tokens="32",
            temperature=0,
            user_input="Fill in more details about trend 2.",
            chat_history=chat_history,
            stream=True,
        )
        # Drain the generator; iteration ends via exception (broad on purpose).
        answer = ""
        while True:
            try:
                answer += next(result)
            except Exception:
                break
        assert "additional details" in answer.lower()

    @pytest.mark.parametrize(
        "params, expected",
        [
            # Empty stop/logit_bias must be normalized (stop -> None,
            # logit_bias passed through); non-empty values pass unchanged.
            ({"stop": [], "logit_bias": {}}, {"stop": None}),
            ({"stop": ["</i>"], "logit_bias": {"16": 100, "17": 100}}, {}),
        ],
    )
    def test_aoai_parameters(self, params, expected):
        # Fully mocked: asserts the exact kwargs the tool forwards to the
        # openai Completions.create call.
        for k, v in params.items():
            if k not in expected:
                expected[k] = v
        deployment_name = "dummy"
        conn_dict = {"api_key": "dummy", "api_base": "base", "api_version": "dummy_ver", "api_type": "azure"}
        conn = AzureOpenAIConnection(**conn_dict)

        def mock_completion(self, **kwargs):
            assert kwargs["model"] == deployment_name
            for k, v in expected.items():
                assert kwargs[k] == v, f"Expect {k} to be {v}, but got {kwargs[k]}"
            text = kwargs["prompt"]
            # Echo the prompt back in an SDK-shaped response.
            return AttrDict({"choices": [AttrDict({"text": text})]})

        with patch("openai.resources.Completions.create", new=mock_completion):
            prompt = "dummy_prompt"
            result = completion(connection=conn, prompt=prompt, deployment_name=deployment_name, **params)
            assert result == prompt

    def test_aoai_chat_with_response_format(
            self,
            azure_open_ai_connection,
            example_prompt_template,
            chat_history):
        result = chat(
            connection=azure_open_ai_connection,
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo-1106",
            temperature=0,
            user_input="Write a slogan for product X, please response with json.",
            chat_history=chat_history,
            response_format={"type": "json_object"}
        )
        assert "x:".lower() in result.lower()

    @pytest.mark.parametrize(
        "response_format, user_input, error_message, error_codes, exception",
        [
            ({"type": "json"}, "Write a slogan for product X, please response with json.",
             "\'json\' is not one of [\'json_object\', \'text\']",
             "UserError/OpenAIError/BadRequestError", WrappedOpenAIError),
            ({"type": "json_object"}, "Write a slogan for product X",
             "\'messages\' must contain the word \'json\' in some form",
             "UserError/OpenAIError/BadRequestError", WrappedOpenAIError),
            ({"types": "json_object"}, "Write a slogan for product X",
             "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}",
             "UserError/OpenAIError/BadRequestError", WrappedOpenAIError)
        ]
    )
    def test_aoai_chat_with_invalid_response_format(
            self,
            azure_open_ai_connection,
            example_prompt_template,
            chat_history,
            response_format,
            user_input,
            error_message,
            error_codes,
            exception
    ):
        # Bad response_format values must surface as WrappedOpenAIError with
        # the service's message and the expected error-code chain.
        with pytest.raises(exception) as exc_info:
            chat(
                connection=azure_open_ai_connection,
                prompt=example_prompt_template,
                deployment_name="gpt-35-turbo-1106",
                temperature=0,
                user_input=user_input,
                chat_history=chat_history,
                response_format=response_format
            )
        assert error_message in exc_info.value.message
        assert exc_info.value.error_codes == error_codes.split("/")

    def test_aoai_chat_with_not_support_response_format_json_mode_model(
            self,
            azure_open_ai_connection,
            example_prompt_template,
            chat_history
    ):
        # gpt-35-turbo (non-1106) has no JSON mode; requesting it must fail.
        with pytest.raises(WrappedOpenAIError) as exc_info:
            chat(
                connection=azure_open_ai_connection,
                prompt=example_prompt_template,
                deployment_name="gpt-35-turbo",
                temperature=0,
                user_input="Write a slogan for product X, please response with json.",
                chat_history=chat_history,
                response_format={"type": "json_object"}
            )
        error_message = "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}."
        assert error_message in exc_info.value.message
        assert exc_info.value.error_codes == "UserError/OpenAIError/BadRequestError".split("/")

    def test_aoai_chat_with_response_format_text_mode(
            self,
            azure_open_ai_connection,
            example_prompt_template,
            chat_history
    ):
        # {"type": "text"} is accepted by every chat model.
        result = chat(
            connection=azure_open_ai_connection,
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo",
            temperature=0,
            user_input="Write a slogan for product X.",
            chat_history=chat_history,
            response_format={"type": "text"}
        )
        assert "Product X".lower() in result.lower()
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/conftest.py
# Shared pytest fixtures for the promptflow-tools test suite: connection
# objects resolved from connections.json, prompt-template/file fixtures, and
# an autouse skip hook for tests whose connections hold placeholder keys.
import json
import os
import pytest
import sys

from pathlib import Path
from pytest_mock import MockerFixture  # noqa: E402
from tests.utils import verify_url_exists

# Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow'
# since the code here is in promptflow namespace as well
from promptflow._internal import ConnectionManager
from promptflow.connections import CustomConnection, OpenAIConnection, SerpConnection
from promptflow.contracts.multimedia import Image
from promptflow.tools.aoai import AzureOpenAI

# NOTE(review): "PROMOTFLOW" is a typo for "PROMPTFLOW"; renaming would touch
# every usage below, so it is only flagged here.
PROMOTFLOW_ROOT = Path(__file__).absolute().parents[1]
CONNECTION_FILE = (PROMOTFLOW_ROOT / "connections.json").resolve().absolute().as_posix()
root_str = str(PROMOTFLOW_ROOT.resolve().absolute())
if root_str not in sys.path:
    # Make the package root importable when pytest is launched elsewhere.
    sys.path.insert(0, root_str)


# connection
@pytest.fixture(autouse=True)
def use_secrets_config_file(mocker: MockerFixture):
    # Point the ConnectionManager at the repo-local connections.json.
    mocker.patch.dict(os.environ, {"PROMPTFLOW_CONNECTIONS": CONNECTION_FILE})


@pytest.fixture
def azure_open_ai_connection():
    return ConnectionManager().get("azure_open_ai_connection")


@pytest.fixture
def aoai_provider(azure_open_ai_connection) -> AzureOpenAI:
    # Tool wrapper around the raw connection, used by most AOAI tests.
    aoai_provider = AzureOpenAI(azure_open_ai_connection)
    return aoai_provider


@pytest.fixture
def open_ai_connection():
    return ConnectionManager().get("open_ai_connection")


@pytest.fixture
def serp_connection():
    return ConnectionManager().get("serp_connection")


def verify_om_llm_custom_connection(connection: CustomConnection) -> bool:
    '''Verify that there is a MIR endpoint up and available for the Custom Connection.

    We explicitly do not pass the endpoint key to avoid the delay in generating a response.
    '''
    return verify_url_exists(connection.configs['endpoint_url'])


@pytest.fixture
def gpt2_custom_connection():
    return ConnectionManager().get("gpt2_connection")


@pytest.fixture
def open_model_llm_ws_service_connection() -> bool:
    # Best-effort: load service credentials from the custom connection into
    # the environment; returns False (rather than failing) when unavailable.
    try:
        creds_custom_connection: CustomConnection = ConnectionManager().get("open_source_llm_ws_service_connection")
        subs = json.loads(creds_custom_connection.secrets['service_credential'])
        for key, value in subs.items():
            os.environ[key] = value
        return True
    except Exception as e:
        print(f"""Something failed setting environment variables for service credentials.
Error: {e}""")
        return False


@pytest.fixture(autouse=True)
def skip_if_no_api_key(request, mocker):
    # Skip tests marked @pytest.mark.skip_if_no_api_key(<conn_fixture>) when
    # the named connection still carries a placeholder key (contains
    # "-api-key") or, for Open_Model_LLM custom connections, when the
    # endpoint URL is unreachable.
    mocker.patch.dict(os.environ, {"PROMPTFLOW_CONNECTIONS": CONNECTION_FILE})
    if request.node.get_closest_marker('skip_if_no_api_key'):
        conn_name = request.node.get_closest_marker('skip_if_no_api_key').args[0]
        connection = request.getfixturevalue(conn_name)
        # if dummy placeholder key, skip.
        if isinstance(connection, OpenAIConnection) or isinstance(connection, SerpConnection):
            if "-api-key" in connection.api_key:
                pytest.skip('skipped because no key')
        elif isinstance(connection, CustomConnection):
            if "endpoint_api_key" not in connection.secrets or "-api-key" in connection.secrets["endpoint_api_key"]:
                pytest.skip('skipped because no key')
            # Verify Custom Connections, but only those used by the Open_Model_LLM Tool
            if "endpoint_url" in connection.configs and "-endpoint-url" not in connection.configs["endpoint_url"]:
                if not verify_om_llm_custom_connection(connection):
                    pytest.skip('skipped because the connection is not valid')


# example prompts
@pytest.fixture
def example_prompt_template() -> str:
    with open(PROMOTFLOW_ROOT / "tests/test_configs/prompt_templates/marketing_writer/prompt.jinja2") as f:
        prompt_template = f.read()
    return prompt_template


@pytest.fixture
def example_prompt_template_with_name_in_roles() -> str:
    with open(PROMOTFLOW_ROOT / "tests/test_configs/prompt_templates/prompt_with_name_in_roles.jinja2") as f:
        prompt_template = f.read()
    return prompt_template


@pytest.fixture
def chat_history() -> list:
    with open(PROMOTFLOW_ROOT / "tests/test_configs/prompt_templates/marketing_writer/history.json") as f:
        history = json.load(f)
    return history


@pytest.fixture
def example_prompt_template_with_function() -> str:
    with open(PROMOTFLOW_ROOT / "tests/test_configs/prompt_templates/prompt_with_function.jinja2") as f:
        prompt_template = f.read()
    return prompt_template


@pytest.fixture
def example_prompt_template_with_image() -> str:
    with open(PROMOTFLOW_ROOT / "tests/test_configs/prompt_templates/prompt_with_image.jinja2") as f:
        prompt_template = f.read()
    return prompt_template


@pytest.fixture
def example_image() -> Image:
    with open(PROMOTFLOW_ROOT / "tests/test_configs/prompt_templates/images/number10.jpg", "rb") as f:
        image = Image(f.read())
    return image


# functions
@pytest.fixture
def functions():
    # Minimal valid OpenAI function spec used by the function-calling tests.
    return [
        {
            "name": "get_current_weather",
            "parameters": {
                "type": "object",
                "properties": {},
            },
        }
    ]


@pytest.fixture
def azure_content_safety_connection():
    return ConnectionManager().get("azure_content_safety_connection")
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/test_aoai_gptv.py
# Tests for the Azure OpenAI GPT-4 Vision chat tool (currently skipped until
# a GPT-4V deployment is available).
import pytest

from promptflow.tools.aoai_gpt4v import AzureOpenAI


@pytest.fixture
def azure_openai_provider(azure_open_ai_connection) -> AzureOpenAI:
    return AzureOpenAI(azure_open_ai_connection)


@pytest.mark.usefixtures("use_secrets_config_file")
@pytest.mark.skip("Skipping until we have a Azure OpenAI GPT-4 Vision deployment")
class TestAzureOpenAIGPT4V:
    def test_openai_gpt4v_chat(self, azure_openai_provider, example_prompt_template_with_image, example_image):
        result = azure_openai_provider.chat(
            prompt=example_prompt_template_with_image,
            deployment_name="gpt-4v",
            max_tokens=480,
            temperature=0,
            question="which number did you see in this picture?",
            image_input=example_image,
        )
        assert "10" == result

    def test_openai_gpt4v_stream_chat(self, azure_openai_provider, example_prompt_template_with_image, example_image):
        # BUGFIX: this test previously omitted stream=True (so it was a
        # duplicate of the non-stream test) and asserted against `result`
        # (the generator) instead of the accumulated `answer`.
        result = azure_openai_provider.chat(
            prompt=example_prompt_template_with_image,
            deployment_name="gpt-4v",
            max_tokens=480,
            temperature=0,
            question="which number did you see in this picture?",
            image_input=example_image,
            stream=True,
        )
        # Drain the stream; iteration ends via exception, matching the
        # pattern used by the other stream tests in this suite.
        answer = ""
        while True:
            try:
                answer += next(result)
            except Exception:
                break
        assert "10" == answer
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/test_handle_openai_error.py
# Tests for OpenAI/AOAI error translation and the handle_openai_error retry
# decorator: retriable vs non-retriable errors, retry-after header handling,
# template errors, and invalid-parameter validation.
import httpx
import pytest
from jinja2.exceptions import TemplateSyntaxError
from openai import (
    APIConnectionError, RateLimitError, AuthenticationError, BadRequestError, APITimeoutError,
    InternalServerError, UnprocessableEntityError
)

from promptflow.tools.aoai import chat, completion
from promptflow.tools.common import handle_openai_error
from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, to_openai_error_message, \
    JinjaTemplateError, LLMError, ChatAPIFunctionRoleInvalidFormat
from promptflow.tools.openai import chat as openai_chat
from promptflow.tools.aoai_gpt4v import AzureOpenAI as AzureOpenAIVision
from pytest_mock import MockerFixture

from promptflow.exceptions import UserErrorException


@pytest.mark.usefixtures("use_secrets_config_file")
class TestHandleOpenAIError:
    """Error-path tests for the AOAI/OpenAI tools."""

    def test_aoai_chat_message_invalid_format(self, aoai_provider):
        # chat api prompt should follow the format of "system:\nmessage1\nuser:\nmessage2".
        prompt = "what is your name"
        error_codes = "UserError/ToolValidationError/ChatAPIInvalidRole"
        with pytest.raises(ChatAPIInvalidRole,
                           match="The Chat API requires a specific format for prompt") as exc_info:
            aoai_provider.chat(prompt=prompt, deployment_name="gpt-35-turbo")
        assert exc_info.value.error_codes == error_codes.split("/")

    def test_aoai_authentication_error_with_bad_api_key(self, azure_open_ai_connection):
        # A bogus key must surface as WrappedOpenAIError/AuthenticationError.
        azure_open_ai_connection.api_key = "hello"
        prompt_template = "please complete this sentence: world war II "
        raw_message = (
            "Unauthorized. Access token is missing, invalid"
        )
        error_codes = "UserError/OpenAIError/AuthenticationError"
        with pytest.raises(WrappedOpenAIError) as exc_info:
            chat(azure_open_ai_connection, prompt=f"user:\n{prompt_template}", deployment_name="gpt-35-turbo")
        assert raw_message in exc_info.value.message
        assert exc_info.value.error_codes == error_codes.split("/")

    def test_aoai_connection_error_with_bad_api_base(self, azure_open_ai_connection):
        # Unresolvable host -> APIConnectionError wrapped as user error.
        azure_open_ai_connection.api_base = "https://gpt-test-eus11.openai.azure.com/"
        prompt_template = "please complete this sentence: world war II "
        error_codes = "UserError/OpenAIError/APIConnectionError"
        with pytest.raises(WrappedOpenAIError) as exc_info:
            chat(azure_open_ai_connection, prompt=f"user:\n{prompt_template}", deployment_name="gpt-35-turbo")
        assert "Connection error." in exc_info.value.message
        assert exc_info.value.error_codes == error_codes.split("/")

    def test_aoai_not_found_error_with_bad_api_version(self, azure_open_ai_connection):
        """NotFoundError: Resource not found"""
        azure_open_ai_connection.api_version = "2022-12-23"
        prompt_template = "please complete this sentence: world war II "
        raw_message = "Resource not found"
        error_codes = "UserError/OpenAIError/NotFoundError"
        # Chat will throw: Exception occurs: NotFoundError: Resource not found
        with pytest.raises(WrappedOpenAIError) as exc_info:
            chat(azure_open_ai_connection, prompt=f"user:\n{prompt_template}", deployment_name="gpt-35-turbo")
        assert raw_message in exc_info.value.message
        assert exc_info.value.error_codes == error_codes.split("/")

    def test_aoai_not_found_error_with_bad_deployment(self, aoai_provider):
        """
        NotFoundError: The API deployment for this resource does not exist.
        If you created the deployment within the last 5 minutes, please wait a moment and try again.
        """
        # This will throw InvalidRequestError
        prompt_template = "please complete this sentence: world war II "
        deployment = "hello"
        raw_message = (
            "The API deployment for this resource does not exist. If you created the deployment "
            "within the last 5 minutes, please wait a moment and try again."
        )
        error_codes = "UserError/OpenAIError/NotFoundError"
        with pytest.raises(WrappedOpenAIError) as exc_info:
            aoai_provider.chat(prompt=f"user:\n{prompt_template}", deployment_name=deployment)
        assert raw_message in exc_info.value.message
        assert exc_info.value.error_codes == error_codes.split("/")

    def test_rate_limit_error_insufficient_quota(self, azure_open_ai_connection, mocker: MockerFixture):
        # insufficient_quota rate-limit errors are NOT retried: exactly one call.
        dummyEx = RateLimitError("Something went wrong", response=httpx.Response(
            429, request=httpx.Request('GET', 'https://www.example.com')), body={"type": "insufficient_quota"})
        mock_method = mocker.patch("openai.resources.Completions.create", side_effect=dummyEx)
        error_codes = "UserError/OpenAIError/RateLimitError"
        with pytest.raises(WrappedOpenAIError) as exc_info:
            completion(connection=azure_open_ai_connection, prompt="hello", deployment_name="text-ada-001")
        assert to_openai_error_message(dummyEx) == exc_info.value.message
        assert mock_method.call_count == 1
        assert exc_info.value.error_codes == error_codes.split("/")

    @pytest.mark.parametrize(
        "dummyExceptionList",
        [
            (
                [
                    RateLimitError("Something went wrong", response=httpx.Response(
                        429, request=httpx.Request('GET', 'https://www.example.com')), body=None),
                    APITimeoutError(request=httpx.Request('GET', 'https://www.example.com')),
                    APIConnectionError(
                        message="('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer'))",
                        request=httpx.Request('GET', 'https://www.example.com')),
                    InternalServerError("Something went wrong", response=httpx.Response(
                        503, request=httpx.Request('GET', 'https://www.example.com')), body=None),
                    UnprocessableEntityError("Something went wrong", response=httpx.Response(
                        422, request=httpx.Request('GET', 'https://www.example.com')), body=None)
                ]
            ),
        ],
    )
    def test_retriable_openai_error_handle(self, mocker: MockerFixture, dummyExceptionList):
        # Retriable errors: tries+1 total calls, exponential backoff sleeps,
        # then "Exceed max retry times." wrapped error.
        for dummyEx in dummyExceptionList:
            # Patch the test_method to throw the desired exception
            patched_test_method = mocker.patch("openai.resources.Completions.create", side_effect=dummyEx)

            # Apply the retry decorator to the patched test_method
            max_retry = 2
            delay = 0.2
            decorated_test_method = handle_openai_error(tries=max_retry, delay=delay)(patched_test_method)
            mock_sleep = mocker.patch("time.sleep")  # Create a separate mock for time.sleep

            with pytest.raises(UserErrorException) as exc_info:
                decorated_test_method()

            assert patched_test_method.call_count == max_retry + 1
            assert "Exceed max retry times. " + to_openai_error_message(dummyEx) == exc_info.value.message
            error_codes = "UserError/OpenAIError/" + type(dummyEx).__name__
            assert exc_info.value.error_codes == error_codes.split("/")
            expected_calls = [
                mocker.call(delay),
                mocker.call(delay * 2),
            ]
            mock_sleep.assert_has_calls(expected_calls)

    @pytest.mark.parametrize(
        "dummyExceptionList",
        [
            (
                [
                    RateLimitError("Something went wrong", response=httpx.Response(
                        429, request=httpx.Request('GET', 'https://www.example.com'),
                        headers={"retry-after": "0.3"}), body=None),
                    InternalServerError("Something went wrong", response=httpx.Response(
                        503, request=httpx.Request('GET', 'https://www.example.com'),
                        headers={"retry-after": "0.3"}), body=None),
                    UnprocessableEntityError("Something went wrong", response=httpx.Response(
                        422, request=httpx.Request('GET', 'https://www.example.com'),
                        headers={"retry-after": "0.3"}), body=None)
                ]
            ),
        ],
    )
    def test_retriable_openai_error_handle_with_header(
            self, mocker: MockerFixture, dummyExceptionList
    ):
        # When the response carries a retry-after header, the backoff must be
        # based on the header value instead of the configured delay.
        for dummyEx in dummyExceptionList:
            # Patch the test_method to throw the desired exception
            patched_test_method = mocker.patch("promptflow.tools.aoai.completion", side_effect=dummyEx)

            # Apply the retry decorator to the patched test_method
            max_retry = 2
            delay = 0.2
            header_delay = 0.3
            decorated_test_method = handle_openai_error(tries=max_retry, delay=delay)(patched_test_method)
            mock_sleep = mocker.patch("time.sleep")  # Create a separate mock for time.sleep

            with pytest.raises(UserErrorException) as exc_info:
                decorated_test_method()

            assert patched_test_method.call_count == max_retry + 1
            assert "Exceed max retry times. " + to_openai_error_message(dummyEx) == exc_info.value.message
            error_codes = "UserError/OpenAIError/" + type(dummyEx).__name__
            assert exc_info.value.error_codes == error_codes.split("/")
            expected_calls = [
                mocker.call(header_delay),
                mocker.call(header_delay * 2),
            ]
            mock_sleep.assert_has_calls(expected_calls)

    @pytest.mark.parametrize(
        "dummyExceptionList",
        [
            (
                [
                    # NOTE(review): httpx.get here performs a REAL network
                    # request at parametrize-collection time; the other tests
                    # build httpx.Response objects directly — consider doing
                    # the same here.
                    AuthenticationError("Something went wrong", response=httpx.get('https://www.example.com'),
                                        body=None),
                    BadRequestError("Something went wrong", response=httpx.get('https://www.example.com'), body=None),
                    APIConnectionError(message="Something went wrong",
                                       request=httpx.Request('GET', 'https://www.example.com')),
                ]
            ),
        ],
    )
    def test_non_retriable_openai_error_handle(
            self, azure_open_ai_connection, mocker: MockerFixture, dummyExceptionList
    ):
        # Non-retriable errors: exactly one call, no retries.
        for dummyEx in dummyExceptionList:
            mock_method = mocker.patch("openai.resources.Completions.create", side_effect=dummyEx)
            with pytest.raises(UserErrorException) as exc_info:
                completion(connection=azure_open_ai_connection, prompt="hello", deployment_name="text-ada-001")
            assert to_openai_error_message(dummyEx) == exc_info.value.message
            error_codes = "UserError/OpenAIError/" + type(dummyEx).__name__
            assert exc_info.value.error_codes == error_codes.split("/")
            assert mock_method.call_count == 1

    def test_unexpected_error_handle(self, azure_open_ai_connection, mocker: MockerFixture):
        dummyEx = Exception("Something went wrong")
        # Unpatched warm-up call first to confirm the connection itself works.
        chat(connection=azure_open_ai_connection, prompt="user:\nhello", deployment_name="gpt-35-turbo")
        mock_method = mocker.patch("openai.resources.chat.Completions.create", side_effect=dummyEx)
        error_codes = "UserError/LLMError"

        with pytest.raises(LLMError) as exc_info:
            chat(connection=azure_open_ai_connection, prompt="user:\nhello", deployment_name="gpt-35-turbo")

        # Generic exceptions must NOT be formatted as OpenAI errors.
        assert to_openai_error_message(dummyEx) != exc_info.value.args[0]
        assert "OpenAI API hits exception: Exception: Something went wrong" == exc_info.value.message
        assert mock_method.call_count == 1
        assert exc_info.value.error_codes == error_codes.split("/")

    def test_template_syntax_error_handle(self, azure_open_ai_connection, mocker: MockerFixture):
        # Jinja template errors are translated into JinjaTemplateError with a
        # user-actionable message.
        dummyEx = TemplateSyntaxError(message="Something went wrong", lineno=1)
        mock_method = mocker.patch("jinja2.Template.__new__", side_effect=dummyEx)
        error_codes = "UserError/ToolValidationError/JinjaTemplateError"

        with pytest.raises(JinjaTemplateError) as exc_info:
            chat(connection=azure_open_ai_connection, prompt="user:\nhello", deployment_name="gpt-35-turbo")

        error_message = "Failed to render jinja template: TemplateSyntaxError: Something went wrong\n line 1. " \
                        + "Please modify your prompt to fix the issue."
        assert error_message == exc_info.value.message
        assert mock_method.call_count == 1
        assert exc_info.value.error_codes == error_codes.split("/")

    @pytest.mark.skip_if_no_api_key("open_ai_connection")
    def test_model_not_accept_functions_as_param(
            self, open_ai_connection, example_prompt_template, functions):
        with pytest.raises(WrappedOpenAIError) as exc_info:
            openai_chat(
                connection=open_ai_connection,
                prompt=example_prompt_template,
                model="gpt-3.5-turbo-0301",
                functions=functions
            )
        assert "Current model does not support the `functions` parameter" in exc_info.value.message

    def test_input_invalid_function_role_prompt(self, azure_open_ai_connection):
        # A "function" role message must carry a name.
        with pytest.raises(ChatAPIFunctionRoleInvalidFormat) as exc_info:
            chat(
                connection=azure_open_ai_connection,
                prompt="function:\n This is function role prompt",
                deployment_name="gpt-35-turbo"
            )
        assert "'name' is required if role is function," in exc_info.value.message

    def test_completion_with_chat_model(self, azure_open_ai_connection):
        # Chat models reject the legacy Completion API with a guidance message.
        with pytest.raises(UserErrorException) as exc_info:
            completion(connection=azure_open_ai_connection, prompt="hello", deployment_name="gpt-35-turbo")
        msg = "Completion API is a legacy api and is going to be deprecated soon. " \
              "Please change to use Chat API for current model."
        assert msg in exc_info.value.message

    def test_model_not_support_image_input(
            self, azure_open_ai_connection, example_prompt_template_with_image, example_image):
        aoai = AzureOpenAIVision(azure_open_ai_connection)
        with pytest.raises(WrappedOpenAIError) as exc_info:
            aoai.chat(
                prompt=example_prompt_template_with_image,
                deployment_name="gpt-35-turbo",
                max_tokens=480,
                temperature=0,
                question="which number did you see in this picture?",
                image_input=example_image,
            )
        assert "Current model does not support the image input" in exc_info.value.message

    @pytest.mark.parametrize(
        "max_tokens, error_message, error_codes, exception",
        [
            (0, "0 is less than the minimum of 1", "UserError/OpenAIError/BadRequestError", WrappedOpenAIError),
            (-1, "-1 is less than the minimum of 1", "UserError/OpenAIError/BadRequestError", WrappedOpenAIError),
            ("asd", "ValueError: invalid literal for int()", "UserError/LLMError", LLMError)
        ]
    )
    def test_aoai_invalid_max_tokens(
            self,
            azure_open_ai_connection,
            example_prompt_template,
            chat_history,
            max_tokens,
            error_message,
            error_codes,
            exception):
        # Out-of-range ints are rejected by the service (BadRequestError);
        # non-numeric strings fail locally during int conversion (LLMError).
        with pytest.raises(exception) as exc_info:
            chat(
                connection=azure_open_ai_connection,
                prompt=example_prompt_template,
                deployment_name="gpt-35-turbo",
                max_tokens=max_tokens,
                temperature=0,
                user_input="Write a slogan for product X",
                chat_history=chat_history,
            )
        assert error_message in exc_info.value.message
        assert exc_info.value.error_codes == error_codes.split("/")
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/test_acs.py
import pytest from promptflow.tools.azure_content_safety import analyze_text @pytest.mark.usefixtures("use_secrets_config_file") class TestAzureContentSafety: def test_azure_content_safety_analyze_happy_path(self, azure_content_safety_connection): text = "I hate you." result = analyze_text( connection=azure_content_safety_connection, text=text ) assert "suggested_action" in result assert "action_by_category" in result
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/test_openai.py
import pytest import json from promptflow.tools.openai import chat, completion, OpenAI from promptflow.tools.exception import WrappedOpenAIError @pytest.fixture def openai_provider(open_ai_connection) -> OpenAI: return OpenAI(open_ai_connection) @pytest.mark.usefixtures("use_secrets_config_file") @pytest.mark.skip_if_no_api_key("open_ai_connection") class TestOpenAI: def test_openai_completion(self, openai_provider): prompt_template = "please complete this sentence: world war II " openai_provider.completion(prompt=prompt_template) def test_openai_stream_completion(self, openai_provider): prompt_template = "please complete this sentence: world war II " openai_provider.completion(prompt=prompt_template, stream=True) def test_openai_completion_api(self, open_ai_connection): prompt_template = "please complete this sentence: world war II " completion(open_ai_connection, prompt=prompt_template) def test_openai_chat(self, openai_provider, example_prompt_template, chat_history): result = openai_provider.chat( prompt=example_prompt_template, model="gpt-3.5-turbo", max_tokens=32, temperature=0, user_input="Fill in more details about trend 2.", chat_history=chat_history, ) assert "trend 2" in result.lower() def test_openai_stream_chat(self, openai_provider, example_prompt_template, chat_history): result = openai_provider.chat( prompt=example_prompt_template, model="gpt-3.5-turbo", max_tokens=32, temperature=0, user_input="Fill in more details about trend 2.", chat_history=chat_history, stream=True, ) answer = "" while True: try: answer += next(result) except Exception: break assert "trend 2" in answer.lower() def test_openai_chat_api(self, open_ai_connection, example_prompt_template, chat_history): result = chat( connection=open_ai_connection, prompt=example_prompt_template, model="gpt-3.5-turbo", max_tokens="inF", temperature=0, user_input="Write a slogan for product X", chat_history=chat_history, ) assert "Product X".lower() in result.lower() def 
test_openai_prompt_with_function( self, open_ai_connection, example_prompt_template_with_function, functions): result = chat( connection=open_ai_connection, prompt=example_prompt_template_with_function, model="gpt-3.5-turbo", temperature=0, # test input functions. functions=functions, # test input prompt containing function role. name="get_location", result=json.dumps({"location": "Austin"}), question="What is the weather in Boston?", prev_question="Where is Boston?" ) assert result["function_call"]["name"] == "get_current_weather" def test_openai_chat_with_response_format(self, open_ai_connection, example_prompt_template, chat_history): result = chat( connection=open_ai_connection, prompt=example_prompt_template, model="gpt-4-1106-preview", temperature=0, user_input="Write a slogan for product X, please reponse with json.", chat_history=chat_history, response_format={"type": "json_object"} ) assert "Product X".lower() in result.lower() @pytest.mark.parametrize( "response_format, user_input, error_message, error_codes, exception", [ ({"type": "json"}, "Write a slogan for product X, please reponse with json.", "\'json\' is not one of [\'json_object\', \'text\']", "UserError/OpenAIError/BadRequestError", WrappedOpenAIError), ({"type": "json_object"}, "Write a slogan for product X", "\'messages\' must contain the word \'json\' in some form", "UserError/OpenAIError/BadRequestError", WrappedOpenAIError), ({"types": "json_object"}, "Write a slogan for product X", "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}", "UserError/OpenAIError/BadRequestError", WrappedOpenAIError) ] ) def test_openai_chat_with_invalid_response_format( self, open_ai_connection, example_prompt_template, chat_history, response_format, user_input, error_message, error_codes, exception ): with pytest.raises(exception) as exc_info: chat( connection=open_ai_connection, prompt=example_prompt_template, model="gpt-4-1106-preview", temperature=0, user_input=user_input, 
chat_history=chat_history, response_format=response_format ) assert error_message in exc_info.value.message assert exc_info.value.error_codes == error_codes.split("/") def test_openai_chat_with_not_support_response_format_json_mode_model( self, open_ai_connection, example_prompt_template, chat_history ): with pytest.raises(WrappedOpenAIError) as exc_info: chat( connection=open_ai_connection, prompt=example_prompt_template, model="gpt-3.5-turbo", temperature=0, user_input="Write a slogan for product X, please reponse with json.", chat_history=chat_history, response_format={"type": "json_object"} ) error_message = "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}." assert error_message in exc_info.value.message assert exc_info.value.error_codes == "UserError/OpenAIError/BadRequestError".split("/") def test_openai_chat_with_response_format_text_mode( self, open_ai_connection, example_prompt_template, chat_history ): result = chat( connection=open_ai_connection, prompt=example_prompt_template, model="gpt-3.5-turbo", temperature=0, user_input="Write a slogan for product X.", chat_history=chat_history, response_format={"type": "text"} ) assert "Product X".lower() in result.lower()
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/test_openai_gpt4v.py
import pytest from promptflow.tools.openai_gpt4v import OpenAI @pytest.fixture def openai_provider(open_ai_connection) -> OpenAI: return OpenAI(open_ai_connection) @pytest.mark.usefixtures("use_secrets_config_file") @pytest.mark.skip_if_no_api_key("open_ai_connection") class TestOpenAIGPT4V: def test_openai_gpt4v_chat(self, openai_provider, example_prompt_template_with_image, example_image): result = openai_provider.chat( prompt=example_prompt_template_with_image, model="gpt-4-vision-preview", max_tokens=480, temperature=0, question="which number did you see in this picture?", image_input=example_image, ) assert "10" == result def test_openai_gpt4v_stream_chat(self, openai_provider, example_prompt_template_with_image, example_image): result = openai_provider.chat( prompt=example_prompt_template_with_image, model="gpt-4-vision-preview", max_tokens=480, temperature=0, question="which number did you see in this picture?", image_input=example_image, ) answer = "" while True: try: answer += next(result) except Exception: break assert "10" == result
0
promptflow_repo/promptflow/src/promptflow-tools/tests/test_configs
promptflow_repo/promptflow/src/promptflow-tools/tests/test_configs/prompt_templates/prompt_with_image.jinja2
# system: As an AI assistant, your task involves interpreting images and responding to questions about the image. Remember to provide accurate answers based on the information present in the image. Directly give the answer, no more explanation is needed. # user: {{question}} ![image]({{image_input}})
0
promptflow_repo/promptflow/src/promptflow-tools/tests/test_configs
promptflow_repo/promptflow/src/promptflow-tools/tests/test_configs/prompt_templates/prompt_with_function.jinja2
# system: Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous. # user: {{prev_question}} # assistant: {{prev_answer}} # function: ## name: {{name}} ## content: {{result}} # user: {{question}}
0
promptflow_repo/promptflow/src/promptflow-tools/tests/test_configs
promptflow_repo/promptflow/src/promptflow-tools/tests/test_configs/prompt_templates/prompt_with_name_in_roles.jinja2
# system: ## name: AI ## content: Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous. # user: ## name: person ## content: {{prev_question}} # assistant: ## name: John ## content: {{prev_answer}} # function: ## name: {{name}} ## content: {{result}} # user: {{question}}
0
promptflow_repo/promptflow/src/promptflow-tools/tests/test_configs/prompt_templates
promptflow_repo/promptflow/src/promptflow-tools/tests/test_configs/prompt_templates/marketing_writer/prompt.jinja2
# System: You are a marketing writing assistant.For user: You help come up with creative content ideas and content like marketing emails, blog posts, tweets, ad copy and product descriptions.You write in a friendly yet professional tone but can tailor your writing style that best works for a user-specified audience.If you do not know the answer to a question, respond by saying "I do not know the answer to your question." {% for item in chat_history %} # user: {{item.inputs.user_input}} # assistant: {{item.outputs.response}} {% endfor %} # user: {{user_input}}
0
promptflow_repo/promptflow/src/promptflow-tools/tests/test_configs/prompt_templates
promptflow_repo/promptflow/src/promptflow-tools/tests/test_configs/prompt_templates/marketing_writer/history.json
[ { "inputs": { "user_input": "hello!" }, "outputs": { "response": "Hello! How can I help you today? If you need any creative content ideas or assistance with marketing emails, blog posts, tweets, ad copy, or product descriptions, feel free to ask. I'm here to help!" } }, { "inputs": { "user_input": "I want to share an idea about latest sports business trend." }, "outputs": { "response": "That sounds interesting! Here's a creative content idea for a blog post or article discussing the latest sports business trends:\n\nTitle: \"Game Changers: The Latest Trends Revolutionizing the Sports Business Industry\"\n\nIntroduction:\n- Briefly discuss the importance of sports as a global business and its ever-changing landscape.\n- Mention that the article will explore the latest sports business trends that are shaping the industry.\n\nTrend 1: Fan Engagement Through Digital and Social Media Platforms\n- Explain how teams and leagues are investing in digital and social media platforms to connect with fans and enhance their experience.\n- Discuss the use of platforms such as Twitter, Facebook, and Instagram to engage fans, sell merchandise, and promote content.\n\nTrend 2: Esports and the Rise of Competitive Gaming\n- Discuss the rapid growth of esports and how it has caught the attention of traditional sports teams, advertisers, and investors.\n- Explain the opportunities esports presents for the sports industry, such as revenue from sponsorships, streaming rights, and live events.\n\nTrend 3: Sports Betting Legalization\n- Examine the impact of the increasing legalization of sports betting on the sports industry.\n- Discuss how teams, leagues, and businesses can capitalize on this trend by providing dedicated betting platforms and partnerships with sportsbooks.\n\nTrend 4: Sustainable Practices in Sports\n- Explore how teams and leagues are becoming more environmentally conscious and incorporating sustainable practices into their operations.\n- Mention examples like using 
renewable energy, reducing waste, and encouraging public transportation.\n\nConclusion:\n- Recap the latest sports business trends that are revolutionizing the industry.\n- Emphasize the need for adaptability and innovation in the sports business world.\n\nFeel free to modify this outline according to your preferences or add more trends if you'd like. Good luck with your content creation!" } } ]
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/CHANGELOG.md
# Release History ## 1.5.0 (Upcoming) ### Features Added ### Bugs Fixed - [SDK/CLI] The inputs of node test allows the value of reference node output be passed directly in. ### Improvements - [SDK/CLI] For `pf run delete`, `pf connection delete`, introducing an option to skip confirmation prompts. ## 1.4.0 (2024.01.22) ### Features Added - [Executor] Calculate system_metrics recursively in api_calls. - [Executor] Add flow root level api_calls, so that user can overview the aggregated metrics of a flow. - [Executor] Add @trace decorator to make it possible to log traces for functions that are called by tools. - [SDK/CLI][azure] Switch automatic runtime's session provision to system wait. - [SDK/CLI] Add `--skip-open-browser` option to `pf flow serve` to skip opening browser. - [SDK/CLI][azure] Support submit flow to sovereign cloud. - [SDK/CLI] Support `pf run delete` to delete a run irreversibly. - [SDK/CLI][azure] Automatically put requirements.txt to flow.dag.yaml if exists in flow snapshot. - [SDK/CLI] Support `pf upgrade` to upgrade prompt flow to the latest version. - [SDK/CLI] Support env variables in yaml file. ### Bugs Fixed - Fix unaligned inputs & outputs or pandas exception during get details against run in Azure. - Fix loose flow path validation for run schema. - Fix "Without Import Data" in run visualize page results from invalid JSON value (`-Infinity`, `Infinity` and `NaN`). - Fix "ValueError: invalid width -1" when show-details against long column(s) in narrow terminal window. - Fix invalid tool code generated when initializing the script tool with icon. ### Improvements - [SDK/CLI] For `pfazure flow create`: - If used by non-msft tenant user, use user name instead of user object id in the remote flow folder path. (e.g. `Users/<user-name>/promptflow`). - When flow has unknown attributes, log warning instead of raising error. - Use local flow folder name and timestamp as the azure flow file share folder name. 
- [SDK/CLI] For `pf/pfazure run create`, when run has unknown attribute, log warning instead of raising error. - Replace `pyyaml` with `ruamel.yaml` to adopt YAML 1.2 specification. ## 1.3.0 (2023.12.27) ### Features Added - [SDK/CLI] Support `pfazure run cancel` to cancel a run on Azure AI. - Add support to configure prompt flow home directory via environment variable `PF_HOME_DIRECTORY`. - Please set before importing `promptflow`, otherwise it won't take effect. - [Executor] Handle KeyboardInterrupt in flow test so that the final state is Canceled. ### Bugs Fixed - [SDK/CLI] Fix single node run doesn't work when consuming sub item of upstream node ### Improvements - Change `ruamel.yaml` lower bound to 0.17.10. - [SDK/CLI] Improve `pfazure run download` to handle large run data files. - [Executor] Exit the process when all async tools are done or exceeded timeout after cancellation. ## 1.2.0 (2023.12.14) ### Features Added - [SDK/CLI] Support `pfazure run download` to download run data from Azure AI. - [SDK/CLI] Support `pf run create` to create a local run record from downloaded run data. ### Bugs Fixed - [SDK/CLI] Removing telemetry warning when running commands. - Empty node stdout & stderr to avoid large visualize HTML. - Hide unnecessary fields in run list for better readability. - Fix bug that ignores timeout lines in batch run status summary. ## 1.1.1 (2023.12.1) ### Bugs Fixed - [SDK/CLI] Fix compatibility issue with `semantic-kernel==0.4.0.dev0` and `azure-ai-ml==1.12.0`. - [SDK/CLI] Add back workspace information in CLI telemetry. - [SDK/CLI] Disable the feature to customize user agent in CLI to avoid changes on operation context. - Fix openai metrics calculator to adapt openai v1. ## 1.1.0 (2023.11.30) ### Features Added - Add `pfazure flow show/list` to show or list flows from Azure AI. - Display node status in run visualize page graph view. - Add support for image input and output in prompt flow. 
- [SDK/CLI] SDK/CLI will collect telemetry by default, user can use `pf config set telemetry.enabled=false` to opt out. - Add `raise_on_error` for stream run API, by default we raise for failed run. - Flow as function: consume a flow like a function with parameters mapped to flow inputs. - Enable specifying the default output path for run. - Use `pf config set run.output_path=<output-path>` to specify, and the run output path will be `<output-path>/<run-name>`. - Introduce macro `${flow_directory}` for `run.output_path` in config, which will be replaced with corresponding flow directory. - The flow directory cannot be set as run output path, which means `pf config set run.output_path='${flow_directory}'` is invalid; but you can use child folder, e.g. `pf config set run.output_path='${flow_directory}/.runs'`. - Support pfazure run create with remote flow. - For remote workspace flow: `pfazure run create --flow azureml:<flow-name>` - For remote registry flow: `pfazure run create --flow azureml://registries/<registry-name>/models/<flow-name>/versions/<flow-version>` - Support set logging level via environment variable `PF_LOGGING_LEVEL`, valid values includes `CRITICAL`, `ERROR`, `WARNING`, `INFO`, `DEBUG`, default to `INFO`. - Remove openai version restrictions ### Bugs Fixed - [SDK/CLI] Fix node test with dict node input will raise "Required input(s) missing". - [SDK/CLI] Will use run name as display name when display name not specified (used flow folder name before). - [SDK/CLI] Fix pf flow build created unexpected layer of dist folder - [SDK/CLI] Fix deploy prompt flow: connections value may be none ### Improvements - Force 'az login' if using azureml connection provider in cli command. - Add env variable 'PF_NO_INTERACTIVE_LOGIN' to disable interactive login if using azureml connection provider in promptflow sdk. - Improved CLI invoke time. - Bump `pydash` upper bound to 8.0.0. - Bump `SQLAlchemy` upper bound to 3.0.0. 
- Bump `flask` upper bound to 4.0.0, `flask-restx` upper bound to 2.0.0. - Bump `ruamel.yaml` upper bound to 1.0.0. ## 1.0.0 (2023.11.09) ### Features Added - [Executor] Add `enable_kwargs` tag in tools.json for customer python tool. - [SDK/CLI] Support `pfazure flow create`. Create a flow on Azure AI from local flow folder. - [SDK/CLI] Changed column mapping `${run.inputs.xx}`'s behavior, it will refer to run's data columns instead of run's inputs columns. ### Bugs Fixed - [SDK/CLI] Keep original format in run output.jsonl. - [Executor] Fix the bug that raise an error when an aggregation node references a bypassed node ### Improvements - [Executor] Set the outputs of the bypassed nodes as None ## 0.1.0b8 (2023.10.26) ### Features Added - [Executor] Add average execution time and estimated execution time to batch run logs - [SDK/CLI] Support `pfazure run archive/restore/update`. - [SDK/CLI] Support custom strong type connection. - [SDK/CLI] Enable telemetry and won't collect by default, use `pf config set cli.telemetry_enabled=true` to opt in. - [SDK/CLI] Exposed function `from promptflow import load_run` to load run object from local YAML file. - [Executor] Support `ToolProvider` for script tools. ### Bugs Fixed - **pf config set**: - Fix bug for workspace `connection.provider=azureml` doesn't work as expected. - [SDK/CLI] Fix the bug that using sdk/cli to submit batch run did not display the log correctly. - [SDK/CLI] Fix encoding issues when input is non-English with `pf flow test`. - [Executor] Fix the bug can't read file containing "Private Use" unicode character. - [SDK/CLI] Fix string type data will be converted to integer/float. - [SDK/CLI] Remove the max rows limitation of loading data. - [SDK/CLI] Fix the bug --set not taking effect when creating run from file. ### Improvements - [SDK/CLI] Experience improvements in `pf run visualize` page: - Add column status. - Support opening flow file by clicking run id. 
## 0.1.0b7.post1 (2023.09.28) ### Bug Fixed - Fix extra dependency bug when importing `promptflow` without `azure-ai-ml` installed. ## 0.1.0b7 (2023.09.27) ### Features Added - **pf flow validate**: support validate flow - **pf config set**: support set user-level promptflow config. - Support workspace connection provider, usage: `pf config set connection.provider=azureml://subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.MachineLearningServices/workspaces/<workspace_name>` - Support override openai connection's model when submitting a flow. For example: `pf run create --flow ./ --data ./data.jsonl --connection llm.model=xxx --column-mapping url='${data.url}'` ### Bugs Fixed - [Flow build] Fix flow build file name and environment variable name when connection name contains space. - Reserve `.promptflow` folder when dump run snapshot. - Read/write log file with encoding specified. - Avoid inconsistent error message when executor exits abnormally. - Align inputs & outputs row number in case partial completed run will break `pfazure run show-details`. - Fix bug that failed to parse portal url for run data when the form is an asset id. - Fix the issue of process hanging for a long time when running the batch run. ### Improvements - [Executor][Internal] Improve error message with more details and actionable information. - [SDK/CLI] `pf/pfazure run show-details`: - Add `--max-results` option to control the number of results to display. - Add `--all-results` option to display all results. - Add validation for azure `PFClient` constructor in case wrong parameter is passed. ## 0.1.0b6 (2023.09.15) ### Features Added - [promptflow][Feature] Store token metrics in run properties ### Bugs Fixed - Refine error message body for flow_validator.py - Refine error message body for run_tracker.py - [Executor][Internal] Add some unit test to improve code coverage of log/metric - [SDK/CLI] Update portal link to remove flight. 
- [Executor][Internal] Improve inputs mapping's error message. - [API] Resolve warnings/errors of sphinx build ## 0.1.0b5 (2023.09.08) ### Features Added - **pf run visualize**: support lineage graph & display name in visualize page ### Bugs Fixed - Add missing requirement `psutil` in `setup.py` ## 0.1.0b4 (2023.09.04) ### Features added - Support `pf flow build` commands ## 0.1.0b3 (2023.08.30) - Minor bug fixes. ## 0.1.0b2 (2023.08.29) - First preview version with major CLI & SDK features. ### Features added - **pf flow**: init/test/serve/export - **pf run**: create/update/stream/list/show/show-details/show-metrics/visualize/archive/restore/export - **pf connection**: create/update/show/list/delete - Azure AI support: - **pfazure run**: create/list/stream/show/show-details/show-metrics/visualize ## 0.1.0b1 (2023.07.20) - Stub version in Pypi.
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/README.md
# Prompt flow [![Python package](https://img.shields.io/pypi/v/promptflow)](https://pypi.org/project/promptflow/) [![Python](https://img.shields.io/pypi/pyversions/promptflow.svg?maxAge=2592000)](https://pypi.python.org/pypi/promptflow/) [![PyPI - Downloads](https://img.shields.io/pypi/dm/promptflow)](https://pypi.org/project/promptflow/) [![CLI](https://img.shields.io/badge/CLI-reference-blue)](https://microsoft.github.io/promptflow/reference/pf-command-reference.html) [![vsc extension](https://img.shields.io/visual-studio-marketplace/i/prompt-flow.prompt-flow?logo=Visual%20Studio&label=Extension%20)](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow) [![Doc](https://img.shields.io/badge/Doc-online-green)](https://microsoft.github.io/promptflow/index.html) [![Issue](https://img.shields.io/github/issues/microsoft/promptflow)](https://github.com/microsoft/promptflow/issues/new/choose) [![Discussions](https://img.shields.io/github/discussions/microsoft/promptflow)](https://github.com/microsoft/promptflow/issues/new/choose) [![CONTRIBUTING](https://img.shields.io/badge/Contributing-8A2BE2)](https://github.com/microsoft/promptflow/blob/main/CONTRIBUTING.md) [![License: MIT](https://img.shields.io/github/license/microsoft/promptflow)](https://github.com/microsoft/promptflow/blob/main/LICENSE) > Welcome to join us to make prompt flow better by > participating [discussions](https://github.com/microsoft/promptflow/discussions), > opening [issues](https://github.com/microsoft/promptflow/issues/new/choose), > submitting [PRs](https://github.com/microsoft/promptflow/pulls). **Prompt flow** is a suite of development tools designed to streamline the end-to-end development cycle of LLM-based AI applications, from ideation, prototyping, testing, evaluation to production deployment and monitoring. It makes prompt engineering much easier and enables you to build LLM apps with production quality. 
With prompt flow, you will be able to: - **Create and iteratively develop flow** - Create executable [flows](https://microsoft.github.io/promptflow/concepts/concept-flows.html) that link LLMs, prompts, Python code and other [tools](https://microsoft.github.io/promptflow/concepts/concept-tools.html) together. - Debug and iterate your flows, especially the [interaction with LLMs](https://microsoft.github.io/promptflow/concepts/concept-connections.html) with ease. - **Evaluate flow quality and performance** - Evaluate your flow's quality and performance with larger datasets. - Integrate the testing and evaluation into your CI/CD system to ensure quality of your flow. - **Streamlined development cycle for production** - Deploy your flow to the serving platform you choose or integrate into your app's code base easily. - (Optional but highly recommended) Collaborate with your team by leveraging the cloud version of [prompt flow in Azure AI](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/overview-what-is-prompt-flow?view=azureml-api-2). ------ ## Installation Ensure you have a python environment, `python=3.9` is recommended. 
```sh pip install promptflow promptflow-tools ``` ## Quick Start ⚡ **Create a chatbot with prompt flow** Run the command to initiate a prompt flow from a chat template, it creates folder named `my_chatbot` and generates required files within it: ```sh pf flow init --flow ./my_chatbot --type chat ``` **Setup a connection for your API key** For OpenAI key, establish a connection by running the command, using the `openai.yaml` file in the `my_chatbot` folder, which stores your OpenAI key: ```sh # Override keys with --set to avoid yaml file changes pf connection create --file ./my_chatbot/openai.yaml --set api_key=<your_api_key> --name open_ai_connection ``` For Azure OpenAI key, establish the connection by running the command, using the `azure_openai.yaml` file: ```sh pf connection create --file ./my_chatbot/azure_openai.yaml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection ``` **Chat with your flow** In the `my_chatbot` folder, there's a `flow.dag.yaml` file that outlines the flow, including inputs/outputs, nodes, connection, and the LLM model, etc > Note that in the `chat` node, we're using a connection named `open_ai_connection` (specified in `connection` field) and the `gpt-35-turbo` model (specified in `deployment_name` field). The deployment_name filed is to specify the OpenAI model, or the Azure OpenAI deployment resource. Interact with your chatbot by running: (press `Ctrl + C` to end the session) ```sh pf flow test --flow ./my_chatbot --interactive ``` #### Continue to delve deeper into [prompt flow](https://github.com/microsoft/promptflow).
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/.env.example
DEFAULT_SUBSCRIPTION_ID="your-subscription-id" DEFAULT_RESOURCE_GROUP_NAME="your-resource-group-name" DEFAULT_WORKSPACE_NAME="your-workspace-name" DEFAULT_RUNTIME_NAME="test-runtime-ci" PROMPT_FLOW_TEST_MODE="replay"
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/pf.bat
@echo off setlocal SET PF_INSTALLER=PIP IF EXIST "%~dp0\python.exe" ( "%~dp0\python.exe" -m promptflow._cli._pf.entry %* ) ELSE ( python -m promptflow._cli._pf.entry %* )
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/dev_requirements.txt
azure-identity azure-keyvault beautifulsoup4==4.12.2 coverage keyrings.alt # this is possibly insecure, do not add this to setup.py mock nox portalocker~=1.2 pre-commit # promptflow-tools Comment this since it will take in unecessary dependencies pydash PyJWT==2.8.0 # parse token to get tenant id during sanitization pytest pytest-asyncio pytest-cov pytest-forked pytest-mock pytest-nunit pytest-sugar pytest-timeout pytest-xdist setuptools setuptools_rust twine==4.0.0 vcrpy==5.1.0 # record and replay http requests for pfazure tests wheel httpx # test dummy flow run in notebook, give a minimal version for vulnerability issue ipykernel>=6.27.1 papermill>=2.5.0
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/setup.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

"""Distribution metadata for the promptflow SDK package."""

import os
import re
from pathlib import Path
from typing import Any, Match, cast

from setuptools import find_packages, setup

PACKAGE_NAME = "promptflow"
PACKAGE_FOLDER_PATH = Path(__file__).parent / "promptflow"

# Pull the version string out of promptflow/_version.py without importing it.
_version_source = (PACKAGE_FOLDER_PATH / "_version.py").read_text(encoding="utf-8")
version = cast(
    Match[Any],
    re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', _version_source, re.MULTILINE),
).group(1)

# Long description shown on PyPI: README followed by the changelog.
readme = Path("README.md").read_text(encoding="utf-8")
changelog = Path("CHANGELOG.md").read_text(encoding="utf-8")

REQUIRES = [
    "psutil",  # get process information when bulk run
    "httpx>=0.25.1",  # used to send http requests asynchronously
    "openai",  # promptflow._core.api_injector
    "flask>=2.2.3,<4.0.0",  # Serving endpoint requirements
    "sqlalchemy>=1.4.48,<3.0.0",  # sqlite requirements
    # note that pandas 1.5.3 is the only version to test in ci before promptflow 0.1.0b7 is released
    # and pandas 2.x.x will be the only version to test in ci after that.
    "pandas>=1.5.3,<3.0.0",  # load data requirements
    "python-dotenv>=1.0.0,<2.0.0",  # control plane sdk requirements, to load .env file
    "keyring>=24.2.0,<25.0.0",  # control plane sdk requirements, to access system keyring service
    "pydash>=6.0.0,<8.0.0",  # control plane sdk requirements, to support parameter overrides in schema.
    # vulnerability: https://github.com/advisories/GHSA-5cpq-8wj7-hf2v
    "cryptography>=41.0.3,<42.0.0",  # control plane sdk requirements to support connection encryption
    "colorama>=0.4.6,<0.5.0",  # producing colored terminal text for testing chat flow
    "tabulate>=0.9.0,<1.0.0",  # control plane sdk requirements, to print table in console
    "filelock>=3.4.0,<4.0.0",  # control plane sdk requirements, to lock for multiprocessing
    # We need to pin the version due to the issue: https://github.com/hwchase17/langchain/issues/5113
    "marshmallow>=3.5,<4.0.0",
    "gitpython>=3.1.24,<4.0.0",  # used git info to generate flow id
    "tiktoken>=0.4.0",
    "strictyaml>=1.5.0,<2.0.0",  # used to identify exact location of validation error
    "waitress>=2.1.2,<3.0.0",  # used to serve local service
    "opencensus-ext-azure<2.0.0",  # configure opencensus to send telemetry to azure monitor
    "ruamel.yaml>=0.17.10,<1.0.0",  # used to generate connection templates with preserved comments
    "pyarrow>=14.0.1,<15.0.0",  # used to read parquet file with pandas.read_parquet
    "pillow>=10.1.0,<11.0.0",  # used to generate icon data URI for package tool
    "filetype>=1.2.0",  # used to detect the mime type for multimedia input
    "jsonschema>=4.0.0,<5.0.0",  # used to validate tool
    "docutils",  # used to generate description for tools
]

setup(
    name=PACKAGE_NAME,
    version=version,
    description="Prompt flow Python SDK - build high-quality LLM apps",
    long_description_content_type="text/markdown",
    long_description=readme + "\n\n" + changelog,
    license="MIT License",
    author="Microsoft Corporation",
    author_email="[email protected]",
    url="https://github.com/microsoft/promptflow",
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires="<4.0,>=3.8",
    install_requires=REQUIRES,
    extras_require={
        "azure": [
            "azure-core>=1.26.4,<2.0.0",
            "azure-storage-blob[aio]>=12.13.0,<13.0.0",  # add [aio] for async run download feature
            "azure-identity>=1.12.0,<2.0.0",
            "azure-ai-ml>=1.11.0,<2.0.0",
            "pyjwt>=2.4.0,<3.0.0",  # requirement of control plane SDK
        ],
        "executable": ["pyinstaller>=5.13.2", "streamlit>=1.26.0", "streamlit-quill<0.1.0", "bs4"],
        "pfs": [
            "flask-restx>=1.2.0,<2.0.0",
        ],
        "azureml-serving": [
            # AzureML connection dependencies
            "azure-identity>=1.12.0,<2.0.0",
            "azure-ai-ml>=1.11.0,<2.0.0",
            # OTel dependencies for monitoring
            "opentelemetry-api>=1.21.0,<2.0.0",
            "opentelemetry-sdk>=1.21.0,<2.0.0",
            "azure-monitor-opentelemetry>=1.1.1,<2.0.0",
            # MDC dependencies for monitoring
            "azureml-ai-monitoring>=0.1.0b3,<1.0.0",
        ],
    },
    packages=find_packages(),
    scripts=[
        'pf',
        'pf.bat'
    ],
    entry_points={
        "console_scripts": [
            "pfazure = promptflow._cli._pf_azure.entry:main",
            "pfs = promptflow._sdk._service.entry:main",
        ],
    },
    include_package_data=True,
    project_urls={
        "Bug Reports": "https://github.com/microsoft/promptflow/issues",
        "Source": "https://github.com/microsoft/promptflow",
    },
)
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/pf
#!/usr/bin/env python import sys import os if os.environ.get('PF_INSTALLER') is None: os.environ['PF_INSTALLER'] = 'PIP' os.execl(sys.executable, sys.executable, '-m', 'promptflow._cli._pf.entry', *sys.argv[1:])
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/MANIFEST.in
include promptflow/azure/resources/* include promptflow/_sdk/_serving/static/* recursive-include promptflow/_cli/data * recursive-include promptflow/_sdk/data *
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/dev-connections.json.example
{
  "azure_open_ai_connection": {
    "type": "AzureOpenAIConnection",
    "value": {
      "api_key": "aoai-api-key",
      "api_base": "aoai-api-endpoint",
      "api_type": "azure",
      "api_version": "2023-07-01-preview"
    },
    "module": "promptflow.connections"
  },
  "bing_config": {
    "type": "BingConnection",
    "value": {
      "api_key": "bing-api-key"
    },
    "module": "promptflow.connections"
  },
  "bing_connection": {
    "type": "BingConnection",
    "value": {
      "api_key": "bing-api-key"
    },
    "module": "promptflow.connections"
  },
  "azure_content_safety_config": {
    "type": "AzureContentSafetyConnection",
    "value": {
      "api_key": "content-safety-api-key",
      "endpoint": "https://content-safety-canary-test.cognitiveservices.azure.com",
      "api_version": "2023-04-30-preview"
    },
    "module": "promptflow.connections"
  },
  "serp_connection": {
    "type": "SerpConnection",
    "value": {
      "api_key": "serpapi-api-key"
    },
    "module": "promptflow.connections"
  },
  "translate_connection": {
    "type": "CustomConnection",
    "value": {
      "api_key": "<your-key>",
      "api_endpoint": "https://api.cognitive.microsofttranslator.com/",
      "api_region": "global"
    },
    "module": "promptflow.connections",
    "secret_keys": [
      "api_key"
    ]
  },
  "custom_connection": {
    "type": "CustomConnection",
    "value": {
      "key1": "hey",
      "key2": "val2"
    },
    "module": "promptflow.connections",
    "secret_keys": [
      "key1"
    ]
  },
  "custom_strong_type_connection": {
    "type": "CustomConnection",
    "value": {
      "api_key": "<your-key>",
      "api_base": "This is my first custom connection.",
      "promptflow.connection.custom_type": "MyFirstConnection",
      "promptflow.connection.module": "my_tool_package.connections"
    },
    "module": "promptflow.connections",
    "secret_keys": [
      "api_key"
    ]
  },
  "open_ai_connection": {
    "type": "OpenAIConnection",
    "value": {
      "api_key": "<your-key>",
      "organization": "<your-organization>"
    },
    "module": "promptflow.connections"
  }
}
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/NOTICE.txt
NOTICES AND INFORMATION Do Not Translate or Localize This software incorporates material from third parties. Microsoft makes certain open source code available at https://3rdpartysource.microsoft.com, or you may send a check or money order for US $5.00, including the product name, the open source component name, platform, and version number, to: Source Code Compliance Team Microsoft Corporation One Microsoft Way Redmond, WA 98052 USA Notwithstanding any other terms, you may reverse engineer this software to the extent required to debug changes to any libraries licensed under the GNU Lesser General Public License. --------------------------------------------------------- openai 0.27.8 - MIT Copyright (c) OpenAI (https://openai.com) MIT License Copyright (c) <year> <copyright holders> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------- --------------------------------------------------------- flask 2.2.3 - BSD-2-Clause AND BSD-3-Clause Copyright 2010 Pallets copyright 2010 Pallets Copyright (c) 2015 CERN. (c) Copyright 2010 by http://domain.invalid/'> BSD-2-Clause AND BSD-3-Clause --------------------------------------------------------- --------------------------------------------------------- dataset 1.6.0 - MIT Copyright (c) 2013, Open Knowledge Foundation, Friedrich Lindenberg, Gregor Aisch MIT License Copyright (c) <year> <copyright holders> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------- --------------------------------------------------------- sqlalchemy 1.4.48 - MIT (c) Zeno Rocha Copyright (c) Microsoft Copyright Sphinx contributors Copyright 2007-2023 by the Sphinx team Copyright SQLAlchemy 1.4 Documentation (c) OpenJS Foundation and other contributors Copyright (c) 2005-2023 Michael Bayer and contributors Copyright (c) 2010 Gaetan de Menten [email protected] Copyright 2005-2023 SQLAlchemy authors and contributors Copyright (c) Microsoft Corporation', Microsoft SQL Azure Copyright (c) 2021 the SQLAlchemy authors and contributors Copyright (c) 2010-2011 Gaetan de Menten [email protected] Copyright 2007-2023, the SQLAlchemy authors and contributors copyright u'2007-2023, the SQLAlchemy authors and contributors Copyright (c) 2005-2023 the SQLAlchemy authors and contributors Copyright (c) 2006-2023 the SQLAlchemy authors and contributors Copyright (c) 2009-2023 the SQLAlchemy authors and contributors Copyright (c) 2010-2023 the SQLAlchemy authors and contributors Copyright (c) 2013-2023 the SQLAlchemy authors and contributors Copyright (c) 2020-2023 the SQLAlchemy authors and contributors copyright (c) 2007 Fisch Asset Management AG https://www.fam.ch MIT License Copyright (c) <year> <copyright holders> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------- --------------------------------------------------------- pandas 2.1.1 - BSD-2-Clause AND BSD-3-Clause Copyright (c) 2009', join Copyright 2014-2019, xarray Copyright (c) 2012 Google Inc. Copyright (c) 2015 Jared Hobbs Copyright (c) 1994 David Burren Copyright (c) 2011 Szabolcs Nagy Copyright (c) 2011 Valentin Ochs Copyright (c) 2017 Anthony Sottile Copyright (c) 2005-2014 Rich Felker Copyright (c) 2010, Albert Sweigart Copyright (c) 2002 Michael Ringgaard Copyright (c) 2003-2011 David Schultz Copyright (c) 2008 Stephen L. Moshier Copyright (c) 2011 by Enthought, Inc. Copyright 2017- dateutil contributors Copyright (c) 2003-2009 Bruce D. Evans Copyright (c) 2001-2008 Ville Laurikari Copyright (c) 2003-2009 Steven G. Kargl Copyright (c) 1993,2004 Sun Microsystems Copyright (c) 2001, 2002 Enthought, Inc. Copyright (c) 2003-2012 SciPy Developers Copyright (c) 2012, Lambda Foundry, Inc. Copyright (c) 1994 Sun Microsystems, Inc. Copyright (c) 2005-2011, NumPy Developers Copyright (c) 2017 - dateutil contributors Copyright (c) 2015- - dateutil contributors Copyright (c) 2016, PyData Development Team Copyright (c) 2020, PyData Development Team Copyright 2017- Paul Ganssle <[email protected]> Copyright (c) 2011-2022, Open source contributors Copyright (c) 2008 The Android Open Source Project Copyright (c) 2015- - Paul Ganssle <[email protected]> Copyright (c) 2010-2012 Archipel Asset Management AB. 
Copyright (c) 2007 Nick Galbreath nickg at modp dot com Copyright (c) Donald Stufft and individual contributors Copyright (c) 2014-2016 - Yaron de Leeuw <[email protected]> Copyright (c) 2019 Hadley Wickham RStudio and Evan Miller Copyright (c) 2008- Attractive Chaos <[email protected]> Copyright (c) 2003-2011 - Gustavo Niemeyer <[email protected]> Copyright (c) 1988-1993 The Regents of the University of California Copyright (c) 2011-2013, ESN Social Software AB and Jonas Tarnstrom Copyright (c) 2012-2014 - Tomi Pievilainen <[email protected]> Copyright (c) 1995-2001 Corporation for National Research Initiatives Copyright (c) 2008, 2009, 2011 by Attractive Chaos <[email protected]> Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, The Netherlands Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Python Software Foundation Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team BSD-2-Clause AND BSD-3-Clause --------------------------------------------------------- --------------------------------------------------------- python-dotenv 1.0.0 - BSD-2-Clause AND BSD-3-Clause Copyright (c) 2014, Saurabh Kumar BSD-2-Clause AND BSD-3-Clause --------------------------------------------------------- --------------------------------------------------------- keyring 24.2.0 - MIT MIT License Copyright (c) <year> <copyright holders> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------- --------------------------------------------------------- cryptography 41.0.3 - Apache-2.0 OR (Apache-2.0 AND BSD-3-Clause) Copyright 2013-2023 copyright 2013-2023, Individual Copyright (c) Individual contributors Copyright (c) 2005-2020, NumPy Developers Apache-2.0 OR (Apache-2.0 AND BSD-3-Clause) --------------------------------------------------------- --------------------------------------------------------- colorama 0.4.6 - BSD-2-Clause AND BSD-3-Clause Copyright Jonathan Hartley 2013 Copyright (c) 2010 Jonathan Hartley Copyright Jonathan Hartley & Arnon Yaari, 2013-2020 BSD-2-Clause AND BSD-3-Clause --------------------------------------------------------- --------------------------------------------------------- tabulate 0.9.0 - MIT Copyright (c) 2011-2020 Sergey Astanin and contributors MIT License Copyright (c) <year> <copyright holders> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------- --------------------------------------------------------- filelock 3.12.2 - Unlicense This is free and unencumbered software released into the public domain. Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means. In jurisdictions that recognize copyright laws, the author or authors of this software dedicate any and all copyright interest in the software to the public domain. We make this dedication for the benefit of the public at large and to the detriment of our heirs and successors. We intend this dedication to be an overt act of relinquishment in perpetuity of all present and future rights to this software under copyright law. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
For more information, please refer to <http://unlicense.org/> --------------------------------------------------------- --------------------------------------------------------- azure-core 1.25.1 - MIT Copyright (c) Microsoft Corporation MIT License Copyright (c) <year> <copyright holders> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------- --------------------------------------------------------- azure-storage-blob 12.13.1 - MIT Copyright (c) Microsoft Corporation MIT License Copyright (c) <year> <copyright holders> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------- --------------------------------------------------------- azure-identity 1.11.0 - MIT Copyright (c) Microsoft Corporation MIT License Copyright (c) <year> <copyright holders> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------- --------------------------------------------------------- azure-ai-ml 1.9.0 - MIT Copyright (c) Microsoft Corporation MIT License Copyright (c) <year> <copyright holders> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
--------------------------------------------------------- --------------------------------------------------------- pyjwt 2.5.0 - MIT Copyright 2015-2022 Jose Padilla copyright 2015-2022, Jose Padilla Copyright (c) 2015-2022 Jose Padilla MIT License Copyright (c) <year> <copyright holders> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------- --------------------------------------------------------- pathspec 0.10.1 - MPL-2.0 Copyright (c) 2013-2022 Caleb P. Burns credits dahlia <https://github.com/dahlia> Mozilla Public License Version 2.0 1. Definitions 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. 1.3. "Contribution" means Covered Software of a particular Contributor. 1.4. 
"Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. "Incompatible With Secondary Licenses" means (a) that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or (b) that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. "Executable Form" means any form of the work other than Source Code Form. 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" means this document. 1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. "Modifications" means any of the following: (a) any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or (b) any new file in Source Code Form that contains any Covered Software. 1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. "Source Code Form" means the form of the work preferred for making modifications. 1.14. 
"You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: (a) under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and (b) under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. 
Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: (a) for any code that a Contributor has removed from Covered Software; or (b) for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or (c) under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. 3.2. 
Distribution of Executable Form If You distribute Covered Software in Executable Form then: (a) such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and (b) You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. 
You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. 
If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. 6. Disclaimer of Warranty Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. 7. Limitation of Liability Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. 
This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. 9. Miscellaneous This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. 
Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. Exhibit B - "Incompatible With Secondary Licenses" Notice This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. ---------------------------------------------------------
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow/pyproject.toml
# Black code formatter configuration.
[tool.black]
line-length = 120

# Pytest configuration: custom markers used to categorize the test suite.
[tool.pytest.ini_options]
markers = [
    "sdk_test",
    "cli_test",
    "unittest",
    "e2etest",
    "flaky",
    "endpointtest",
    "mt_endpointtest",
]

# Coverage measurement configuration.
[tool.coverage.run]
omit = [
    # omit anything in a _restclient directory anywhere
    "*/_restclient/*",
]
0
promptflow_repo/promptflow/src/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_version.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- VERSION = "0.0.1"
0
promptflow_repo/promptflow/src/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/exceptions.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import inspect
import string
import traceback
from enum import Enum
from functools import cached_property


class ErrorCategory(str, Enum):
    # Coarse classification of an error, used by _ErrorInfo.get_error_info.
    USER_ERROR = "UserError"
    SYSTEM_ERROR = "SystemError"
    UNKNOWN = "Unknown"


class ErrorTarget(str, Enum):
    """The target of the error, indicates which part of the system the error occurs."""

    EXECUTOR = "Executor"
    BATCH = "Batch"
    FLOW_EXECUTOR = "FlowExecutor"
    NODE_EXECUTOR = "NodeExecutor"
    TOOL = "Tool"
    AZURE_RUN_STORAGE = "AzureRunStorage"
    RUNTIME = "Runtime"
    UNKNOWN = "Unknown"
    RUN_TRACKER = "RunTracker"
    RUN_STORAGE = "RunStorage"
    CONTROL_PLANE_SDK = "ControlPlaneSDK"
    SERVING_APP = "ServingApp"
    FLOW_INVOKER = "FlowInvoker"
    FUNCTION_PATH = "FunctionPath"


class PromptflowException(Exception):
    """Base exception for all errors.

    :param message: A message describing the error. This is the error message the user will see.
    :type message: str
    :param target: The name of the element that caused the exception to be thrown.
    :type target: ~promptflow.exceptions.ErrorTarget
    :param error: The original exception if any.
    :type error: Exception
    """

    def __init__(
        self,
        message="",
        message_format="",
        target: ErrorTarget = ErrorTarget.UNKNOWN,
        module=None,
        **kwargs,
    ):
        # The original exception, if supplied via the ``error`` keyword.
        self._inner_exception = kwargs.get("error")
        self._target = target
        self._module = module
        self._message_format = message_format
        # Remaining kwargs are treated as message-format parameters (see message_parameters).
        self._kwargs = kwargs
        if message:
            # An explicit message wins over the message_format rendering.
            self._message = str(message)
        elif self.message_format:
            self._message = self.message_format.format(**self.message_parameters)
        else:
            # Fall back to the exception class name when nothing else is given.
            self._message = self.__class__.__name__

        super().__init__(self._message)

    @property
    def message(self):
        """The error message."""
        return self._message

    @property
    def message_format(self):
        """The error message format."""
        return self._message_format

    @cached_property
    def message_parameters(self):
        """The error message parameters.

        Each placeholder in ``message_format`` is resolved from the constructor kwargs;
        missing placeholders are rendered as ``<name>`` instead of raising.
        """
        if not self._kwargs:
            return {}

        required_arguments = self.get_arguments_from_message_format(self.message_format)
        parameters = {}
        for argument in required_arguments:
            if argument not in self._kwargs:
                parameters[argument] = f"<{argument}>"
            else:
                parameters[argument] = self._kwargs[argument]
        return parameters

    @cached_property
    def serializable_message_parameters(self):
        """The serializable error message parameters."""
        return {k: str(v) for k, v in self.message_parameters.items()}

    @property
    def target(self):
        """The error target.

        :return: The error target.
        :rtype: ~promptflow.exceptions.ErrorTarget
        """
        return self._target

    @target.setter
    def target(self, value):
        """Set the error target."""
        self._target = value

    @property
    def module(self):
        """The module of the error that occurs.

        It is similar to `target` but is more specific.
        It is meant to store the Python module name of the code that raises the exception.
        """
        return self._module

    @module.setter
    def module(self, value):
        """Set the module of the error that occurs."""
        self._module = value

    @property
    def reference_code(self):
        """The reference code of the error, formatted as ``target`` or ``target/module``."""
        # In Python 3.11, the __str__ method of the Enum type returns the name of the enumeration member.
        # However, in earlier Python versions, the __str__ method returns the value of the enumeration member.
        # Therefore, when dealing with this situation, we need to make some additional adjustments.
        target = self.target.value if isinstance(self.target, ErrorTarget) else self.target
        if self.module:
            return f"{target}/{self.module}"
        else:
            return target

    @property
    def inner_exception(self):
        """Get the inner exception.

        The inner exception can be set via either style:

        1) Set via the error parameter in the constructor.
            raise PromptflowException("message", error=inner_exception)

        2) Set via raise from statement.
            raise PromptflowException("message") from inner_exception
        """
        return self._inner_exception or self.__cause__

    @property
    def additional_info(self):
        """Return a dict of the additional info of the exception.

        By default, this information could usually be empty.

        However, we can still define additional info for some specific exception.
        i.e. For ToolExecutionError, we may add the tool's line number, stacktrace to the additional info.
        """
        return None

    @property
    def error_codes(self):
        """Returns a list of the error codes for this exception.

        The error codes is defined the same as the class inheritance.
        i.e. For ToolExecutionError which inherits from UserErrorException,
        The result would be ["UserErrorException", "ToolExecutionError"].
        """
        # Cached after the first computation.
        if getattr(self, "_error_codes", None):
            return self._error_codes

        # Local import to avoid a circular dependency at module import time
        # — TODO confirm; inferred from the import being function-scoped.
        from promptflow._utils.exception_utils import infer_error_code_from_class

        def reversed_error_codes():
            # Walk the MRO from most-derived up to (but excluding) PromptflowException.
            for clz in self.__class__.__mro__:
                if clz is PromptflowException:
                    break
                yield infer_error_code_from_class(clz)

        self._error_codes = list(reversed_error_codes())
        self._error_codes.reverse()
        return self._error_codes

    def get_arguments_from_message_format(self, message_format):
        """Get the arguments from the message format."""

        def iter_field_name():
            if not message_format:
                return

            # string.Formatter().parse yields (literal, field_name, spec, conversion).
            for _, field_name, _, _ in string.Formatter().parse(message_format):
                if field_name is not None:
                    yield field_name

        return set(iter_field_name())

    def __str__(self):
        """Return the error message.

        Some child classes may override this method to return a more detailed error message."""
        return self.message


class UserErrorException(PromptflowException):
    """Exception raised when invalid or unsupported inputs are provided."""

    pass


class SystemErrorException(PromptflowException):
    """Exception raised when service error is triggered."""

    pass


class ValidationException(UserErrorException):
    """Exception raised when validation fails."""

    pass


class _ErrorInfo:
    # Internal helper that classifies an exception into the tuple
    # (category, type, target, message, detail).

    @classmethod
    def get_error_info(cls, e: Exception):
        """Classify *e*; returns all-None for non-Exception input."""
        if not isinstance(e, Exception):
            return None, None, None, None, None

        e = cls.select_exception(e)
        if cls._is_system_error(e):
            return (
                ErrorCategory.SYSTEM_ERROR,
                cls._error_type(e),
                cls._error_target(e),
                cls._error_message(e),
                cls._error_detail(e),
            )
        if cls._is_user_error(e):
            return (
                ErrorCategory.USER_ERROR,
                cls._error_type(e),
                cls._error_target(e),
                cls._error_message(e),
                cls._error_detail(e),
            )

        return ErrorCategory.UNKNOWN, cls._error_type(e), ErrorTarget.UNKNOWN, "", cls._error_detail(e)

    @classmethod
    def select_exception(cls, e: Exception):
        """Select the exception in e and e.__cause__, and prioritize the Exception defined in the promptflow."""
        if isinstance(e, PromptflowException):
            return e

        # raise Exception("message") from PromptflowException("message")
        if e.__cause__ and isinstance(e.__cause__, PromptflowException):
            return e.__cause__

        return e

    @classmethod
    def _is_system_error(cls, e: Exception):
        if isinstance(e, SystemErrorException):
            return True

        return False

    @classmethod
    def _is_user_error(cls, e: Exception):
        if isinstance(e, UserErrorException):
            return True

        return False

    @classmethod
    def _error_type(cls, e: Exception):
        """Return exception type.

        Note:
            For PromptflowException(error=ValueError(message="xxx")) or
            UserErrorException(error=ValueError(message="xxx")) or
            SystemErrorException(error=ValueError(message="xxx")),
            the desired return type is ValueError,
            not PromptflowException, UserErrorException and SystemErrorException.
        """
        error_type = type(e).__name__
        # Only unwrap for the three exact base classes, not their subclasses.
        if type(e) in (PromptflowException, UserErrorException, SystemErrorException):
            if e.inner_exception:
                error_type = type(e.inner_exception).__name__

        return error_type

    @classmethod
    def _error_target(cls, e: Exception):
        return getattr(e, "target", ErrorTarget.UNKNOWN)

    @classmethod
    def _error_message(cls, e: Exception):
        # NOTE(review): returns the message *format*, not the rendered message —
        # presumably to avoid recording user-supplied values; confirm.
        return getattr(e, "message_format", "")

    @classmethod
    def _error_detail(cls, e: Exception):
        """Return "module=..., code=..., lineno=..." for the innermost promptflow frame, or ""."""
        exception_codes = cls._get_exception_codes(e)
        exception_code = None
        # Scan from the deepest frame outward.
        for item in exception_codes[::-1]:
            if "promptflow" in item["module"]:  # Only record information within the promptflow package
                exception_code = item
                break
        if not exception_code:
            return ""

        return (
            f"module={exception_code['module']}, "
            f"code={exception_code['exception_code']}, "
            f"lineno={exception_code['lineno']}."
        )

    @classmethod
    def _get_exception_codes(cls, e: Exception) -> list:
        """
        Obtain information on each line of the traceback, including the module name,
        exception code and lineno where the error occurred.

        :param e: Exception object
        :return: A list, each item contains information for each row of the traceback, which format is like this:
            {
                'module': 'promptflow.executor.errors',
                'exception_code': 'return self.inner_exception.additional_info',
                'lineno': 223
            }
        """
        exception_codes = []
        traceback_info = traceback.extract_tb(e.__traceback__)
        for item in traceback_info:
            lineno = item.lineno
            filename = item.filename
            line_code = item.line
            # Resolve the module by filename (object argument unused, hence None).
            module = inspect.getmodule(None, _filename=filename)
            exception_code = {"module": "", "exception_code": line_code, "lineno": lineno}
            if module is not None:
                exception_code["module"] = module.__name__
            exception_codes.append(exception_code)

        return exception_codes
0
promptflow_repo/promptflow/src/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_constants.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- CONNECTION_NAME_PROPERTY = "__connection_name" CONNECTION_SECRET_KEYS = "__secret_keys" PROMPTFLOW_CONNECTIONS = "PROMPTFLOW_CONNECTIONS" PROMPTFLOW_SECRETS_FILE = "PROMPTFLOW_SECRETS_FILE" PF_NO_INTERACTIVE_LOGIN = "PF_NO_INTERACTIVE_LOGIN" PF_LOGGING_LEVEL = "PF_LOGGING_LEVEL" OPENAI_API_KEY = "openai-api-key" BING_API_KEY = "bing-api-key" AOAI_API_KEY = "aoai-api-key" SERPAPI_API_KEY = "serpapi-api-key" CONTENT_SAFETY_API_KEY = "content-safety-api-key" ERROR_RESPONSE_COMPONENT_NAME = "promptflow" EXTENSION_UA = "prompt-flow-extension" LANGUAGE_KEY = "language" DEFAULT_ENCODING = "utf-8" # Constants related to execution LINE_NUMBER_KEY = "line_number" # Using the same key with portal. LINE_TIMEOUT_SEC = 600 class FlowLanguage: """The enum of tool source type.""" Python = "python" CSharp = "csharp" class AvailableIDE: VS = "vs" VS_CODE = "vsc" USER_AGENT = "USER_AGENT" PF_USER_AGENT = "PF_USER_AGENT" CLI_PACKAGE_NAME = 'promptflow' CURRENT_VERSION = 'current_version' LATEST_VERSION = 'latest_version' LAST_HINT_TIME = 'last_hint_time' LAST_CHECK_TIME = 'last_check_time' PF_VERSION_CHECK = "pf_version_check.json" HINT_INTERVAL_DAY = 7 GET_PYPI_INTERVAL_DAY = 7 _ENV_PF_INSTALLER = 'PF_INSTALLER'
0
promptflow_repo/promptflow/src/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore from promptflow._core.metric_logger import log_metric # flake8: noqa from promptflow._core.tool import ToolProvider, tool from promptflow._core.tracer import trace # control plane sdk functions from promptflow._sdk._load_functions import load_flow, load_run from ._sdk._pf_client import PFClient from ._version import VERSION # backward compatibility log_flow_metric = log_metric __version__ = VERSION __all__ = ["PFClient", "load_flow", "load_run", "log_metric", "ToolProvider", "tool", "trace"]
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/integrations/langchain.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from enum import Enum
from typing import Any, Dict, List, Optional, Union

from langchain.callbacks.base import BaseCallbackHandler
from langchain.schema import AgentAction, AgentFinish, LLMResult

from promptflow._core.tracer import Trace, Tracer, TraceType


class LangChainEventType(Enum):
    # Each member's value is a (name, level) tuple; ``level`` makes event types
    # orderable (see __lt__) so unbalanced start/end events can be detected.
    LLM = "LLM", 0
    CHAIN = "CHAIN", 1
    TOOL = "TOOL", 2
    AGENT = "AGENT", 3

    def __init__(self, _: str, level: int):
        # First tuple element serves only as the enum value; the second is the
        # comparison level.
        self._level = level

    def __lt__(self, other: "LangChainEventType"):
        return self._level < other._level


class PromptFlowCallbackHandler(BaseCallbackHandler):
    """:class:`~promptflow.integrations.langchain.PromptFlowCallbackHandler` implements the
    `langchain.callbacks.base.BaseCallbackHandler` interface, which has a method for each event that
    can be subscribed to. The appropriate method will be called on the handler when the event is triggered.
    """

    def __init__(self):
        super().__init__()
        # May be None when no tracing context is active; _push/_pop then no-op.
        self._tracer = Tracer.active_instance()
        self._events_stack = []  # Use this to track the current event type to avoid popping the wrong event

    @property
    def always_verbose(self) -> bool:
        """Whether to always be verbose."""
        return True

    def _push(self, trace: Trace):
        # Forward the trace to the active tracer, if any.
        if not self._tracer:
            return
        self._tracer._push(trace)

    def _pop(self, output=None, error: Optional[Exception] = None, event_type: Optional[LangChainEventType] = None):
        """Pop the trace from the trace stack.

        PromptFlowCallbackHandler assumed that the langchain events are called in pairs, with a
        corresponding start and end event. However, this is not always true. Therefore, this function
        uses the event stack to keep track of the current event type, in order to avoid popping the
        wrong event.

        The function performs the following steps:
            1. If the trace stack is empty, it simply returns without popping anything.
            2. If the event type is None, it pops the top of the trace stack.
            3. If the top of the event stack is equal to the given event type, it pops the top of the
               event stack and trace stack.
            4. If the top of the event stack is less than the given event type, indicating the previous
               event without a corresponding end, it first pops the top of the event stack and then
               recursively calls the _pop function to continue popping until the correct event type is
               found.
            5. If the top of the event stack is greater than the given event type, indicating the
               current event without a corresponding start, it simply returns without popping anything.

        By following this approach, the function ensures that only the correct events are popped from
        the stacks.
        """
        if not self._tracer:
            return
        if not event_type:
            self._tracer._pop(output, error)
        else:
            if not self._events_stack:
                return
            if self._events_stack[-1] == event_type:
                self._events_stack.pop()
                self._tracer._pop(output, error)
            elif self._events_stack[-1] < event_type:
                # Stale inner event with no end: close it, then retry for our type.
                self._events_stack.pop()
                self._tracer._pop()
                self._pop(output, error, event_type)
            else:
                return

    def on_llm_start(self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any) -> None:
        """Run when LLM starts running.

        :param serialized: The serialized LLM object.
        :type serialized: Dict[str, Any]
        :param prompts: The prompts used to run LLM.
        :type prompts: List[str]
        """
        name = self._get_name(serialized) or "LLM"
        trace = Trace(name, TraceType.LANGCHAIN, {"prompts": prompts})
        self._events_stack.append(LangChainEventType.LLM)
        self._push(trace)

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Run on new LLM token. Only available when streaming is enabled.

        :param token: The new token.
        :type token: str
        """
        pass  # We do not handle this event

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Run when LLM ends running.

        :param response: The response from LLM.
        :type response: LLMResult
        """
        output = response
        self._pop(output, event_type=LangChainEventType.LLM)

    def on_llm_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        """Run when LLM errors.

        :param error: The error from LLM.
        :type error: Union[Exception, KeyboardInterrupt]
        """
        self._pop(error=error, event_type=LangChainEventType.LLM)

    def on_chain_start(self, serialized: Dict[str, Any], inputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain starts running.

        :param serialized: The serialized chain object.
        :type serialized: Dict[str, Any]
        :param inputs: The inputs used to run chain.
        :type inputs: Dict[str, Any]
        """
        name = self._get_name(serialized) or "Chain"
        trace = Trace(name, TraceType.LANGCHAIN, inputs)
        self._events_stack.append(LangChainEventType.CHAIN)
        self._push(trace)

    def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
        """Run when chain ends running.

        :param outputs: The outputs from chain.
        :type outputs: Dict[str, Any]
        """
        self._pop(outputs, event_type=LangChainEventType.CHAIN)

    def on_chain_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        """Run when chain errors.

        :param error: The error from chain.
        :type error: Union[Exception, KeyboardInterrupt]
        """
        self._pop(error=error, event_type=LangChainEventType.CHAIN)

    def on_tool_start(self, serialized: Dict[str, Any], input_str: str, **kwargs: Any) -> None:
        """Run when tool starts running.

        :param serialized: The serialized tool object.
        :type serialized: Dict[str, Any]
        :param input_str: The input string used to run tool.
        :type input_str: str
        """
        name = self._get_name(serialized) or "Tool"
        trace = Trace(name, TraceType.LANGCHAIN, {"input_str": input_str})
        self._events_stack.append(LangChainEventType.TOOL)
        self._push(trace)

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Run when tool ends running.

        :param output: The output from tool.
        :type output: str
        """
        self._pop(output, event_type=LangChainEventType.TOOL)

    def on_tool_error(self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any) -> None:
        """Run when tool errors.

        :param error: The error from tool.
        :type error: Union[Exception, KeyboardInterrupt]
        """
        self._pop(error=error, event_type=LangChainEventType.TOOL)

    def on_text(self, text: str, **kwargs: Any) -> None:
        """Run on arbitrary text.

        :param text: The text.
        :type text: str
        """
        pass

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
        """Run on agent action.

        :param action: The action from agent.
        :type action: AgentAction
        """
        name = action.tool
        trace = Trace(name, TraceType.LANGCHAIN, {"tool_input": action.tool_input})
        self._events_stack.append(LangChainEventType.AGENT)
        self._push(trace)

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Run on agent end.

        :param finish: The finish from agent.
        :type finish: AgentFinish
        """
        output = finish.return_values
        self._pop(output, event_type=LangChainEventType.AGENT)

    def _get_name(self, serialized: Dict[str, Any]):
        # For version 0.0.197 and earlier, the name is stored in the "name" field,
        # and for later versions, the name is stored in the "id" field.
        # If none exists, return None and use a default name.
        if "name" in serialized.keys():
            return serialized["name"]
        elif "id" in serialized.keys() and isinstance(serialized["id"], list):
            return serialized["id"][-1]
        else:
            return None
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_user_agent.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow._version import VERSION

# User-agent string for CLI-originated requests, e.g. "promptflow-cli/0.0.1".
# f-string replaces str.format with fixed literal arguments (same result, clearer).
USER_AGENT = f"promptflow-cli/{VERSION}"
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/pf.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- from promptflow._cli._pf.entry import main # this is a compatibility layer for the old CLI which is used for vscode extension if __name__ == "__main__": main()
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_utils.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from collections import namedtuple
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import pydash
from dotenv import load_dotenv
from tabulate import tabulate

from promptflow._sdk._constants import CLIListOutputFormat
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException

# Lightweight holder for the three identifiers that pin down an AzureML workspace.
AzureMLWorkspaceTriad = namedtuple("AzureMLWorkspace", ["subscription_id", "resource_group_name", "workspace_name"])

logger = get_cli_sdk_logger()


def _set_workspace_argument_for_subparsers(subparser, required=False):
    """Add workspace arguments to subparsers."""
    # Make these arguments optional so that user can use local azure cli context
    subparser.add_argument(
        "--subscription", required=required, type=str, help="Subscription id, required when pass run id."
    )
    subparser.add_argument(
        "--resource-group", "-g", required=required, type=str, help="Resource group name, required when pass run id."
    )
    subparser.add_argument(
        "--workspace-name", "-w", required=required, type=str, help="Workspace name, required when pass run id."
    )


def dump_connection_file(dot_env_file: str):
    # Build a local connection.json from the given .env file and point
    # PROMPTFLOW_CONNECTIONS at it; silently does nothing unless all three
    # required variables are already present in the environment.
    for key in ["AZURE_OPENAI_API_KEY", "AZURE_OPENAI_API_BASE", "CHAT_DEPLOYMENT_NAME"]:
        if key not in os.environ:
            # skip dump connection file if not all required environment variables are set
            return

    connection_file_path = "./connection.json"
    os.environ["PROMPTFLOW_CONNECTIONS"] = connection_file_path

    load_dotenv(dot_env_file)
    connection_dict = {
        "custom_connection": {
            "type": "CustomConnection",
            "value": {
                "AZURE_OPENAI_API_KEY": os.environ["AZURE_OPENAI_API_KEY"],
                "AZURE_OPENAI_API_BASE": os.environ["AZURE_OPENAI_API_BASE"],
                "CHAT_DEPLOYMENT_NAME": os.environ["CHAT_DEPLOYMENT_NAME"],
            },
            "module": "promptflow.connections",
        }
    }
    with open(connection_file_path, "w") as f:
        json.dump(connection_dict, f)


def get_workspace_triad_from_local() -> AzureMLWorkspaceTriad:
    # Read the default subscription / resource group / workspace from the local
    # Azure CLI config files under ~/.azure; any piece that cannot be read is None.
    subscription_id = None
    resource_group_name = None
    workspace_name = None
    azure_config_path = Path.home() / ".azure"
    config_parser = ConfigParser()
    # subscription id
    try:
        config_parser.read_file(open(azure_config_path / "clouds.config"))
        subscription_id = config_parser["AzureCloud"]["subscription"]
    except Exception:  # pylint: disable=broad-except
        pass
    # resource group name & workspace name
    try:
        config_parser.read_file(open(azure_config_path / "config"))
        resource_group_name = config_parser["defaults"]["group"]
        workspace_name = config_parser["defaults"]["workspace"]
    except Exception:  # pylint: disable=broad-except
        pass
    return AzureMLWorkspaceTriad(subscription_id, resource_group_name, workspace_name)


def get_credentials_for_cli():
    """
    This function is part of mldesigner.dsl._dynamic_executor.DynamicExecutor._get_ml_client with
    some local imports.
    """
    from azure.ai.ml.identity import AzureMLOnBehalfOfCredential
    from azure.identity import AzureCliCredential, DefaultAzureCredential, ManagedIdentityCredential

    # May return a different one if executing in local
    # credential priority: OBO > managed identity > default
    # check OBO via environment variable, the referenced code can be found from below search:
    # https://msdata.visualstudio.com/Vienna/_search?text=AZUREML_OBO_ENABLED&type=code&pageSize=25&filters=ProjectFilters%7BVienna%7D&action=contents
    if os.getenv(IdentityEnvironmentVariable.OBO_ENABLED_FLAG):
        logger.debug("User identity is configured, use OBO credential.")
        credential = AzureMLOnBehalfOfCredential()
    else:
        client_id_from_env = os.getenv(IdentityEnvironmentVariable.DEFAULT_IDENTITY_CLIENT_ID)
        if client_id_from_env:
            # use managed identity when client id is available from environment variable.
            # reference code:
            # https://learn.microsoft.com/en-us/azure/machine-learning/how-to-identity-based-service-authentication?tabs=cli#compute-cluster
            logger.debug("Use managed identity credential.")
            credential = ManagedIdentityCredential(client_id=client_id_from_env)
        elif is_in_ci_pipeline():
            # use managed identity when executing in CI pipeline.
            logger.debug("Use azure cli credential.")
            credential = AzureCliCredential()
        else:
            # use default Azure credential to handle other cases.
            logger.debug("Use default credential.")
            credential = DefaultAzureCredential()

    return credential


def get_client_info_for_cli(subscription_id: str = None, resource_group_name: str = None, workspace_name: str = None):
    # Resolve any missing workspace identifier, first from the local Azure CLI
    # config, then from AzureML environment variables.
    if not (subscription_id and resource_group_name and workspace_name):
        workspace_triad = get_workspace_triad_from_local()
        subscription_id = subscription_id or workspace_triad.subscription_id
        resource_group_name = resource_group_name or workspace_triad.resource_group_name
        workspace_name = workspace_name or workspace_triad.workspace_name

    if not (subscription_id and resource_group_name and workspace_name):
        workspace_name = workspace_name or os.getenv("AZUREML_ARM_WORKSPACE_NAME")
        subscription_id = subscription_id or os.getenv("AZUREML_ARM_SUBSCRIPTION")
        resource_group_name = resource_group_name or os.getenv("AZUREML_ARM_RESOURCEGROUP")

    return subscription_id, resource_group_name, workspace_name


def get_client_for_cli(*, subscription_id: str = None, resource_group_name: str = None, workspace_name: str = None):
    # Build an azure.ai.ml.MLClient for the resolved workspace; raises a
    # UserErrorException naming whichever identifiers could not be resolved.
    from azure.ai.ml import MLClient

    subscription_id, resource_group_name, workspace_name = get_client_info_for_cli(
        subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name
    )
    missing_fields = []
    for key in ["workspace_name", "subscription_id", "resource_group_name"]:
        if not locals()[key]:
            missing_fields.append(key)
    if missing_fields:
        raise UserErrorException(
            "Please provide all required fields to work on specific workspace: {}".format(", ".join(missing_fields)),
            target=ErrorTarget.CONTROL_PLANE_SDK,
        )

    return MLClient(
        credential=get_credentials_for_cli(),
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
    )


def confirm(question, skip_confirm) -> bool:
    # Interactive y/n prompt; returns True immediately when skip_confirm is set.
    if skip_confirm:
        return True
    answer = input(f"{question} [y/n]")
    while answer.lower() not in ["y", "n"]:
        answer = input("Please input 'y' or 'n':")
    return answer.lower() == "y"


@contextlib.contextmanager
def inject_sys_path(path):
    # Temporarily prepend *path* to sys.path; the original list is restored on exit.
    original_sys_path = sys.path.copy()
    sys.path.insert(0, str(path))
    try:
        yield
    finally:
        sys.path = original_sys_path


def activate_action(name, description, epilog, add_params, subparsers, help_message, action_param_name="action"):
    # Register a CLI sub-command: create the subparser, apply each add_param
    # callback, and record the chosen action name via set_defaults.
    parser = subparsers.add_parser(
        name,
        description=description,
        epilog=epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help=help_message,
    )
    if add_params:
        for add_param_func in add_params:
            add_param_func(parser)
    parser.set_defaults(**{action_param_name: name})
    return parser


class IdentityEnvironmentVariable:
    """This class is copied from mldesigner._constants.IdentityEnvironmentVariable."""

    DEFAULT_IDENTITY_CLIENT_ID = "DEFAULT_IDENTITY_CLIENT_ID"
    OBO_ENABLED_FLAG = "AZUREML_OBO_ENABLED"


def _dump_entity_with_warnings(entity) -> Dict:
    # Best-effort conversion of an SDK entity to a dict; failures are logged as
    # warnings and result in None (falsy/dict inputs are passed through).
    if not entity:
        return
    if isinstance(entity, Dict):
        return entity
    try:
        return entity._to_dict()  # type: ignore
    except Exception as err:
        logger.warning("Failed to deserialize response: " + str(err))
        logger.warning(str(entity))
        logger.debug(traceback.format_exc())


def list_of_dict_to_dict(obj: list):
    # Merge a list of dicts into one dict (later entries win); non-lists yield {}.
    if not isinstance(obj, list):
        return {}
    result = {}
    for item in obj:
        result.update(item)
    return result


def list_of_dict_to_nested_dict(obj: list):
    # Merge a list of dicts whose keys are dotted paths ("a.b.c") into a nested dict.
    result = {}
    for item in obj:
        for keys, value in item.items():
            keys = keys.split(".")
            pydash.set_(result, keys, value)
    return result


def _build_sorted_column_widths_tuple_list(
    columns: List[str],
    values1: Dict[str, int],
    values2: Dict[str, int],
    margins: Dict[str, int],
) -> List[Tuple[str, int]]:
    # For each column take max(values1, values2) + margin, then sort widest first.
    res = []
    for column in columns:
        value = max(values1[column], values2[column]) + margins[column]
        res.append((column, value))
    res.sort(key=lambda x: x[1], reverse=True)
    return res


def _assign_available_width(
    column_expected_widths: List[Tuple[str, int]],
    available_width: int,
    column_assigned_widths: Dict[str, int],
    average_width: Optional[int] = None,
) -> Tuple[int, Dict[str, int]]:
    # Distribute *available_width* across columns (widest-need first); a column
    # gets its expected width when the remaining budget allows, otherwise
    # whatever budget is left. NOTE: mutates column_assigned_widths in place.
    for column, expected_width in column_expected_widths:
        if available_width <= 0:
            break
        target = average_width if average_width is not None else column_assigned_widths[column]
        delta = expected_width - target
        if delta <= available_width:
            column_assigned_widths[column] = expected_width
            available_width -= delta
        else:
            column_assigned_widths[column] += available_width
            available_width = 0
    return available_width, column_assigned_widths


def _calculate_column_widths(df: "DataFrame", terminal_width: int) -> List[int]:
    num_rows, num_columns = len(df), len(df.columns)
    index_column_width = max(len(str(num_rows)) + 2, 4)  # tabulate index column min width is 4
    terminal_width_buffer = 10
    available_width = terminal_width - terminal_width_buffer - index_column_width - (num_columns + 2)
    avg_available_width = available_width // num_columns
    header_widths, content_avg_widths, content_max_widths, column_margin = {}, {}, {}, {}
    for column in df.columns:
        header_widths[column] = len(column)
        contents = []
        for value in df[column]:
            contents.append(len(str(value)))
        content_avg_widths[column] = sum(contents) // len(contents)
        content_max_widths[column] = max(contents)
        # if header is longer than the longest content, the margin is 4; otherwise is 2
        # so we need to record this for every column
        if header_widths[column] >= content_max_widths[column]:
            column_margin[column] = 4
        else:
            column_margin[column] = 2
    column_widths = {}
    # first round: try to meet the average(or column header) width
    # record columns that need more width, we will deal with them in second round if we still have width
    round_one_left_columns = []
    for column in df.columns:
        expected_width = max(header_widths[column], content_avg_widths[column]) + column_margin[column]
        if avg_available_width <= expected_width:
            column_widths[column] = avg_available_width
            round_one_left_columns.append(column)
        else:
            column_widths[column] = expected_width
    current_available_width = available_width - sum(column_widths.values())
    if current_available_width > 0:
        # second round: assign left available width to those
columns that need more # assign with greedy, sort recorded columns first from longest to shortest; # iterate and try to meet each column's expected width column_avg_tuples = _build_sorted_column_widths_tuple_list( round_one_left_columns, header_widths, content_avg_widths, column_margin ) current_available_width, column_widths = _assign_available_width( column_avg_tuples, current_available_width, column_widths, avg_available_width ) if current_available_width > 0: # third round: if there are still left available width, assign to try to meet the max width # still use greedy, sort first and iterate through all columns column_max_tuples = _build_sorted_column_widths_tuple_list( df.columns, header_widths, content_max_widths, column_margin ) current_available_width, column_widths = _assign_available_width( column_max_tuples, current_available_width, column_widths ) max_col_widths = [index_column_width] # index column max_col_widths += [max(column_widths[column] - column_margin[column], 1) for column in df.columns] # sub margin return max_col_widths def pretty_print_dataframe_as_table(df: "DataFrame") -> None: # try to get terminal window width try: terminal_width = shutil.get_terminal_size().columns except Exception: # pylint: disable=broad-except terminal_width = 120 # default value for Windows Terminal launch size columns column_widths = _calculate_column_widths(df, terminal_width) print(tabulate(df, headers="keys", tablefmt="grid", maxcolwidths=column_widths, maxheadercolwidths=column_widths)) def is_format_exception(): if os.environ.get("PROMPTFLOW_STRUCTURE_EXCEPTION_OUTPUT", "false").lower() == "true": return True return False def exception_handler(command: str): """Catch known cli exceptions.""" def decorator(func): @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: if is_format_exception(): # When the flag format_exception is set in command, # it will write a json with exception info and command to stderr. 
error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True) error_msg["command"] = " ".join(sys.argv) sys.stderr.write(json.dumps(error_msg)) if isinstance(e, PromptflowException): print_red_error(f"{command} failed with {e.__class__.__name__}: {str(e)}") exit(1) else: raise e return wrapper return decorator def get_secret_input(prompt, mask="*"): """Get secret input with mask printed on screen in CLI. Provide better handling for control characters: - Handle Ctrl-C as KeyboardInterrupt - Ignore control characters and print warning message. """ if not isinstance(prompt, str): raise TypeError(f"prompt must be a str, not ${type(prompt).__name__}") if not isinstance(mask, str): raise TypeError(f"mask argument must be a one-character str, not ${type(mask).__name__}") if len(mask) != 1: raise ValueError("mask argument must be a one-character str") if sys.platform == "win32": # For some reason, mypy reports that msvcrt doesn't have getch, ignore this warning: from msvcrt import getch # type: ignore else: # macOS and Linux import termios import tty def getch(): fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) try: tty.setraw(sys.stdin.fileno()) ch = sys.stdin.read(1) finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) return ch secret_input = [] sys.stdout.write(prompt) sys.stdout.flush() while True: key = ord(getch()) if key == 13: # Enter key pressed. sys.stdout.write("\n") return "".join(secret_input) elif key == 3: # Ctrl-C pressed. raise KeyboardInterrupt() elif key in (8, 127): # Backspace/Del key erases previous output. if len(secret_input) > 0: # Erases previous character. sys.stdout.write("\b \b") # \b doesn't erase the character, it just moves the cursor back. sys.stdout.flush() secret_input = secret_input[:-1] elif 0 <= key <= 31: msg = "\nThe last user input got ignored as it is control character." 
print_yellow_warning(msg) sys.stdout.write(prompt + mask * len(secret_input)) sys.stdout.flush() else: # display the mask character. char = chr(key) sys.stdout.write(mask) sys.stdout.flush() secret_input.append(char) def _copy_to_flow(flow_path, source_file): target = flow_path / source_file.name action = "Overwriting" if target.exists() else "Creating" if source_file.is_file(): print(f"{action} {source_file.name}...") shutil.copy2(source_file, target) else: print(f"{action} {source_file.name} folder...") shutil.copytree(source_file, target, dirs_exist_ok=True) def _output_result_list_with_format(result_list: List[Dict], output_format: CLIListOutputFormat) -> None: import pandas as pd if output_format == CLIListOutputFormat.TABLE: df = pd.DataFrame(result_list) df.fillna("", inplace=True) pretty_print_dataframe_as_table(df) elif output_format == CLIListOutputFormat.JSON: print(json.dumps(result_list, indent=4)) else: warning_message = ( f"Unknown output format {output_format!r}, accepted values are 'json' and 'table';" "will print using 'json'." ) logger.warning(warning_message) print(json.dumps(result_list, indent=4)) def _get_cli_activity_name(cli, args): activity_name = cli if getattr(args, "action", None): activity_name += f".{args.action}" if getattr(args, "sub_action", None): activity_name += f".{args.sub_action}" return activity_name def _try_delete_existing_run_record(run_name: str): from promptflow._sdk._errors import RunNotFoundError from promptflow._sdk._orm import RunInfo as ORMRun try: ORMRun.delete(run_name) except RunNotFoundError: pass
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_params.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import argparse from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, PROMPT_FLOW_RUNS_DIR_NAME, CLIListOutputFormat, FlowType # TODO: avoid azure dependency here MAX_LIST_CLI_RESULTS = 50 class AppendToDictAction(argparse._AppendAction): # pylint: disable=protected-access def __call__(self, parser, namespace, values, option_string=None): action = self.get_action(values, option_string) super(AppendToDictAction, self).__call__(parser, namespace, action, option_string) def get_action(self, values, option_string): # pylint: disable=no-self-use from promptflow._sdk._utils import strip_quotation kwargs = {} for item in values: try: key, value = strip_quotation(item).split("=", 1) kwargs[key] = strip_quotation(value) except ValueError: raise Exception("Usage error: {} KEY=VALUE [KEY=VALUE ...]".format(option_string)) return kwargs class FlowTestInputAction(AppendToDictAction): # pylint: disable=protected-access def get_action(self, values, option_string): # pylint: disable=no-self-use if len(values) == 1 and "=" not in values[0]: from promptflow._utils.load_data import load_data if not values[0].endswith(".jsonl"): raise ValueError("Only support jsonl file as input.") return load_data(local_path=values[0])[0] else: return super().get_action(values, option_string) def add_param_yes(parser): parser.add_argument( "-y", "--yes", "--assume-yes", action="store_true", help="Automatic yes to all prompts; assume 'yes' as answer to all prompts and run non-interactively.", ) def add_param_ua(parser): # suppress user agent for now since it's only used in vscode extension parser.add_argument("--user-agent", help=argparse.SUPPRESS) def add_param_flow_display_name(parser): parser.add_argument("--flow", type=str, required=True, help="The flow name to create.") def add_param_entry(parser): 
parser.add_argument("--entry", type=str, help="The entry file.") def add_param_function(parser): parser.add_argument("--function", type=str, help="The function name in entry file.") def add_param_prompt_template(parser): parser.add_argument( "--prompt-template", action=AppendToDictAction, help="The prompt template parameter and assignment.", nargs="+" ) def add_param_set(parser): parser.add_argument( "--set", dest="params_override", action=AppendToDictAction, help="Update an object by specifying a property path and value to set. Example: --set " "property1.property2=<value>.", nargs="+", ) def add_param_set_positional(parser): parser.add_argument( "params_override", action=AppendToDictAction, help="Set an object by specifying a property path and value to set. Example: set " "property1.property2=<value>.", nargs="+", ) def add_param_environment_variables(parser): parser.add_argument( "--environment-variables", action=AppendToDictAction, help="Environment variables to set by specifying a property path and value. Example: --environment-variable " "key1='${my_connection.api_key}' key2='value2'. The value reference to connection keys will be resolved " "to the actual value, and all environment variables specified will be set into os.environ.", nargs="+", ) def add_param_connections(parser): parser.add_argument( "--connections", action=AppendToDictAction, help="Overwrite node level connections with provided value. Example: --connections " "node1.connection=test_llm_connection node1.deployment_name=gpt-35-turbo", nargs="+", ) def add_param_columns_mapping(parser): parser.add_argument( "--column-mapping", action=AppendToDictAction, help="Inputs column mapping, use ${data.xx} to refer to data columns, " "use ${run.inputs.xx} to refer to referenced run's data columns. " "and use ${run.outputs.xx} to refer to referenced run's output columns." 
"Example: --column-mapping data1='${data.data1}' data2='${run.inputs.data2}' data3='${run.outputs.data3}'", nargs="+", ) def add_param_set_tool_extra_info(parser): parser.add_argument( "--set", dest="extra_info", action=AppendToDictAction, help="Set extra information about the tool. Example: --set <key>=<value>.", nargs="+", ) def add_param_inputs(parser): parser.add_argument( "--inputs", action=FlowTestInputAction, help="Input datas of file for the flow. Example: --inputs data1=data1_val data2=data2_val", nargs="+", ) def add_param_env(parser): parser.add_argument( "--env", type=str, default=None, help="The dotenv file path containing the environment variables to be used in the flow.", ) def add_param_output(parser): parser.add_argument( "-o", "--output", type=str, help=( f"The output directory to store the results. " f"Default to be ~/{PROMPT_FLOW_DIR_NAME}/{PROMPT_FLOW_RUNS_DIR_NAME} if not specified." ), ) def add_param_overwrite(parser): parser.add_argument("--overwrite", action="store_true", help="Overwrite the existing results.") def add_param_source(parser): parser.add_argument("--source", type=str, required=True, help="The flow or run source to be used.") def add_param_run_name(parser): parser.add_argument("-n", "--name", required=True, type=str, help="Name of the run.") def add_param_connection_name(parser): parser.add_argument("-n", "--name", type=str, help="Name of the connection to create.") def add_param_max_results(parser): parser.add_argument( # noqa: E731 "-r", "--max-results", dest="max_results", type=int, default=MAX_LIST_CLI_RESULTS, help=f"Max number of results to return. Default is {MAX_LIST_CLI_RESULTS}.", ) def add_param_all_results(parser): parser.add_argument( # noqa: E731 "--all-results", action="store_true", dest="all_results", default=False, help="Returns all results. 
Default to False.", ) def add_param_variant(parser): parser.add_argument( "--variant", "-v", type=str, help="The variant to be used in flow, will use default variant if not specified.", ) def add_parser_build(subparsers, entity_name: str): add_param_build_output = lambda parser: parser.add_argument( # noqa: E731 "--output", "-o", required=True, type=str, help="The destination folder path." ) add_param_format = lambda parser: parser.add_argument( # noqa: E731 "--format", "-f", type=str, help="The format to build with.", choices=["docker", "executable"] ) # this is a hidden parameter for `mldesigner compile` command add_param_flow_only = lambda parser: parser.add_argument( # noqa: E731 "--flow-only", action="store_true", help=argparse.SUPPRESS, ) add_params = [ add_param_source, add_param_build_output, add_param_format, add_param_flow_only, add_param_variant, ] + base_params from promptflow._cli._utils import activate_action description = f"Build a {entity_name} for further sharing or deployment." activate_action( name="build", description=description, epilog=f"pf {entity_name} build --source <source> --output <output> --format " f"docker|package", add_params=add_params, subparsers=subparsers, action_param_name="sub_action", help_message=description, ) def add_param_debug(parser): parser.add_argument( "-d", "--debug", action="store_true", help="The flag to turn on debug mode for cli.", ) def add_param_verbose(parser): parser.add_argument( "--verbose", action="store_true", help="Increase logging verbosity. 
Use --debug for full debug logs.", ) def add_param_config(parser): parser.add_argument( "--config", nargs="+", action=AppendToDictAction, help=argparse.SUPPRESS, ) logging_params = [add_param_verbose, add_param_debug] base_params = logging_params + [ add_param_ua, ] def add_param_archived_only(parser): parser.add_argument( "--archived-only", action="store_true", help="Only list archived records.", ) def add_param_include_archived(parser): parser.add_argument( "--include-archived", action="store_true", help="List both archived records and active records.", ) def add_param_output_format(parser): parser.add_argument( "-o", "--output", type=str, default=CLIListOutputFormat.JSON, help="Output format, accepted values are 'json' and 'table'. Default is 'json'.", choices=[CLIListOutputFormat.TABLE, CLIListOutputFormat.JSON], ) def add_param_include_others(parser): parser.add_argument( "--include-others", action="store_true", help="Get records that are owned by all users.", ) def add_param_flow_type(parser): parser.add_argument( "--type", type=str, help=( f"The type of the flow. Available values are {FlowType.get_all_values()}. " f"Default to be None, which means all types included." ), ) def add_param_flow_name(parser): parser.add_argument( "-n", "--name", type=str, required=True, help="The name of the flow.", )
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/_flow.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import argparse import importlib import json import os import shutil import subprocess import sys import tempfile import webbrowser from pathlib import Path from promptflow._cli._params import ( add_param_config, add_param_entry, add_param_environment_variables, add_param_flow_display_name, add_param_function, add_param_inputs, add_param_prompt_template, add_param_source, add_param_yes, add_parser_build, base_params, ) from promptflow._cli._pf._init_entry_generators import ( AzureOpenAIConnectionGenerator, ChatFlowDAGGenerator, FlowDAGGenerator, OpenAIConnectionGenerator, StreamlitFileReplicator, ToolMetaGenerator, ToolPyGenerator, copy_extra_files, ) from promptflow._cli._pf._run import exception_handler from promptflow._cli._utils import _copy_to_flow, activate_action, confirm, inject_sys_path, list_of_dict_to_dict from promptflow._constants import LANGUAGE_KEY, FlowLanguage from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, ConnectionProvider from promptflow._sdk._pf_client import PFClient from promptflow._sdk.operations._flow_operations import FlowOperations from promptflow._utils.logger_utils import get_cli_sdk_logger from promptflow.exceptions import ErrorTarget, UserErrorException DEFAULT_CONNECTION = "open_ai_connection" DEFAULT_DEPLOYMENT = "gpt-35-turbo" logger = get_cli_sdk_logger() def add_flow_parser(subparsers): """Add flow parser to the pf subparsers.""" flow_parser = subparsers.add_parser( "flow", description="Manage flows for promptflow.", help="pf flow", ) flow_subparsers = flow_parser.add_subparsers() add_parser_init_flow(flow_subparsers) add_parser_test_flow(flow_subparsers) add_parser_serve_flow(flow_subparsers) add_parser_build(flow_subparsers, "flow") add_parser_validate_flow(flow_subparsers) flow_parser.set_defaults(action="flow") def 
dispatch_flow_commands(args: argparse.Namespace): if args.sub_action == "init": init_flow(args) elif args.sub_action == "test": test_flow(args) elif args.sub_action == "serve": serve_flow(args) elif args.sub_action == "build": build_flow(args) elif args.sub_action == "validate": validate_flow(args) def add_parser_init_flow(subparsers): """Add flow create parser to the pf flow subparsers.""" epilog = """ Examples: # Creating a flow folder with code/prompts and yaml definitions of the flow: pf flow init --flow my-awesome-flow # Creating an eval prompt flow: pf flow init --flow my-awesome-flow --type evaluation # Creating a flow in existing folder pf flow init --flow intent_copilot --entry intent.py --function extract_intent --prompt-template prompt_template=tpl.jinja2 """ # noqa: E501 add_param_type = lambda parser: parser.add_argument( # noqa: E731 "--type", type=str, choices=["standard", "evaluation", "chat"], help="The initialized flow type.", default="standard", ) add_param_connection = lambda parser: parser.add_argument( # noqa: E731 "--connection", type=str, help=argparse.SUPPRESS ) add_param_deployment = lambda parser: parser.add_argument( # noqa: E731 "--deployment", type=str, help=argparse.SUPPRESS ) add_params = [ add_param_type, add_param_yes, add_param_flow_display_name, add_param_entry, add_param_function, add_param_prompt_template, add_param_connection, add_param_deployment, ] + base_params activate_action( name="init", description="Creating a flow folder with code/prompts and yaml definitions of the flow.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Initialize a prompt flow directory.", action_param_name="sub_action", ) def add_parser_serve_flow(subparsers): """Add flow serve parser to the pf flow subparsers.""" epilog = """ Examples: # Serve flow as an endpoint: pf flow serve --source <path_to_flow> # Serve flow as an endpoint with specific port and host: pf flow serve --source <path_to_flow> --port 8080 --host 
localhost --environment-variables key1="`${my_connection.api_key}" key2="value2" # Serve flow without opening browser: pf flow serve --source <path_to_flow> --skip-open-browser """ # noqa: E501 add_param_port = lambda parser: parser.add_argument( # noqa: E731 "--port", type=int, default=8080, help="The port on which endpoint to run." ) add_param_host = lambda parser: parser.add_argument( # noqa: E731 "--host", type=str, default="localhost", help="The host of endpoint." ) add_param_static_folder = lambda parser: parser.add_argument( # noqa: E731 "--static_folder", type=str, help=argparse.SUPPRESS ) add_param_skip_browser = lambda parser: parser.add_argument( # noqa: E731 "--skip-open-browser", action="store_true", default=False, help="Skip open browser for flow serving." ) activate_action( name="serve", description="Serving a flow as an endpoint.", epilog=epilog, add_params=[ add_param_source, add_param_port, add_param_host, add_param_static_folder, add_param_environment_variables, add_param_config, add_param_skip_browser, ] + base_params, subparsers=subparsers, help_message="Serving a flow as an endpoint.", action_param_name="sub_action", ) def add_parser_validate_flow(subparsers): """Add flow validate parser to the pf flow subparsers.""" epilog = """ Examples: # Validate flow pf flow validate --source <path_to_flow> """ # noqa: E501 activate_action( name="validate", description="Validate a flow and generate flow.tools.json for the flow.", epilog=epilog, add_params=[ add_param_source, ] + base_params, subparsers=subparsers, help_message="Validate a flow. 
Will raise error if the flow is not valid.", action_param_name="sub_action", ) def add_parser_test_flow(subparsers): """Add flow test parser to the pf flow subparsers.""" epilog = """ Examples: # Test the flow: pf flow test --flow my-awesome-flow # Test the flow with inputs: pf flow test --flow my-awesome-flow --inputs key1=val1 key2=val2 # Test the flow with specified variant node: pf flow test --flow my-awesome-flow --variant ${node_name.variant_name} # Test the single node in the flow: pf flow test --flow my-awesome-flow --node node_name # Chat in the flow: pf flow test --flow my-awesome-flow --node node_name --interactive """ # noqa: E501 add_param_flow = lambda parser: parser.add_argument( # noqa: E731 "--flow", type=str, required=True, help="the flow directory to test." ) add_param_node = lambda parser: parser.add_argument( # noqa: E731 "--node", type=str, help="the node name in the flow need to be tested." ) add_param_variant = lambda parser: parser.add_argument( # noqa: E731 "--variant", type=str, help="Node & variant name in format of ${node_name.variant_name}." ) add_param_interactive = lambda parser: parser.add_argument( # noqa: E731 "--interactive", action="store_true", help="start a interactive chat session for chat flow." 
) add_param_multi_modal = lambda parser: parser.add_argument( # noqa: E731 "--multi-modal", action="store_true", help=argparse.SUPPRESS ) add_param_ui = lambda parser: parser.add_argument("--ui", action="store_true", help=argparse.SUPPRESS) # noqa: E731 add_param_input = lambda parser: parser.add_argument("--input", type=str, help=argparse.SUPPRESS) # noqa: E731 add_param_detail = lambda parser: parser.add_argument( # noqa: E731 "--detail", type=str, default=None, required=False, help=argparse.SUPPRESS ) add_params = [ add_param_flow, add_param_node, add_param_variant, add_param_interactive, add_param_input, add_param_inputs, add_param_environment_variables, add_param_multi_modal, add_param_ui, add_param_config, add_param_detail, ] + base_params activate_action( name="test", description="Test the flow.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Test the prompt flow or flow node.", action_param_name="sub_action", ) def init_flow(args): if any([args.entry, args.prompt_template]): print("Creating flow from existing folder...") prompt_tpl = {} if args.prompt_template: for _dct in args.prompt_template: prompt_tpl.update(**_dct) _init_existing_flow(args.flow, args.entry, args.function, prompt_tpl) else: # Create an example flow print("Creating flow from scratch...") _init_flow_by_template(args.flow, args.type, args.yes, args.connection, args.deployment) def _init_existing_flow(flow_name, entry=None, function=None, prompt_params: dict = None): flow_path = Path(flow_name).resolve() if not function: logger.error("--function must be specified when --entry is specified.") return if not flow_path.exists(): logger.error(f"{flow_path.resolve()} must exist when --entry specified.") return print(f"Change working directory to .. 
{flow_path.resolve()}") os.chdir(flow_path) entry = Path(entry).resolve() if not entry.exists(): logger.error(f"{entry} must exist.") return with inject_sys_path(flow_path): # import function object function_obj = getattr(importlib.import_module(entry.stem), function) # Create tool.py tool_py = f"{function}_tool.py" python_tool = ToolPyGenerator(entry, function, function_obj) tools = ToolMetaGenerator(tool_py, function, function_obj, prompt_params) python_tool_inputs = [arg.name for arg in python_tool.tool_arg_list] for tool_input in tools.prompt_params.keys(): if tool_input not in python_tool_inputs: error = ValueError(f"Template parameter {tool_input} doesn't find in python function arguments.") raise UserErrorException(target=ErrorTarget.CONTROL_PLANE_SDK, message=str(error), error=error) python_tool.generate_to_file(tool_py) # Create .promptflow and flow.tools.json meta_dir = flow_path / PROMPT_FLOW_DIR_NAME meta_dir.mkdir(parents=True, exist_ok=True) tools.generate_to_file(meta_dir / "flow.tools.json") # Create flow.dag.yaml FlowDAGGenerator(tool_py, function, function_obj, prompt_params).generate_to_file("flow.dag.yaml") copy_extra_files(flow_path=flow_path, extra_files=["requirements.txt", ".gitignore"]) print(f"Done. Generated flow in folder: {flow_path.resolve()}.") def _init_chat_flow(flow_name, flow_path, connection=None, deployment=None): from promptflow._sdk._configuration import Configuration example_flow_path = Path(__file__).parent.parent / "data" / "chat_flow" / "flow_files" for item in list(example_flow_path.iterdir()): _copy_to_flow(flow_path=flow_path, source_file=item) # Generate flow.dag.yaml to chat flow. connection = connection or DEFAULT_CONNECTION deployment = deployment or DEFAULT_DEPLOYMENT ChatFlowDAGGenerator(connection=connection, deployment=deployment).generate_to_file(flow_path / "flow.dag.yaml") # When customer not configure the remote connection provider, create connection yaml to chat flow. 
is_local_connection = Configuration.get_instance().get_connection_provider() == ConnectionProvider.LOCAL if is_local_connection: OpenAIConnectionGenerator(connection=connection).generate_to_file(flow_path / "openai.yaml") AzureOpenAIConnectionGenerator(connection=connection).generate_to_file(flow_path / "azure_openai.yaml") copy_extra_files(flow_path=flow_path, extra_files=["requirements.txt", ".gitignore"]) print(f"Done. Created chat flow folder: {flow_path.resolve()}.") if is_local_connection: print( f"The generated chat flow is requiring a connection named {connection}, " "please follow the steps in README.md to create if you haven't done that." ) else: print( f"The generated chat flow is requiring a connection named {connection}, " "please ensure it exists in workspace." ) flow_test_command = f"pf flow test --flow {flow_name} --interactive" print(f"You can execute this command to test the flow, {flow_test_command}") def _init_standard_or_evaluation_flow(flow_name, flow_path, flow_type): example_flow_path = Path(__file__).parent.parent / "data" / f"{flow_type}_flow" for item in list(example_flow_path.iterdir()): _copy_to_flow(flow_path=flow_path, source_file=item) copy_extra_files(flow_path=flow_path, extra_files=["requirements.txt", ".gitignore"]) print(f"Done. 
Created {flow_type} flow folder: {flow_path.resolve()}.") flow_test_command = f"pf flow test --flow {flow_name} --input {os.path.join(flow_name, 'data.jsonl')}" print(f"You can execute this command to test the flow, {flow_test_command}") def _init_flow_by_template(flow_name, flow_type, overwrite=False, connection=None, deployment=None): flow_path = Path(flow_name) if flow_path.exists(): if not flow_path.is_dir(): logger.error(f"{flow_path.resolve()} is not a folder.") return answer = confirm( "The flow folder already exists, do you want to create the flow in this existing folder?", overwrite ) if not answer: print("The 'pf init' command has been cancelled.") return flow_path.mkdir(parents=True, exist_ok=True) if flow_type == "chat": _init_chat_flow(flow_name=flow_name, flow_path=flow_path, connection=connection, deployment=deployment) else: _init_standard_or_evaluation_flow(flow_name=flow_name, flow_path=flow_path, flow_type=flow_type) @exception_handler("Flow test") def test_flow(args): from promptflow._sdk._load_functions import load_flow config = list_of_dict_to_dict(args.config) pf_client = PFClient(config=config) if args.environment_variables: environment_variables = list_of_dict_to_dict(args.environment_variables) else: environment_variables = {} inputs = {} if args.input: from promptflow._utils.load_data import load_data if args.input and not args.input.endswith(".jsonl"): error = ValueError("Only support jsonl file as input.") raise UserErrorException( target=ErrorTarget.CONTROL_PLANE_SDK, message=str(error), error=error, ) inputs = load_data(local_path=args.input)[0] if args.inputs: inputs.update(list_of_dict_to_dict(args.inputs)) if args.multi_modal or args.ui: with tempfile.TemporaryDirectory() as temp_dir: flow = load_flow(args.flow) script_path = [ os.path.join(temp_dir, "main.py"), os.path.join(temp_dir, "utils.py"), os.path.join(temp_dir, "logo.png"), ] for script in script_path: StreamlitFileReplicator( flow_name=flow.display_name if 
flow.display_name else flow.name, flow_dag_path=flow.flow_dag_path, ).generate_to_file(script) main_script_path = os.path.join(temp_dir, "main.py") pf_client.flows._chat_with_ui(script=main_script_path) else: if args.interactive: pf_client.flows._chat( flow=args.flow, inputs=inputs, environment_variables=environment_variables, variant=args.variant, show_step_output=args.verbose, ) else: result = pf_client.flows.test( flow=args.flow, inputs=inputs, environment_variables=environment_variables, variant=args.variant, node=args.node, allow_generator_output=False, stream_output=False, dump_test_result=True, detail=args.detail, ) # Print flow/node test result if isinstance(result, dict): print(json.dumps(result, indent=4, ensure_ascii=False)) else: print(result) def serve_flow(args): from promptflow._sdk._load_functions import load_flow logger.info("Start serve model: %s", args.source) # Set environment variable for local test source = Path(args.source) logger.info( "Start promptflow server with port %s", args.port, ) os.environ["PROMPTFLOW_PROJECT_PATH"] = source.absolute().as_posix() flow = load_flow(args.source) if flow.dag.get(LANGUAGE_KEY, FlowLanguage.Python) == FlowLanguage.CSharp: serve_flow_csharp(args, source) else: serve_flow_python(args, source) logger.info("Promptflow app ended") def serve_flow_csharp(args, source): from promptflow.batch._csharp_executor_proxy import EXECUTOR_SERVICE_DLL try: # Change working directory to model dir logger.info(f"Change working directory to model dir {source}") os.chdir(source) command = [ "dotnet", EXECUTOR_SERVICE_DLL, "--port", str(args.port), "--yaml_path", "flow.dag.yaml", "--assembly_folder", ".", "--connection_provider_url", "", "--log_path", "", "--serving", ] subprocess.run(command, stdout=sys.stdout, stderr=sys.stderr) except KeyboardInterrupt: pass def _resolve_python_flow_additional_includes(source) -> Path: # Resolve flow additional includes from promptflow import load_flow flow = load_flow(source) with 
FlowOperations._resolve_additional_includes(flow.path) as resolved_flow_path: if resolved_flow_path == flow.path: return source # Copy resolved flow to temp folder if additional includes exists # Note: DO NOT use resolved flow path directly, as when inner logic raise exception, # temp dir will fail due to file occupied by other process. temp_flow_path = Path(tempfile.TemporaryDirectory().name) shutil.copytree(src=resolved_flow_path.parent, dst=temp_flow_path, dirs_exist_ok=True) return temp_flow_path def serve_flow_python(args, source): from promptflow._sdk._serving.app import create_app static_folder = args.static_folder if static_folder: static_folder = Path(static_folder).absolute().as_posix() config = list_of_dict_to_dict(args.config) source = _resolve_python_flow_additional_includes(source) os.environ["PROMPTFLOW_PROJECT_PATH"] = source.absolute().as_posix() logger.info(f"Change working directory to model dir {source}") os.chdir(source) app = create_app( static_folder=static_folder, environment_variables=list_of_dict_to_dict(args.environment_variables), config=config, ) if not args.skip_open_browser: target = f"http://{args.host}:{args.port}" logger.info(f"Opening browser {target}...") webbrowser.open(target) # Debug is not supported for now as debug will rerun command, and we changed working directory. app.run(port=args.port, host=args.host) def build_flow(args): """ i. `pf flow build --source <flow_folder> --output <output_folder> --variant <variant>` ii. `pf flow build --source <flow_folder> --format docker --output <output_folder> --variant <variant>` iii. `pf flow build --source <flow_folder> --format executable --output <output_folder> --variant <variant>` # default to resolve variant and update flow.dag.yaml, support this in case customer want to keep the variants for continuous development # we can delay this before receiving specific customer request v. 
`pf flow build --source <flow_folder> --output <output_folder> --keep-variants` output structure: flow/ .connections/ Dockerfile|executable.exe ... """ pf_client = PFClient() pf_client.flows.build( flow=args.source, output=args.output, format=args.format, variant=args.variant, flow_only=args.flow_only, ) print( f"Exported flow to {Path(args.output).absolute().as_posix()}.\n" f"please check {Path(args.output).joinpath('README.md').absolute().as_posix()} " f"for how to use it." ) def validate_flow(args): pf_client = PFClient() validation_result = pf_client.flows.validate( flow=args.source, ) print(repr(validation_result)) if not validation_result.passed: exit(1) else: exit(0)
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/_experiment.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""CLI handlers for the experimental ``pf experiment`` command group."""

import argparse
import json

from promptflow._cli._params import (
    add_param_all_results,
    add_param_archived_only,
    add_param_include_archived,
    add_param_max_results,
    base_params,
)
from promptflow._cli._utils import activate_action, exception_handler
from promptflow._sdk._constants import get_list_view_type
from promptflow._sdk._load_functions import load_common
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._experiment import Experiment, ExperimentTemplate
from promptflow._utils.logger_utils import get_cli_sdk_logger

logger = get_cli_sdk_logger()

# Lazily-created PFClient singleton shared by every sub-command handler below.
_client = None


def _get_pf_client():
    """Return the shared :class:`PFClient`, creating it on first use."""
    global _client
    if _client is None:
        _client = PFClient()
    return _client


def add_param_template(parser):
    """Register the required ``--template`` argument on *parser*."""
    parser.add_argument("--template", type=str, required=True, help="The experiment template path.")


def add_param_name(parser):
    """Register the optional ``--name``/``-n`` argument on *parser*."""
    parser.add_argument("--name", "-n", type=str, help="The experiment name.")


def add_experiment_create(subparsers):
    """Attach the ``pf experiment create`` sub-command and return its parser."""
    epilog = """
Examples:

# Create an experiment from a template:
pf experiment create --template flow.exp.yaml
"""
    add_params = [add_param_template, add_param_name] + base_params
    create_parser = activate_action(
        name="create",
        description=None,
        epilog=epilog,
        add_params=add_params,
        subparsers=subparsers,
        help_message="Create an experiment.",
        action_param_name="sub_action",
    )
    return create_parser


def add_experiment_list(subparsers):
    """Attach the ``pf experiment list`` sub-command."""
    epilog = """
Examples:

# List all experiments:
pf experiment list
"""
    activate_action(
        name="list",
        description="List all experiments.",
        epilog=epilog,
        add_params=[
            add_param_max_results,
            add_param_all_results,
            add_param_archived_only,
            add_param_include_archived,
        ]
        + base_params,
        subparsers=subparsers,
        help_message="List all experiments.",
        action_param_name="sub_action",
    )


def add_experiment_show(subparsers):
    """Attach the ``pf experiment show`` sub-command."""
    epilog = """
Examples:

# Get and show an experiment:
pf experiment show -n my_experiment
"""
    activate_action(
        name="show",
        description="Show an experiment for promptflow.",
        epilog=epilog,
        add_params=[add_param_name] + base_params,
        subparsers=subparsers,
        help_message="Show an experiment for promptflow.",
        action_param_name="sub_action",
    )


def add_experiment_start(subparsers):
    """Attach the ``pf experiment start`` sub-command."""
    epilog = """
Examples:

# Start an experiment:
pf experiment start -n my_experiment
"""
    activate_action(
        name="start",
        description="Start an experiment.",
        epilog=epilog,
        add_params=[add_param_name] + base_params,
        subparsers=subparsers,
        help_message="Start an experiment.",
        action_param_name="sub_action",
    )


def add_experiment_parser(subparsers):
    """Register the top-level ``pf experiment`` parser and its sub-commands."""
    experiment_parser = subparsers.add_parser(
        "experiment",
        description="[Experimental] A CLI tool to manage experiment for prompt flow.",
        help="[Experimental] pf experiment. This is an experimental feature, and may change at any time.",
    )
    subparsers = experiment_parser.add_subparsers()
    add_experiment_create(subparsers)
    add_experiment_list(subparsers)
    add_experiment_show(subparsers)
    add_experiment_start(subparsers)
    experiment_parser.set_defaults(action="experiment")


def dispatch_experiment_commands(args: argparse.Namespace):
    """Route a parsed ``pf experiment`` invocation to its handler.

    The trailing ``pass`` branches are placeholders for sub-commands that
    have no parser registered yet; they deliberately do nothing.
    """
    if args.sub_action == "create":
        create_experiment(args)
    elif args.sub_action == "list":
        list_experiment(args)
    elif args.sub_action == "show":
        show_experiment(args)
    elif args.sub_action == "start":
        start_experiment(args)
    elif args.sub_action == "show-status":
        pass
    elif args.sub_action == "update":
        pass
    elif args.sub_action == "delete":
        pass
    elif args.sub_action == "stop":
        pass
    elif args.sub_action == "test":
        pass
    elif args.sub_action == "clone":
        pass


@exception_handler("Create experiment")
def create_experiment(args: argparse.Namespace):
    """Create an experiment from a template file and print it as JSON."""
    template_path = args.template
    logger.debug("Loading experiment template from %s", template_path)
    template = load_common(ExperimentTemplate, source=template_path)
    logger.debug("Creating experiment from template %s", template.name)
    experiment = Experiment.from_template(template, name=args.name)
    logger.debug("Creating experiment %s", experiment.name)
    exp = _get_pf_client()._experiments.create_or_update(experiment)
    print(json.dumps(exp._to_dict(), indent=4))


@exception_handler("List experiment")
def list_experiment(args: argparse.Namespace):
    """List experiments (optionally archived) and print them as JSON."""
    list_view_type = get_list_view_type(archived_only=args.archived_only, include_archived=args.include_archived)
    results = _get_pf_client()._experiments.list(args.max_results, list_view_type=list_view_type)
    print(json.dumps([result._to_dict() for result in results], indent=4))


@exception_handler("Show experiment")
def show_experiment(args: argparse.Namespace):
    """Fetch one experiment by name and print it as JSON."""
    result = _get_pf_client()._experiments.get(args.name)
    print(json.dumps(result._to_dict(), indent=4))


@exception_handler("Start experiment")
def start_experiment(args: argparse.Namespace):
    """Start the named experiment and print the resulting state as JSON."""
    result = _get_pf_client()._experiments.start(args.name)
    print(json.dumps(result._to_dict(), indent=4))
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/entry.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=wrong-import-position
"""Entry point of the ``pf`` CLI: argument parsing, dispatch and telemetry."""

import json
import time

from promptflow._cli._pf._experiment import add_experiment_parser, dispatch_experiment_commands
from promptflow._cli._utils import _get_cli_activity_name
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
from promptflow._sdk._telemetry.activity import update_activity_name

# Log the start time so the total CLI latency can be reported in run_command.
start_time = time.perf_counter()

# E402 module level import not at top of file
import argparse  # noqa: E402
import logging  # noqa: E402
import sys  # noqa: E402

from promptflow._cli._pf._config import add_config_parser, dispatch_config_commands  # noqa: E402
from promptflow._cli._pf._connection import add_connection_parser, dispatch_connection_commands  # noqa: E402
from promptflow._cli._pf._flow import add_flow_parser, dispatch_flow_commands  # noqa: E402
from promptflow._cli._pf._run import add_run_parser, dispatch_run_commands  # noqa: E402
from promptflow._cli._pf._tool import add_tool_parser, dispatch_tool_commands  # noqa: E402
from promptflow._cli._pf.help import show_privacy_statement, show_welcome_message  # noqa: E402
from promptflow._cli._pf._upgrade import add_upgrade_parser, upgrade_version  # noqa: E402
from promptflow._cli._user_agent import USER_AGENT  # noqa: E402
from promptflow._sdk._utils import (  # noqa: E402
    get_promptflow_sdk_version,
    print_pf_version,
    setup_user_agent_to_operation_context,
)
from promptflow._utils.logger_utils import get_cli_sdk_logger  # noqa: E402

# get logger for CLI
logger = get_cli_sdk_logger()


def run_command(args):
    """Dispatch a parsed ``pf`` invocation to the matching sub-command.

    Also adjusts log verbosity for ``--verbose``/``--debug`` and always logs
    init/invoke timings in the ``finally`` block, even when the command fails.
    """
    # Log the init finish time
    init_finish_time = time.perf_counter()
    try:
        # --verbose, enable info logging
        if hasattr(args, "verbose") and args.verbose:
            for handler in logger.handlers:
                handler.setLevel(logging.INFO)
        # --debug, enable debug logging
        if hasattr(args, "debug") and args.debug:
            for handler in logger.handlers:
                handler.setLevel(logging.DEBUG)
        if args.version:
            print_pf_version()
        elif args.action == "flow":
            dispatch_flow_commands(args)
        elif args.action == "connection":
            dispatch_connection_commands(args)
        elif args.action == "run":
            dispatch_run_commands(args)
        elif args.action == "config":
            dispatch_config_commands(args)
        elif args.action == "tool":
            dispatch_tool_commands(args)
        elif args.action == "upgrade":
            upgrade_version(args)
        elif args.action == "experiment":
            dispatch_experiment_commands(args)
    except KeyboardInterrupt:
        logger.debug("Keyboard interrupt is captured.")
        # raise UserErrorException(error=ex)
        # Can't raise UserErrorException due to the code exit(1) of promptflow._cli._utils.py line 368.
        # Bare `raise` re-raises with the original traceback intact.
        raise
    except SystemExit as ex:
        # some code directly call sys.exit, this is to make sure command metadata is logged
        exit_code = ex.code if ex.code is not None else 1
        logger.debug(f"Code directly call sys.exit with code {exit_code}")
        # raise UserErrorException(error=ex)
        # Can't raise UserErrorException due to the code exit(1) of promptflow._cli._utils.py line 368.
        raise
    except Exception as ex:
        logger.debug(f"Command {args} execute failed. {str(ex)}")
        # raise UserErrorException(error=ex)
        # Can't raise UserErrorException due to the code exit(1) of promptflow._cli._utils.py line 368.
        raise
    finally:
        # Log the invoke finish time
        invoke_finish_time = time.perf_counter()
        logger.info(
            "Command ran in %.3f seconds (init: %.3f, invoke: %.3f)",
            invoke_finish_time - start_time,
            init_finish_time - start_time,
            invoke_finish_time - init_finish_time,
        )


def get_parser_args(argv):
    """Build the full ``pf`` argument parser and parse *argv*.

    Returns a ``(prog, namespace)`` tuple; ``prog`` is used for telemetry
    activity naming.
    """
    parser = argparse.ArgumentParser(
        prog="pf",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="pf: manage prompt flow assets. Learn more: https://microsoft.github.io/promptflow.",
    )
    parser.add_argument(
        "-v", "--version", dest="version", action="store_true", help="show current CLI version and exit"
    )
    subparsers = parser.add_subparsers()
    add_upgrade_parser(subparsers)
    add_flow_parser(subparsers)
    add_connection_parser(subparsers)
    add_run_parser(subparsers)
    add_config_parser(subparsers)
    add_tool_parser(subparsers)
    # Experiment commands are only exposed when internal features are enabled.
    if Configuration.get_instance().is_internal_features_enabled():
        add_experiment_parser(subparsers)
    return parser.prog, parser.parse_args(argv)


def entry(argv):
    """
    Control plane CLI tools for promptflow.
    """
    prog, args = get_parser_args(argv)
    if hasattr(args, "user_agent"):
        setup_user_agent_to_operation_context(args.user_agent)
    # Use a distinct name for the telemetry logger so it does not shadow the
    # module-level CLI logger used by run_command.
    telemetry_logger = get_telemetry_logger()
    activity_name = _get_cli_activity_name(cli=prog, args=args)
    activity_name = update_activity_name(activity_name, args=args)
    with log_activity(
        telemetry_logger,
        activity_name,
        activity_type=ActivityType.PUBLICAPI,
    ):
        run_command(args)


def main():
    """Entrance of pf CLI."""
    command_args = sys.argv[1:]
    if len(command_args) == 1 and command_args[0] == "version":
        version_dict = {"promptflow": get_promptflow_sdk_version()}
        version_dict_string = (
            json.dumps(version_dict, ensure_ascii=False, indent=2, sort_keys=True, separators=(",", ": ")) + "\n"
        )
        print(version_dict_string)
        return
    if len(command_args) == 0:
        # print privacy statement & welcome message like azure-cli
        show_privacy_statement()
        show_welcome_message()
        command_args.append("-h")
    elif len(command_args) == 1:
        # pf only has "pf --version" with 1 layer
        if command_args[0] not in ["--version", "-v", "upgrade"]:
            command_args.append("-h")
    setup_user_agent_to_operation_context(USER_AGENT)
    entry(command_args)


if __name__ == "__main__":
    main()
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/_connection.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""CLI handlers for the ``pf connection`` command group."""

import argparse
import json
from functools import partial

from promptflow._cli._params import (
    add_param_all_results,
    add_param_max_results,
    add_param_set,
    add_param_yes,
    base_params,
)
from promptflow._cli._utils import activate_action, confirm, exception_handler, get_secret_input, print_yellow_warning
from promptflow._sdk._constants import MAX_LIST_CLI_RESULTS
from promptflow._sdk._load_functions import load_connection
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._connection import _Connection
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import load_yaml

logger = get_cli_sdk_logger()

# Lazily-created PFClient singleton shared by every sub-command handler below.
_client = None


def _get_pf_client():
    """Return the shared :class:`PFClient`, creating it on first use."""
    global _client
    if _client is None:
        _client = PFClient()
    return _client


def add_param_file(parser):
    """Register the required ``--file``/``-f`` argument on *parser*."""
    parser.add_argument("--file", "-f", type=str, help="File path of the connection yaml.", required=True)


def add_param_name(parser, required=False):
    """Register the ``--name``/``-n`` argument; *required* toggles mandatoriness."""
    parser.add_argument("--name", "-n", type=str, help="Name of the connection.", required=required)


def add_connection_parser(subparsers):
    """Register the top-level ``pf connection`` parser and its sub-commands."""
    connection_parser = subparsers.add_parser(
        "connection",
        description="""A CLI tool to manage connections for promptflow.

        Your secrets will be encrypted using AES(Advanced Encryption Standard) technology.""",  # noqa: E501
        help="pf connection",
    )
    subparsers = connection_parser.add_subparsers()
    add_connection_create(subparsers)
    add_connection_update(subparsers)
    add_connection_show(subparsers)
    add_connection_list(subparsers)
    add_connection_delete(subparsers)
    connection_parser.set_defaults(action="connection")


def add_connection_create(subparsers):
    """Attach the ``pf connection create`` sub-command."""
    # Do not change the indent of epilog
    epilog = """
Examples:

# Creating a connection with yaml file:
pf connection create -f connection.yaml
# Creating a connection with yaml file and overrides:
pf connection create -f connection.yaml --set api_key="my_api_key"
# Creating a custom connection with .env file, note that overrides specified by --set will be ignored:
pf connection create -f .env --name custom
"""
    activate_action(
        name="create",
        description="Create a connection.",
        epilog=epilog,
        add_params=[add_param_set, add_param_file, add_param_name] + base_params,
        subparsers=subparsers,
        help_message="Create a connection.",
        action_param_name="sub_action",
    )


def add_connection_update(subparsers):
    """Attach the ``pf connection update`` sub-command."""
    epilog = """
Examples:

# Updating a connection:
pf connection update -n my_connection --set api_key="my_api_key"
"""
    activate_action(
        name="update",
        description="Update a connection.",
        epilog=epilog,
        add_params=[add_param_set, partial(add_param_name, required=True)] + base_params,
        subparsers=subparsers,
        help_message="Update a connection.",
        action_param_name="sub_action",
    )


def add_connection_show(subparsers):
    """Attach the ``pf connection show`` sub-command."""
    epilog = """
Examples:

# Get and show a connection:
pf connection show -n my_connection_name
"""
    activate_action(
        name="show",
        description="Show a connection for promptflow.",
        epilog=epilog,
        add_params=[partial(add_param_name, required=True)] + base_params,
        subparsers=subparsers,
        help_message="Show a connection for promptflow.",
        action_param_name="sub_action",
    )


def add_connection_delete(subparsers):
    """Attach the ``pf connection delete`` sub-command."""
    epilog = """
Examples:

# Delete a connection:
pf connection delete -n my_connection_name
"""
    activate_action(
        name="delete",
        description="Delete a connection with specific name.",
        epilog=epilog,
        add_params=[partial(add_param_name, required=True), add_param_yes] + base_params,
        subparsers=subparsers,
        help_message="Delete a connection with specific name.",
        action_param_name="sub_action",
    )


def add_connection_list(subparsers):
    """Attach the ``pf connection list`` sub-command."""
    epilog = """
Examples:

# List all connections:
pf connection list
"""
    activate_action(
        name="list",
        description="List all connections.",
        epilog=epilog,
        add_params=[add_param_max_results, add_param_all_results] + base_params,
        subparsers=subparsers,
        help_message="List all connections.",
        action_param_name="sub_action",
    )


def validate_and_interactive_get_secrets(connection, is_update=False):
    """Validate the connection and interactive get secrets if no secrets is provided."""
    prompt = "=================== Please input required secrets ==================="
    missing_secrets_prompt = False
    for name, val in connection.secrets.items():
        if not _Connection._is_scrubbed_value(val) and not _Connection._is_user_input_value(val):
            # Not scrubbed value, not require user input.
            continue
        if is_update and _Connection._is_scrubbed_value(val):
            # Scrubbed value, will use existing, not require user input.
            continue
        # Print the banner once, before the first secret prompt.
        if not missing_secrets_prompt:
            print(prompt)
            missing_secrets_prompt = True
        # Re-prompt until a non-empty secret is entered.
        while True:
            secret = get_secret_input(prompt=f"{name}: ")
            if secret:
                break
            print_yellow_warning("Secret can't be empty.")
        connection.secrets[name] = secret
    if missing_secrets_prompt:
        print("=================== Required secrets collected ===================")
    return connection


# Note the connection secrets value behaviors:
# --------------------------------------------------------------------------------
# | secret value     | CLI create     | CLI update          | SDK create_or_update |
# --------------------------------------------------------------------------------
# | empty or all "*" | prompt input   | use existing values | use existing values  |
# | <no-change>      | prompt input   | use existing values | use existing values  |
# | <user-input>     | prompt input   | prompt input        | raise error          |
# --------------------------------------------------------------------------------


@exception_handler("Connection create")
def create_connection(file_path, params_override=None, name=None):
    """Create (or overwrite) a connection from a yaml/.env file and print it."""
    params_override = params_override or []
    if name:
        params_override.append({"name": name})
    connection = load_connection(source=file_path, params_override=params_override)
    existing_connection = _get_pf_client().connections.get(connection.name, raise_error=False)
    if existing_connection:
        logger.warning(f"Connection with name {connection.name} already exists. Updating it.")
        # Note: We don't set the existing secret back here, let user input the secrets.
    validate_and_interactive_get_secrets(connection)
    connection = _get_pf_client().connections.create_or_update(connection)
    print(json.dumps(connection._to_dict(), indent=4))


@exception_handler("Connection show")
def show_connection(name):
    """Fetch one connection by name and print it (secrets scrubbed) as JSON."""
    connection = _get_pf_client().connections.get(name)
    print(json.dumps(connection._to_dict(), indent=4))


@exception_handler("Connection list")
def list_connection(max_results=MAX_LIST_CLI_RESULTS, all_results=False):
    """List connections and print them (secrets scrubbed) as JSON."""
    connections = _get_pf_client().connections.list(max_results, all_results)
    print(json.dumps([connection._to_dict() for connection in connections], indent=4))


def _upsert_connection_from_file(file, params_override=None):
    # Note: This function is used for pfutil, do not edit it.
    params_override = params_override or []
    params_override.append(load_yaml(file))
    connection = load_connection(source=file, params_override=params_override)
    existing_connection = _get_pf_client().connections.get(connection.name, raise_error=False)
    if existing_connection:
        connection = _Connection._load(data=existing_connection._to_dict(), params_override=params_override)
        validate_and_interactive_get_secrets(connection, is_update=True)
        # Set the secrets not scrubbed, as _to_dict() dump scrubbed connections.
        connection._secrets = existing_connection._secrets
    else:
        validate_and_interactive_get_secrets(connection)
    connection = _get_pf_client().connections.create_or_update(connection)
    return connection


@exception_handler("Connection update")
def update_connection(name, params_override=None):
    """Apply ``--set`` overrides to an existing connection and print it."""
    params_override = params_override or []
    existing_connection = _get_pf_client().connections.get(name)
    connection = _Connection._load(data=existing_connection._to_dict(), params_override=params_override)
    validate_and_interactive_get_secrets(connection, is_update=True)
    # Set the secrets not scrubbed, as _to_dict() dump scrubbed connections.
    connection._secrets = existing_connection._secrets
    connection = _get_pf_client().connections.create_or_update(connection)
    print(json.dumps(connection._to_dict(), indent=4))


@exception_handler("Connection delete")
def delete_connection(name, skip_confirm: bool = False):
    """Delete the named connection after (optional) interactive confirmation."""
    if confirm("Are you sure you want to perform this operation?", skip_confirm):
        _get_pf_client().connections.delete(name)
    else:
        print("The delete operation was canceled.")


def dispatch_connection_commands(args: argparse.Namespace):
    """Route a parsed ``pf connection`` invocation to its handler."""
    if args.sub_action == "create":
        create_connection(args.file, args.params_override, args.name)
    elif args.sub_action == "show":
        show_connection(args.name)
    elif args.sub_action == "list":
        list_connection(args.max_results, args.all_results)
    elif args.sub_action == "update":
        update_connection(args.name, args.params_override)
    elif args.sub_action == "delete":
        delete_connection(args.name, args.yes)
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/_upgrade.py
import os from promptflow._cli._params import ( add_param_yes, base_params, ) from promptflow._cli._utils import activate_action, get_cli_sdk_logger from promptflow._utils.utils import prompt_y_n from promptflow.exceptions import UserErrorException logger = get_cli_sdk_logger() UPGRADE_MSG = 'Not able to upgrade automatically' def add_upgrade_parser(subparsers): """Add upgrade parser to the pf subparsers.""" epilog = """ Examples: # Upgrade prompt flow without prompt and run non-interactively: pf upgrade --yes """ # noqa: E501 add_params = [ add_param_yes, ] + base_params activate_action( name="upgrade", description="Upgrade prompt flow CLI.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="pf upgrade", action_param_name="action", ) def upgrade_version(args): import platform import sys import subprocess from promptflow._constants import _ENV_PF_INSTALLER, CLI_PACKAGE_NAME from promptflow._version import VERSION as local_version from packaging.version import parse from promptflow._utils.version_hint_utils import get_latest_version_from_pypi latest_version = get_latest_version_from_pypi(CLI_PACKAGE_NAME) if not latest_version: logger.warning("Failed to get the latest prompt flow version.") return elif parse(latest_version) <= parse(local_version): logger.warning("You already have the latest prompt flow version: %s", local_version) return yes = args.yes exit_code = 0 installer = os.getenv(_ENV_PF_INSTALLER) or '' installer = installer.upper() print(f"installer: {installer}") latest_version_msg = 'Upgrading prompt flow CLI version to {}.'.format(latest_version) if yes \ else 'Latest version available is {}.'.format(latest_version) logger.warning("Your current prompt flow CLI version is %s. %s", local_version, latest_version_msg) if not yes: logger.warning("Please check the release notes first") if not sys.stdin.isatty(): logger.debug('No tty available.') raise UserErrorException("No tty available. 
Please run command with --yes.") confirmation = prompt_y_n("Do you want to continue?", default='y') if not confirmation: logger.debug("Upgrade stopped by user") return if installer == 'MSI': _upgrade_on_windows(yes) elif installer == 'PIP': pip_args = [sys.executable, '-m', 'pip', 'install', '--upgrade', 'promptflow[azure,executable,pfs,azureml-serving]', '-vv', '--disable-pip-version-check', '--no-cache-dir'] logger.debug("Update prompt flow with '%s'", " ".join(pip_args)) exit_code = subprocess.call(pip_args, shell=platform.system() == 'Windows') elif installer == 'SCRIPT': command = "curl https://promptflowartifact.blob.core.windows.net/linux-install-scripts/install | bash" logger.warning(f"{UPGRADE_MSG}, you can try to run {command} in your terminal directly to upgrade package.") return else: logger.warning(UPGRADE_MSG) return if exit_code: err_msg = "CLI upgrade failed." logger.warning(err_msg) sys.exit(exit_code) import importlib import json importlib.reload(subprocess) importlib.reload(json) version_result = subprocess.check_output(['pf', 'version'], shell=platform.system() == 'Windows') version_json = json.loads(version_result) new_version = version_json['promptflow'] if new_version == local_version: err_msg = f"CLI upgrade to version {latest_version} failed or aborted." logger.warning(err_msg) sys.exit(1) logger.warning("Upgrade finished.") def _upgrade_on_windows(yes): """Download MSI to a temp folder and install it with msiexec.exe. Directly installing from URL may be blocked by policy: https://github.com/Azure/azure-cli/issues/19171 This also gives the user a chance to manually install the MSI in case of msiexec.exe failure. 
""" import subprocess import sys import tempfile msi_url = 'https://aka.ms/installpromptflowwindowsx64' logger.warning("Updating prompt flow CLI with MSI from %s", msi_url) # Save MSI to ~\AppData\Local\Temp\promptflow-msi, clean up the folder first msi_dir = os.path.join(tempfile.gettempdir(), 'promptflow-msi') try: import shutil shutil.rmtree(msi_dir) except FileNotFoundError: # The folder has already been deleted. No further retry is needed. # errno: 2, winerror: 3, strerror: 'The system cannot find the path specified' pass except OSError as err: logger.warning("Failed to delete '%s': %s. You may try to delete it manually.", msi_dir, err) os.makedirs(msi_dir, exist_ok=True) msi_path = _download_from_url(msi_url, msi_dir) if yes: subprocess.Popen(['msiexec.exe', '/i', msi_path, '/qn']) else: subprocess.call(['msiexec.exe', '/i', msi_path]) logger.warning("Installation started. Please complete the upgrade in the opened window.") sys.exit(0) def _download_from_url(url, target_dir): import requests r = requests.get(url, stream=True) if r.status_code != 200: raise UserErrorException("Request to {} failed with {}".format(url, r.status_code)) # r.url is the real path of the msi, like # 'https://promptflowartifact.blob.core.windows.net/msi-installer/promptflow.msi' file_name = r.url.rsplit('/')[-1] msi_path = os.path.join(target_dir, file_name) logger.warning("Downloading MSI to %s", msi_path) with open(msi_path, 'wb') as f: for chunk in r.iter_content(chunk_size=1024): f.write(chunk) return msi_path
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/_init_entry_generators.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import inspect import json import shutil from abc import ABC, abstractmethod from ast import literal_eval from enum import Enum from pathlib import Path from jinja2 import Environment, Template, meta from promptflow._sdk._constants import DEFAULT_ENCODING from promptflow._sdk.operations._flow_operations import FlowOperations from promptflow._utils.logger_utils import get_cli_sdk_logger from promptflow.contracts.flow import Flow as ExecutableFlow from promptflow.exceptions import UserErrorException logger = get_cli_sdk_logger() TEMPLATE_PATH = Path(__file__).parent.parent / "data" / "entry_flow" CHAT_FLOW_TEMPLATE_PATH = Path(__file__).parent.parent / "data" / "chat_flow" / "template" TOOL_TEMPLATE_PATH = Path(__file__).parent.parent / "data" / "package_tool" EXTRA_FILES_MAPPING = {"requirements.txt": "requirements_txt", ".gitignore": "gitignore"} SERVE_TEMPLATE_PATH = Path(__file__).resolve().parent.parent.parent / "_sdk" / "data" / "executable" class BaseGenerator(ABC): @property @abstractmethod def tpl_file(self): pass @property @abstractmethod def entry_template_keys(self): pass def generate(self) -> str: """Generate content based on given template and actual value of template keys.""" with open(self.tpl_file, encoding=DEFAULT_ENCODING) as f: entry_template = f.read() entry_template = Template(entry_template, trim_blocks=True, lstrip_blocks=True) return entry_template.render(**{key: getattr(self, key) for key in self.entry_template_keys}) def generate_to_file(self, target): """Generate content to a file based on given template and actual value of template keys.""" target = Path(target).resolve() action = "Overwriting" if target.exists() else "Creating" print(f"{action} {target.resolve()}...") with open(target, "w", encoding=DEFAULT_ENCODING) as f: f.write(self.generate()) class 
ToolPyGenerator(BaseGenerator): def __init__(self, entry, function, function_obj): self.function_import = f"from {Path(entry).stem} import {function}" self.entry_function = function self.tool_function = f"{function}_tool" # TODO: support default for tool args self.tool_arg_list = inspect.signature(function_obj).parameters.values() @property def tpl_file(self): return TEMPLATE_PATH / "tool.py.jinja2" @property def entry_template_keys(self): return ["function_import", "entry_function", "tool_function", "tool_arg_list"] class ValueType(str, Enum): INT = "int" DOUBLE = "double" BOOL = "bool" STRING = "string" LIST = "list" OBJECT = "object" @staticmethod def from_type(t: type): if t == int: return ValueType.INT if t == float: return ValueType.DOUBLE if t == bool: return ValueType.BOOL if t == str: return ValueType.STRING if t == list: return ValueType.LIST return ValueType.OBJECT class ToolMetaGenerator(BaseGenerator): def __init__(self, tool_py, function, function_obj, prompt_params): self.tool_file = tool_py self.tool_function = f"{function}_tool" # TODO: support default for tool meta args self.tool_meta_args = self.get_tool_meta_args(function_obj) self._prompt_params = prompt_params @property def prompt_params(self): from promptflow._core.tool_meta_generator import generate_prompt_meta_dict prompt_objs = {} for key, file_name in self._prompt_params.items(): file_path = Path(file_name) if not file_path.exists(): logger.warning( f'Cannot find the prompt template "{file_name}", creating an empty prompt file in the flow...' ) with open(file_path, "w") as f: f.write("{# please enter your prompt content in this file. 
#}") with open(file_name, "r") as f: content = f.read() name = Path(file_name).stem prompt_objs[key] = generate_prompt_meta_dict(name, content, prompt_only=True, source=file_name) return prompt_objs def get_tool_meta_args(self, function_obj): func_params = inspect.signature(function_obj).parameters # TODO: Support enum/union in the future return {k: ValueType.from_type(v.annotation).value for k, v in func_params.items()} @property def tpl_file(self): return TEMPLATE_PATH / "flow.tools.json.jinja2" @property def entry_template_keys(self): return ["prompt_params", "tool_file", "tool_meta_args", "tool_function"] class FlowDAGGenerator(BaseGenerator): def __init__(self, tool_py, function, function_obj, prompt_params): self.tool_file = tool_py self.main_node_name = function self.prompt_params = prompt_params self.setup_sh = None self.python_requirements_txt = None self._prompt_inputs = None self._func_params = None self._function_obj = function_obj # Abstract prompt param from tool meta args self.flow_inputs = self.get_flow_inputs(prompt_params) def get_flow_inputs(self, prompt_params): """Generate the flow inputs""" flow_inputs = { k: ValueType.from_type(v.annotation).value for k, v in self.func_params.items() if k not in prompt_params } for prompt_inputs in self.prompt_inputs.values(): flow_inputs.update(prompt_inputs) return flow_inputs @property def tpl_file(self): return TEMPLATE_PATH / "flow.dag.yaml.jinja2" @property def func_params(self): """Generate function inputs without prompt templates.""" if self._func_params is None: self._func_params = { k: v for k, v in inspect.signature(self._function_obj).parameters.items() if k not in self.prompt_params } return self._func_params @property def prompt_inputs(self): """Generate prompt inputs.""" if self._prompt_inputs is None: self._prompt_inputs = {} for prompt_name, file_name in self.prompt_params.items(): try: with open(file_name, "r") as f: env = Environment() ast = env.parse(f.read()) variables = 
meta.find_undeclared_variables(ast) self._prompt_inputs[prompt_name] = {item: "string" for item in variables or []} except Exception as e: logger.warning(f"Get the prompt input from {file_name} failed, {e}.") return self._prompt_inputs @property def entry_template_keys(self): return [ "flow_inputs", "main_node_name", "prompt_params", "tool_file", "setup_sh", "python_requirements_txt", "prompt_inputs", "func_params", ] def generate_to_file(self, target): # Get requirements.txt and setup.sh from target folder. requirements_file = "requirements.txt" if (Path(target).parent / requirements_file).exists(): self.python_requirements_txt = requirements_file setup_file = "setup.sh" if (Path(target).parent / setup_file).exists(): self.setup_sh = setup_file super().generate_to_file(target=target) class FlowMetaYamlGenerator(BaseGenerator): def __init__(self, flow_name): self.flow_name = flow_name @property def tpl_file(self): return TEMPLATE_PATH / "flow.meta.yaml.jinja2" @property def entry_template_keys(self): return ["flow_name"] class StreamlitFileReplicator: def __init__(self, flow_name, flow_dag_path): self.flow_name = flow_name self.flow_dag_path = Path(flow_dag_path) self.executable = ExecutableFlow.from_yaml( flow_file=Path(self.flow_dag_path.name), working_dir=self.flow_dag_path.parent ) self.is_chat_flow, self.chat_history_input_name, error_msg = FlowOperations._is_chat_flow(self.executable) @property def flow_inputs(self): if self.is_chat_flow: results = {} for flow_input, value in self.executable.inputs.items(): if value.is_chat_input: if value.type.value not in [ValueType.STRING.value, ValueType.LIST.value]: raise UserErrorException( f"Only support string or list type for chat input, but got {value.type.value}." 
) results.update({flow_input: (value.default, value.type.value)}) else: results = { flow_input: (value.default, value.type.value) for flow_input, value in self.executable.inputs.items() } return results @property def label(self): return "Chat" if self.is_chat_flow else "Run" @property def py_file(self): return SERVE_TEMPLATE_PATH / "main.py" @property def flow_path(self): return self.flow_dag_path.as_posix() @property def chat_output_name(self): try: output_name = next( filter( lambda key: self.executable.outputs[key].is_chat_output, self.executable.outputs.keys(), ) ) except StopIteration: output_name = None return output_name @property def is_streaming(self): return True if self.is_chat_flow else False @property def entry_template_keys(self): return [ "flow_name", "flow_path", "is_chat_flow", "chat_history_input_name", "flow_inputs", "label", "chat_output_name", "is_streaming", ] def generate_to_file(self, target): if Path(target).name == "main.py": target = Path(target).resolve() shutil.copy(self.py_file, target) config_content = {key: getattr(self, key) for key in self.entry_template_keys} with open(target.parent / "config.json", "w") as file: json.dump(config_content, file, indent=4) else: shutil.copy(SERVE_TEMPLATE_PATH / Path(target).name, target) class ChatFlowDAGGenerator(BaseGenerator): def __init__(self, connection, deployment): self.connection = connection self.deployment = deployment @property def tpl_file(self): return CHAT_FLOW_TEMPLATE_PATH / "flow.dag.yaml.jinja2" @property def entry_template_keys(self): return ["connection", "deployment"] class AzureOpenAIConnectionGenerator(BaseGenerator): def __init__(self, connection): self.connection = connection @property def tpl_file(self): return CHAT_FLOW_TEMPLATE_PATH / "azure_openai.yaml.jinja2" @property def entry_template_keys(self): return ["connection"] class OpenAIConnectionGenerator(BaseGenerator): def __init__(self, connection): self.connection = connection @property def tpl_file(self): return 
CHAT_FLOW_TEMPLATE_PATH / "openai.yaml.jinja2" @property def entry_template_keys(self): return ["connection"] def copy_extra_files(flow_path, extra_files, overwrite=False): for file_name in extra_files: extra_file_path = ( Path(__file__).parent.parent / "data" / "entry_flow" / EXTRA_FILES_MAPPING.get(file_name, file_name) ) target_path = Path(flow_path) / file_name if target_path.exists() and not overwrite: continue action = "Overwriting" if target_path.exists() else "Creating" print(f"{action} {target_path.resolve()}...") shutil.copy2(extra_file_path, target_path) class ToolPackageGenerator(BaseGenerator): def __init__(self, tool_name, icon=None, extra_info=None): self.tool_name = tool_name self._extra_info = extra_info self.icon = icon @property def extra_info(self): if self._extra_info: extra_info = {} for k, v in self._extra_info.items(): try: extra_info[k] = literal_eval(v) except Exception: extra_info[k] = repr(v) return extra_info else: return {} @property def tpl_file(self): return TOOL_TEMPLATE_PATH / "tool.py.jinja2" @property def entry_template_keys(self): return ["tool_name", "extra_info", "icon"] class SetupGenerator(BaseGenerator): def __init__(self, package_name, tool_name): self.package_name = package_name self.tool_name = tool_name @property def tpl_file(self): return TOOL_TEMPLATE_PATH / "setup.py.jinja2" @property def entry_template_keys(self): return ["package_name", "tool_name"] class ToolPackageUtilsGenerator(BaseGenerator): def __init__(self, package_name): self.package_name = package_name @property def tpl_file(self): return TOOL_TEMPLATE_PATH / "utils.py.jinja2" @property def entry_template_keys(self): return ["package_name"] class ToolReadmeGenerator(BaseGenerator): def __init__(self, package_name, tool_name): self.package_name = package_name self.tool_name = tool_name @property def tpl_file(self): return TOOL_TEMPLATE_PATH / "README.md.jinja2" @property def entry_template_keys(self): return ["package_name", "tool_name"] class 
InitGenerator(BaseGenerator): @property def tpl_file(self): return TOOL_TEMPLATE_PATH / "init.py" @property def entry_template_keys(self): pass def generate(self) -> str: with open(self.tpl_file) as f: init_content = f.read() return init_content
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/_run.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import argparse import json from typing import Callable, Dict, List, Optional, Tuple from promptflow._cli._params import ( add_param_all_results, add_param_archived_only, add_param_columns_mapping, add_param_connections, add_param_environment_variables, add_param_include_archived, add_param_max_results, add_param_output_format, add_param_run_name, add_param_set, add_param_yes, add_parser_build, base_params, ) from promptflow._cli._utils import ( _output_result_list_with_format, activate_action, confirm, exception_handler, list_of_dict_to_dict, list_of_dict_to_nested_dict, pretty_print_dataframe_as_table, ) from promptflow._sdk._constants import MAX_SHOW_DETAILS_RESULTS, get_list_view_type from promptflow._sdk._load_functions import load_run from promptflow._sdk._pf_client import PFClient from promptflow._sdk._run_functions import _create_run from promptflow._sdk._utils import safe_parse_object_list from promptflow._sdk.entities import Run from promptflow.exceptions import UserErrorException def add_run_parser(subparsers): run_parser = subparsers.add_parser("run", description="A CLI tool to manage runs for prompt flow.", help="pf run") subparsers = run_parser.add_subparsers() add_run_create(subparsers) # add_run_cancel(subparsers) add_run_update(subparsers) add_run_stream(subparsers) add_run_list(subparsers) add_run_show(subparsers) add_run_show_details(subparsers) add_run_show_metrics(subparsers) add_run_visualize(subparsers) add_run_archive(subparsers) add_run_restore(subparsers) add_run_delete(subparsers) add_parser_build(subparsers, "run") run_parser.set_defaults(action="run") def add_run_create_common(subparsers, add_param_list, epilog: Optional[str] = None): # pf run create --file batch_run.yaml [--stream] add_param_file = lambda parser: parser.add_argument( # noqa: E731 "-f", 
"--file", dest="file", type=str, help="Local path to the YAML file containing the run definition. " "Reference https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json for the schema.", ) add_param_stream = lambda parser: parser.add_argument( # noqa: E731 "-s", "--stream", action="store_true", default=False, help="Indicates whether to stream the run's logs to the console.", ) add_param_flow = lambda parser: parser.add_argument( # noqa: E731 "--flow", type=str, help="Local path to the flow directory." "If --file is provided, this path should be relative path to the file.", ) add_param_variant = lambda parser: parser.add_argument( # noqa: E731 "--variant", type=str, help="Node & variant name in format of ${node_name.variant_name}." ) add_param_run = lambda parser: parser.add_argument( # noqa: E731 "--run", type=str, help="Referenced flow run name referenced by current run. " "For example, you can run an evaluation flow against an existing run.", ) add_param_name = lambda parser: parser.add_argument("-n", "--name", type=str, help="Name of the run.") # noqa: E731 add_params = [ add_param_file, add_param_stream, add_param_flow, add_param_variant, add_param_run, add_param_name, add_param_columns_mapping, # add env var overwrite add_param_environment_variables, add_param_connections, add_param_set, ] + base_params add_params.extend(add_param_list) create_parser = activate_action( name="create", description=None, epilog=epilog or "pf run create --file <local-path-to-yaml> [--stream]", add_params=add_params, subparsers=subparsers, help_message="Create a run.", action_param_name="sub_action", ) return create_parser def add_run_create(subparsers): epilog = """ Examples: # Create a run with YAML file: pf run create -f <yaml-filename> # Create a run with YAML file and replace another data in the YAML file: pf run create -f <yaml-filename> --data <path-to-new-data-file-relative-to-yaml-file> # Create a run from flow directory and reference a run: pf run create 
--flow <path-to-flow-directory> --data <path-to-data-file> --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run <run-name> --variant "${summarize_text_content.variant_0}" --stream # noqa: E501 # Create a run from an existing run record folder pf run create --source <path-to-run-folder> """ # data for pf has different help doc than pfazure def add_param_data(parser): parser.add_argument( "--data", type=str, help="Local path to the data file." "If --file is provided, this path should be relative path to the file.", ) def add_param_source(parser): parser.add_argument("--source", type=str, help="Local path to the existing run record folder.") add_run_create_common(subparsers, [add_param_data, add_param_source], epilog=epilog) def add_run_cancel(subparsers): epilog = """ Example: # Cancel a run: pf run cancel --name <name> """ add_params = [add_param_run_name] + base_params activate_action( name="cancel", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Cancel a run.", action_param_name="sub_action", ) def add_run_update(subparsers): epilog = """ Example: # Update a run metadata: pf run update --name <name> --set display_name="<display-name>" description="<description>" tags.key="<value>" """ add_params = [ add_param_run_name, add_param_set, ] + base_params activate_action( name="update", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Update a run metadata, including display name, description and tags.", action_param_name="sub_action", ) def add_run_stream(subparsers): epilog = """ Example: # Stream run logs: pf run stream --name <name> """ add_params = [add_param_run_name] + base_params activate_action( name="stream", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Stream run logs to the console.", action_param_name="sub_action", ) def add_run_list(subparsers): epilog = """ Examples: # List runs 
status: pf run list # List most recent 10 runs status: pf run list --max-results 10 # List active and archived runs status: pf run list --include-archived # List archived runs status only: pf run list --archived-only # List all runs status: pf run list --all-results # List all runs status as table: pf run list --output table """ add_params = [ add_param_max_results, add_param_all_results, add_param_archived_only, add_param_include_archived, add_param_output_format, ] + base_params activate_action( name="list", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="List runs.", action_param_name="sub_action", ) def add_run_show(subparsers): epilog = """ Example: # Show the status of a run: pf run show --name <name> """ add_params = [add_param_run_name] + base_params activate_action( name="show", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Show details for a run.", action_param_name="sub_action", ) def add_run_show_details(subparsers): epilog = """ Example: # View input(s) and output(s) of a run: pf run show-details --name <name> """ add_param_max_results = lambda parser: parser.add_argument( # noqa: E731 "-r", "--max-results", dest="max_results", type=int, default=MAX_SHOW_DETAILS_RESULTS, help=f"Number of lines to show. 
Default is {MAX_SHOW_DETAILS_RESULTS}.", ) add_params = [add_param_max_results, add_param_run_name, add_param_all_results] + base_params activate_action( name="show-details", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Preview a run's input(s) and output(s).", action_param_name="sub_action", ) def add_run_show_metrics(subparsers): epilog = """ Example: # View metrics of a run: pf run show-metrics --name <name> """ add_params = [add_param_run_name] + base_params activate_action( name="show-metrics", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Print run metrics to the console.", action_param_name="sub_action", ) def add_run_visualize(subparsers): epilog = """ Examples: # Visualize a run: pf run visualize -n <name> # Visualize runs: pf run visualize --names "<name1,name2>" pf run visualize --names "<name1>, <name2>" """ add_param_name = lambda parser: parser.add_argument( # noqa: E731 "-n", "--names", type=str, required=True, help="Name of the runs, comma separated." ) add_param_html_path = lambda parser: parser.add_argument( # noqa: E731 "--html-path", type=str, default=None, help=argparse.SUPPRESS ) add_params = [add_param_name, add_param_html_path] + base_params activate_action( name="visualize", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Visualize a run.", action_param_name="sub_action", ) def add_run_delete(subparsers): epilog = """ Example: # Caution: pf run delete is irreversible. # This operation will delete the run permanently from your local disk. # Both run entity and output data will be deleted. 
# Delete a run: pf run delete -n "<name>" """ add_params = [add_param_run_name, add_param_yes] + base_params activate_action( name="delete", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Delete a run irreversible.", action_param_name="sub_action", ) def add_run_archive(subparsers): epilog = """ Example: # Archive a run: pf run archive --name <name> """ add_params = [add_param_run_name] + base_params activate_action( name="archive", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Archive a run.", action_param_name="sub_action", ) def add_run_restore(subparsers): epilog = """ Example: # Restore an archived run: pf run restore --name <name> """ add_params = [add_param_run_name] + base_params activate_action( name="restore", description=None, epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Restore an archived run.", action_param_name="sub_action", ) def dispatch_run_commands(args: argparse.Namespace): if args.sub_action == "create": create_run(create_func=_create_run, args=args) elif args.sub_action == "update": update_run(name=args.name, params=args.params_override) elif args.sub_action == "stream": stream_run(name=args.name) elif args.sub_action == "list": list_runs( max_results=args.max_results, all_results=args.all_results, archived_only=args.archived_only, include_archived=args.include_archived, output=args.output, ) elif args.sub_action == "show": show_run(name=args.name) elif args.sub_action == "show-details": show_run_details(name=args.name, max_results=args.max_results, all_results=args.all_results) elif args.sub_action == "show-metrics": show_run_metrics(name=args.name) elif args.sub_action == "visualize": visualize_run(names=args.names, html_path=args.html_path) elif args.sub_action == "archive": archive_run(name=args.name) elif args.sub_action == "restore": restore_run(name=args.name) elif args.sub_action == "export": export_run(args) elif 
args.sub_action == "delete": delete_run(args.name, args.yes) else: raise ValueError(f"Unrecognized command: {args.sub_action}") def _parse_metadata_args(params: List[Dict[str, str]]) -> Tuple[Optional[str], Optional[str], Optional[Dict[str, str]]]: display_name, description, tags = None, None, {} for param in params: for k, v in param.items(): if k == "display_name": if display_name is not None: raise ValueError("Duplicate argument: 'display_name'.") display_name = v elif k == "description": if description is not None: raise ValueError("Duplicate argument: 'description'.") description = v elif k.startswith("tags."): tag_key = k.replace("tags.", "") if tag_key in tags: raise ValueError(f"Duplicate argument: 'tags.{tag_key}'.") tags[tag_key] = v if len(tags) == 0: tags = None return display_name, description, tags @exception_handler("Update run") def update_run(name: str, params: List[Dict[str, str]]) -> None: # params_override can have multiple items when user specifies with # `--set key1=value1 key2=value` # so we need to merge them first. 
display_name, description, tags = _parse_metadata_args(params) pf_client = PFClient() run = pf_client.runs.update( name=name, display_name=display_name, description=description, tags=tags, ) print(json.dumps(run._to_dict(), indent=4)) @exception_handler("Stream run") def stream_run(name: str) -> None: pf_client = PFClient() run = pf_client.runs.stream(name=name) print(json.dumps(run._to_dict(), indent=4)) @exception_handler("List runs") def list_runs( max_results: int, all_results: bool, archived_only: bool, include_archived: bool, output, ): pf_client = PFClient() # aligned behaviour with v2 SDK, all_results will overwrite max_results if all_results: max_results = None runs = pf_client.runs.list( max_results=max_results, list_view_type=get_list_view_type(archived_only=archived_only, include_archived=include_archived), ) # hide additional info and debug info in run list for better user experience parser = lambda run: run._to_dict(exclude_additional_info=True, exclude_debug_info=True) # noqa: E731 json_list = safe_parse_object_list( obj_list=runs, parser=parser, message_generator=lambda x: f"Error parsing run {x.name!r}, skipped.", ) _output_result_list_with_format(result_list=json_list, output_format=output) return runs @exception_handler("Show run") def show_run(name: str) -> None: pf_client = PFClient() run = pf_client.runs.get(name=name) print(json.dumps(run._to_dict(), indent=4)) @exception_handler("Show run details") def show_run_details(name: str, max_results: int, all_results: bool) -> None: pf_client = PFClient() details = pf_client.runs.get_details(name=name, max_results=max_results, all_results=all_results) pretty_print_dataframe_as_table(details) @exception_handler("Show run metrics") def show_run_metrics(name: str) -> None: pf_client = PFClient() metrics = pf_client.runs.get_metrics(name=name) print(json.dumps(metrics, indent=4)) @exception_handler("Visualize run") def visualize_run(names: str, html_path: Optional[str] = None) -> None: run_names = 
[name.strip() for name in names.split(",")] pf_client = PFClient() pf_client.runs.visualize(run_names, html_path=html_path) @exception_handler("Archive run") def archive_run(name: str) -> None: pf_client = PFClient() run = pf_client.runs.archive(name=name) print(json.dumps(run._to_dict(), indent=4)) @exception_handler("Restore run") def restore_run(name: str) -> None: pf_client = PFClient() run = pf_client.runs.restore(name=name) print(json.dumps(run._to_dict(), indent=4)) def _parse_kv_pair(kv_pairs: str) -> Dict[str, str]: result = {} for kv_pairs in kv_pairs.split(","): kv_pair = kv_pairs.strip() if "=" not in kv_pair: raise ValueError(f"Invalid key-value pair: {kv_pair}") key, value = kv_pair.split("=", 1) result[key] = value return result @exception_handler("Create run") def create_run(create_func: Callable, args): file = args.file flow = args.flow run_source = getattr(args, "source", None) # source is only available for pf args, not pfazure. data = args.data column_mapping = args.column_mapping variant = args.variant name = args.name run = args.run stream = args.stream environment_variables = args.environment_variables connections = args.connections params_override = args.params_override or [] if environment_variables: environment_variables = list_of_dict_to_dict(environment_variables) if connections: connections = list_of_dict_to_nested_dict(connections) if column_mapping: column_mapping = list_of_dict_to_dict(column_mapping) if file: for param_key, param in { "name": name, "flow": flow, "variant": variant, "data": data, "column_mapping": column_mapping, "run": run, "environment_variables": environment_variables, "connections": connections, }.items(): if not param: continue params_override.append({param_key: param}) run = load_run(source=file, params_override=params_override) elif flow: run_data = { "name": name, "flow": flow, "data": data, "column_mapping": column_mapping, "run": run, "variant": variant, "environment_variables": environment_variables, 
"connections": connections, } # remove empty fields run_data = {k: v for k, v in run_data.items() if v is not None} run = Run._load(data=run_data, params_override=params_override) elif run_source: display_name, description, tags = _parse_metadata_args(params_override) processed_params = { "display_name": display_name, "description": description, "tags": tags, } run = Run._load_from_source(source=run_source, params_override=processed_params) else: raise UserErrorException("To create a run, one of [file, flow, source] must be specified.") run = create_func(run=run, stream=stream) if stream: print("\n") # change new line to show run info print(json.dumps(run._to_dict(), indent=4)) @exception_handler("Delete run") def delete_run(name: str, skip_confirm: bool = False) -> None: if confirm("Are you sure to delete run irreversibly?", skip_confirm): pf_client = PFClient() pf_client.runs.delete(name=name) else: print("The delete operation was canceled.") def export_run(args): raise NotImplementedError()
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/_tool.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import argparse import json import re import shutil from pathlib import Path from promptflow._cli._params import add_param_set_tool_extra_info, base_params from promptflow._cli._pf._init_entry_generators import ( InitGenerator, SetupGenerator, ToolPackageGenerator, ToolPackageUtilsGenerator, ToolReadmeGenerator, ) from promptflow._cli._utils import activate_action, exception_handler, list_of_dict_to_dict from promptflow._sdk._constants import DEFAULT_ENCODING from promptflow._sdk._pf_client import PFClient from promptflow._utils.logger_utils import get_cli_sdk_logger from promptflow.exceptions import UserErrorException logger = get_cli_sdk_logger() def add_tool_parser(subparsers): """Add flow parser to the pf subparsers.""" tool_parser = subparsers.add_parser( "tool", description="Manage tools for promptflow.", help="pf tool", ) subparsers = tool_parser.add_subparsers() add_parser_init_tool(subparsers) add_parser_list_tool(subparsers) add_parser_validate_tool(subparsers) tool_parser.set_defaults(action="tool") def add_parser_init_tool(subparsers): """Add tool init parser to the pf tool subparsers.""" epilog = """ Examples: # Creating a package tool from scratch: pf tool init --package package_tool --tool tool_name # Creating a package tool with extra info: pf tool init --package package_tool --tool tool_name --set icon=<icon-path> category=<category> # Creating a python tool from scratch: pf tool init --tool tool_name """ # noqa: E501 add_param_package = lambda parser: parser.add_argument( # noqa: E731 "--package", type=str, help="The package name to create." ) add_param_tool = lambda parser: parser.add_argument( # noqa: E731 "--tool", type=str, required=True, help="The tool name to create." 
) add_params = [ add_param_package, add_param_tool, add_param_set_tool_extra_info, ] + base_params return activate_action( name="init", description="Creating a tool.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Initialize a tool directory.", action_param_name="sub_action", ) def add_parser_list_tool(subparsers): """Add tool list parser to the pf tool subparsers.""" epilog = """ Examples: # List all package tool in the environment: pf tool list # List all package tool and code tool in the flow: pf tool list --flow flow-path """ # noqa: E501 add_param_flow = lambda parser: parser.add_argument("--flow", type=str, help="the flow directory") # noqa: E731 add_params = [ add_param_flow, ] + base_params return activate_action( name="list", description="List tools.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="List all tools in the environment.", action_param_name="sub_action", ) def add_parser_validate_tool(subparsers): """Add tool list parser to the pf tool subparsers.""" epilog = """ Examples: # Validate single function tool: pf tool validate -–source <package_name>.<module_name>.<tool_function> # Validate all tool in a package tool: pf tool validate -–source <package_name> # Validate tools in a python script: pf tool validate --source <path_to_tool_script> """ # noqa: E501 def add_param_source(parser): parser.add_argument("--source", type=str, help="The tool source to be used.", required=True) return activate_action( name="validate", description="Validate tool.", epilog=epilog, add_params=[ add_param_source, ], subparsers=subparsers, help_message="Validate tool. 
Will raise error if it is not valid.", action_param_name="sub_action", ) def dispatch_tool_commands(args: argparse.Namespace): if args.sub_action == "init": init_tool(args) elif args.sub_action == "list": list_tool(args) elif args.sub_action == "validate": validate_tool(args) @exception_handler("Tool init") def init_tool(args): # Validate package/tool name pattern = r"^[a-zA-Z_][a-zA-Z0-9_]*$" if args.package and not re.match(pattern, args.package): raise UserErrorException(f"The package name {args.package} is a invalid identifier.") if not re.match(pattern, args.tool): raise UserErrorException(f"The tool name {args.tool} is a invalid identifier.") print("Creating tool from scratch...") extra_info = list_of_dict_to_dict(args.extra_info) icon_path = extra_info.pop("icon", None) if icon_path and not Path(icon_path).exists(): raise UserErrorException(f"Cannot find the icon path {icon_path}.") if args.package: package_path = Path(args.package) package_name = package_path.stem script_code_path = package_path / package_name script_code_path.mkdir(parents=True, exist_ok=True) # Generate manifest file manifest_file = package_path / "MANIFEST.in" manifest_file.touch(exist_ok=True) with open(manifest_file, "r") as f: manifest_contents = [line.strip() for line in f.readlines()] if icon_path: package_icon_path = package_path / "icons" package_icon_path.mkdir(exist_ok=True) dst = shutil.copy2(icon_path, package_icon_path) icon_path = f'Path(__file__).parent.parent / "icons" / "{Path(dst).name}"' icon_manifest = f"include {package_name}/icons" if icon_manifest not in manifest_contents: manifest_contents.append(icon_manifest) with open(manifest_file, "w", encoding=DEFAULT_ENCODING) as f: f.writelines("\n".join(set(manifest_contents))) # Generate package setup.py SetupGenerator(package_name=package_name, tool_name=args.tool).generate_to_file(package_path / "setup.py") # Generate utils.py to list meta data of tools. 
ToolPackageUtilsGenerator(package_name=package_name).generate_to_file(script_code_path / "utils.py") ToolReadmeGenerator(package_name=package_name, tool_name=args.tool).generate_to_file(package_path / "README.md") else: script_code_path = Path(".") if icon_path: icon_path = f'"{Path(icon_path).as_posix()}"' # Generate tool script ToolPackageGenerator(tool_name=args.tool, icon=icon_path, extra_info=extra_info).generate_to_file( script_code_path / f"{args.tool}.py" ) InitGenerator().generate_to_file(script_code_path / "__init__.py") print(f'Done. Created the tool "{args.tool}" in {script_code_path.resolve()}.') @exception_handler("Tool list") def list_tool(args): pf_client = PFClient() package_tools = pf_client._tools.list(args.flow) print(json.dumps(package_tools, indent=4)) @exception_handler("Tool validate") def validate_tool(args): import importlib pf_client = PFClient() try: __import__(args.source) source = importlib.import_module(args.source) logger.debug(f"The source {args.source} is used as a package to validate.") except ImportError: try: module_name, func_name = args.source.rsplit(".", 1) module = importlib.import_module(module_name) source = getattr(module, func_name) logger.debug(f"The source {args.source} is used as a function to validate.") except Exception: if not Path(args.source).exists(): raise UserErrorException("Invalid source to validate tools.") logger.debug(f"The source {args.source} is used as a script to validate.") source = args.source validation_result = pf_client._tools.validate(source) print(repr(validation_result)) if not validation_result.passed: exit(1)
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/help.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- from promptflow._sdk._configuration import Configuration # This logic is copied from: https://github.com/microsoft/knack/blob/dev/knack/help.py # Will print privacy message and welcome when user run `pf` command. PRIVACY_STATEMENT = """ Welcome to prompt flow! --------------------- Use `pf -h` to see available commands or go to https://aka.ms/pf-cli. Telemetry --------- The prompt flow CLI collects usage data in order to improve your experience. The data is anonymous and does not include commandline argument values. The data is collected by Microsoft. You can change your telemetry settings with `pf config`. """ WELCOME_MESSAGE = r""" ____ _ __ _ | _ \ _ __ ___ _ __ ___ _ __ | |_ / _| | _____ __ | |_) | '__/ _ \| '_ ` _ \| '_ \| __| | |_| |/ _ \ \ /\ / / | __/| | | (_) | | | | | | |_) | |_ | _| | (_) \ V V / |_| |_| \___/|_| |_| |_| .__/ \__| |_| |_|\___/ \_/\_/ |_| Welcome to the cool prompt flow CLI! Use `pf --version` to display the current version. Here are the base commands: """ def show_privacy_statement(): config = Configuration.get_instance() ran_before = config.get_config("first_run") if not ran_before: print(PRIVACY_STATEMENT) config.set_config("first_run", True) def show_welcome_message(): print(WELCOME_MESSAGE)
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf/_config.py
import argparse import json from promptflow._cli._params import add_param_set_positional, base_params from promptflow._cli._utils import activate_action, list_of_dict_to_dict from promptflow._sdk._configuration import Configuration, InvalidConfigValue from promptflow._sdk._utils import print_red_error from promptflow._utils.logger_utils import get_cli_sdk_logger logger = get_cli_sdk_logger() def add_config_set(subparsers): epilog = """ Examples: # Config connection provider to azure workspace for current user: pf config set connection.provider="azureml://subscriptions/<your-subscription>/resourceGroups/<your-resourcegroup>/providers/Microsoft.MachineLearningServices/workspaces/<your-workspace>" """ # noqa: E501 activate_action( name="set", description="Set prompt flow configs for current user.", epilog=epilog, add_params=[add_param_set_positional] + base_params, subparsers=subparsers, help_message="Set prompt flow configs for current user, configs will be stored at ~/.promptflow/pf.yaml.", action_param_name="sub_action", ) def add_config_show(subparsers): epilog = """ Examples: # Show prompt flow for current user: pf config show """ activate_action( name="show", description="Show prompt flow configs for current user.", epilog=epilog, add_params=base_params, subparsers=subparsers, help_message="Show prompt flow configs for current user.", action_param_name="sub_action", ) def add_config_parser(subparsers): config_parser = subparsers.add_parser( "config", description="A CLI tool to set prompt flow configs for current user.", help="pf config" ) subparsers = config_parser.add_subparsers() add_config_set(subparsers) add_config_show(subparsers) config_parser.set_defaults(action="config") def dispatch_config_commands(args: argparse.Namespace): if args.sub_action == "set": set_config(args) if args.sub_action == "show": show_config() def set_config(args): params_override = list_of_dict_to_dict(args.params_override) for k, v in params_override.items(): logger.debug("Setting 
config %s to %s", k, v) try: Configuration.get_instance().set_config(k, v) print(f"Set config {args.params_override} successfully.") except InvalidConfigValue as e: error_message = f"Invalid config value {v!r} for {k!r}: {str(e)}" print_red_error(error_message) def show_config(): configs = Configuration.get_instance().get_all() print(json.dumps(configs, indent=4))
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf_azure/_flow.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import argparse import json from typing import Dict, List from promptflow._cli._params import ( add_param_archived_only, add_param_flow_name, add_param_flow_type, add_param_include_archived, add_param_include_others, add_param_max_results, add_param_output_format, add_param_set, base_params, ) from promptflow._cli._pf_azure._utils import _get_azure_pf_client from promptflow._cli._utils import ( _output_result_list_with_format, _set_workspace_argument_for_subparsers, activate_action, exception_handler, ) from promptflow._sdk._constants import get_list_view_type def add_parser_flow(subparsers): """Add flow parser to the pf subparsers.""" flow_parser = subparsers.add_parser( "flow", description="Manage flows for prompt flow.", help="Manage prompt flows.", ) flow_subparsers = flow_parser.add_subparsers() add_parser_flow_create(flow_subparsers) add_parser_flow_show(flow_subparsers) add_parser_flow_list(flow_subparsers) flow_parser.set_defaults(action="flow") def add_parser_flow_create(subparsers): """Add flow create parser to the pf flow subparsers.""" epilog = """ Use "--set" to set flow properties like: display_name: Flow display name that will be created in remote. Default to be flow folder name + timestamp if not specified. type: Flow type. Default to be "standard" if not specified. Available types are: "standard", "evaluation", "chat". description: Flow description. e.g. "--set description=<description>." tags: Flow tags. e.g. "--set tags.key1=value1 tags.key2=value2." Note: In "--set" parameter, if the key name consists of multiple words, use snake-case instead of kebab-case. e.g. "--set display_name=<flow-display-name>" Examples: # Create a flow to azure portal with local flow folder. 
pfazure flow create --flow <flow-folder-path> --set display_name=<flow-display-name> type=<flow-type> # Create a flow with more properties pfazure flow create --flow <flow-folder-path> --set display_name=<flow-display-name> type=<flow-type> description=<flow-description> tags.key1=value1 tags.key2=value2 """ # noqa: E501 add_param_source = lambda parser: parser.add_argument( # noqa: E731 "--flow", type=str, help="Source folder of the flow." ) add_params = [ _set_workspace_argument_for_subparsers, add_param_source, add_param_set, ] + base_params activate_action( name="create", description="A CLI tool to create a flow to Azure.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Create a flow to Azure with local flow folder.", action_param_name="sub_action", ) def add_parser_flow_list(subparsers): """Add flow list parser to the pf flow subparsers.""" epilog = """ Examples: # List flows: pfazure flow list # List most recent 10 runs status: pfazure flow list --max-results 10 # List active and archived flows: pfazure flow list --include-archived # List archived flow only: pfazure flow list --archived-only # List all flows as table: pfazure flow list --output table # List flows with specific type: pfazure flow list --type standard # List flows that are owned by all users: pfazure flow list --include-others """ add_params = [ add_param_max_results, add_param_include_others, add_param_flow_type, add_param_archived_only, add_param_include_archived, add_param_output_format, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="list", description="List flows for promptflow.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="pfazure flow list", action_param_name="sub_action", ) def add_parser_flow_show(subparsers): """Add flow get parser to the pf flow subparsers.""" epilog = """ Examples: # Get flow: pfazure flow show --name <flow-name> """ add_params = [add_param_flow_name, 
_set_workspace_argument_for_subparsers] + base_params activate_action( name="show", description="Show a flow from Azure.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="pfazure flow show", action_param_name="sub_action", ) def add_parser_flow_download(subparsers): """Add flow download parser to the pf flow subparsers.""" add_param_source = lambda parser: parser.add_argument( # noqa: E731 "--source", type=str, help="The flow folder path on file share to download." ) add_param_destination = lambda parser: parser.add_argument( # noqa: E731 "--destination", "-d", type=str, help="The destination folder path to download." ) add_params = [ _set_workspace_argument_for_subparsers, add_param_source, add_param_destination, ] + base_params activate_action( name="download", description="Download a flow from file share to local.", epilog=None, add_params=add_params, subparsers=subparsers, help_message="pf flow download", action_param_name="sub_action", ) def dispatch_flow_commands(args: argparse.Namespace): if args.sub_action == "create": create_flow(args) elif args.sub_action == "show": show_flow(args) elif args.sub_action == "list": list_flows(args) def _get_flow_operation(subscription_id, resource_group, workspace_name): pf_client = _get_azure_pf_client(subscription_id, resource_group, workspace_name) return pf_client._flows @exception_handler("Create flow") def create_flow(args: argparse.Namespace): """Create a flow for promptflow.""" pf = _get_azure_pf_client(args.subscription, args.resource_group, args.workspace_name, debug=args.debug) params = _parse_flow_metadata_args(args.params_override) pf.flows.create_or_update(flow=args.flow, **params) @exception_handler("Show flow") def show_flow(args: argparse.Namespace): """Get a flow for promptflow.""" pf = _get_azure_pf_client(args.subscription, args.resource_group, args.workspace_name, debug=args.debug) flow = pf.flows.get(args.name) print(json.dumps(flow._to_dict(), indent=4)) def list_flows(args: 
argparse.Namespace): """List flows for promptflow.""" pf = _get_azure_pf_client(args.subscription, args.resource_group, args.workspace_name, debug=args.debug) flows = pf.flows.list( max_results=args.max_results, include_others=args.include_others, flow_type=args.type, list_view_type=get_list_view_type(args.archived_only, args.include_archived), ) flow_list = [flow._to_dict() for flow in flows] _output_result_list_with_format(flow_list, args.output) def _parse_flow_metadata_args(params: List[Dict[str, str]]) -> Dict: result, tags = {}, {} if not params: return result for param in params: for k, v in param.items(): if k.startswith("tags."): tag_key = k.replace("tags.", "") tags[tag_key] = v continue result[k] = v if tags: result["tags"] = tags return result
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf_azure/entry.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- # pylint: disable=wrong-import-position import json import time from promptflow._cli._pf.help import show_privacy_statement, show_welcome_message from promptflow._cli._user_agent import USER_AGENT from promptflow._cli._utils import _get_cli_activity_name, get_client_info_for_cli from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity # Log the start time start_time = time.perf_counter() # E402 module level import not at top of file import argparse # noqa: E402 import logging # noqa: E402 import sys # noqa: E402 from promptflow._cli._pf_azure._flow import add_parser_flow, dispatch_flow_commands # noqa: E402 from promptflow._cli._pf_azure._run import add_parser_run, dispatch_run_commands # noqa: E402 from promptflow._sdk._utils import ( # noqa: E402 get_promptflow_sdk_version, print_pf_version, setup_user_agent_to_operation_context, ) from promptflow._utils.logger_utils import get_cli_sdk_logger # noqa: E402 # get logger for CLI logger = get_cli_sdk_logger() def run_command(args): # Log the init finish time init_finish_time = time.perf_counter() try: # --verbose, enable info logging if hasattr(args, "verbose") and args.verbose: for handler in logger.handlers: handler.setLevel(logging.INFO) # --debug, enable debug logging if hasattr(args, "debug") and args.debug: for handler in logger.handlers: handler.setLevel(logging.DEBUG) if args.version: print_pf_version() elif args.action == "run": dispatch_run_commands(args) elif args.action == "flow": dispatch_flow_commands(args) except KeyboardInterrupt as ex: logger.debug("Keyboard interrupt is captured.") raise ex except SystemExit as ex: # some code directly call sys.exit, this is to make sure command metadata is logged exit_code = ex.code if ex.code is not None else 1 logger.debug(f"Code directly call sys.exit 
with code {exit_code}") raise ex except Exception as ex: logger.debug(f"Command {args} execute failed. {str(ex)}") raise ex finally: # Log the invoke finish time invoke_finish_time = time.perf_counter() logger.info( "Command ran in %.3f seconds (init: %.3f, invoke: %.3f)", invoke_finish_time - start_time, init_finish_time - start_time, invoke_finish_time - init_finish_time, ) def get_parser_args(argv): parser = argparse.ArgumentParser( prog="pfazure", formatter_class=argparse.RawDescriptionHelpFormatter, description="pfazure: manage prompt flow assets in azure. Learn more: https://microsoft.github.io/promptflow.", ) parser.add_argument( "-v", "--version", dest="version", action="store_true", help="show current CLI version and exit" ) subparsers = parser.add_subparsers() add_parser_run(subparsers) add_parser_flow(subparsers) return parser.prog, parser.parse_args(argv) def _get_workspace_info(args): try: subscription_id, resource_group_name, workspace_name = get_client_info_for_cli( subscription_id=args.subscription, resource_group_name=args.resource_group, workspace_name=args.workspace_name, ) return { "subscription_id": subscription_id, "resource_group_name": resource_group_name, "workspace_name": workspace_name, } except Exception: # fall back to empty dict if workspace info is not available return {} def entry(argv): """ Control plane CLI tools for promptflow cloud version. 
""" prog, args = get_parser_args(argv) if hasattr(args, "user_agent"): setup_user_agent_to_operation_context(args.user_agent) logger = get_telemetry_logger() custom_dimensions = _get_workspace_info(args) with log_activity( logger, _get_cli_activity_name(cli=prog, args=args), activity_type=ActivityType.PUBLICAPI, custom_dimensions=custom_dimensions, ): run_command(args) def main(): """Entrance of pf CLI.""" command_args = sys.argv[1:] if len(command_args) == 1 and command_args[0] == "version": version_dict = {"promptflow": get_promptflow_sdk_version()} return json.dumps(version_dict, ensure_ascii=False, indent=2, sort_keys=True, separators=(",", ": ")) + "\n" if len(command_args) == 0: # print privacy statement & welcome message like azure-cli show_privacy_statement() show_welcome_message() command_args.append("-h") elif len(command_args) == 1: # pfazure only has "pf --version" with 1 layer if command_args[0] not in ["--version", "-v"]: command_args.append("-h") setup_user_agent_to_operation_context(USER_AGENT) entry(command_args) if __name__ == "__main__": main()
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf_azure/_connection.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- from pathlib import Path from dotenv import dotenv_values from promptflow._cli._params import add_param_connection_name, add_param_env, base_params from promptflow._cli._utils import _set_workspace_argument_for_subparsers, activate_action, get_client_for_cli from promptflow._utils.logger_utils import get_cli_sdk_logger from promptflow.connections import CustomConnection from promptflow.contracts.types import Secret logger = get_cli_sdk_logger() def add_connection_parser(subparsers): connection_parser = subparsers.add_parser( "connection", description="A CLI tool to manage connections for promptflow.", help="pf connection" ) subparsers = connection_parser.add_subparsers() add_connection_create(subparsers) add_connection_get(subparsers) connection_parser.set_defaults(action="connection") def add_connection_create(subparsers): add_param_type = lambda parser: parser.add_argument( # noqa: E731 "--type", type=str, help='Type of the connection, Possible values include: "OpenAI", "AzureOpenAI", "Serp", "Bing", ' '"Custom", "AzureContentSafety", "CognitiveSearch", "SubstrateLLM', ) add_params = [ _set_workspace_argument_for_subparsers, add_param_connection_name, add_param_type, add_param_env, ] + base_params activate_action( name="create", description="Create a connection for promptflow.", epilog=None, add_params=add_params, subparsers=subparsers, help_message="pf connection create", action_param_name="sub_action", ) def add_connection_get(subparsers): add_params = [ _set_workspace_argument_for_subparsers, add_param_connection_name, add_param_env, ] + base_params activate_action( name="get", description="Get a connection for promptflow.", epilog=None, add_params=add_params, subparsers=subparsers, help_message="pf connection get", action_param_name="sub_action", ) def 
_get_conn_operations(subscription_id, resource_group, workspace_name): from promptflow.azure import PFClient client = get_client_for_cli( subscription_id=subscription_id, workspace_name=workspace_name, resource_group_name=resource_group ) pf = PFClient(ml_client=client) return pf._connections def create_conn(name, type, env, subscription_id, resource_group, workspace_name): from promptflow._sdk.entities._connection import _Connection if not Path(env).exists(): raise ValueError(f"Env file {env} does not exist.") try: dot_env = dotenv_values(env) except Exception as e: raise ValueError(f"Failed to load env file {env}. Error: {e}") custom_configs = CustomConnection(**{k: Secret(v) for k, v in dot_env.items()}) connection = _Connection(name=name, type=type, custom_configs=custom_configs, connection_scope="WorkspaceShared") conn_ops = _get_conn_operations(subscription_id, resource_group, workspace_name) result = conn_ops.create_or_update(connection=connection) print(result._to_yaml()) def get_conn(name, subscription_id, resource_group, workspace_name): conn_ops = _get_conn_operations(subscription_id, resource_group, workspace_name) result = conn_ops.get(name=name) print(result._to_yaml())
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf_azure/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf_azure/_run.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import argparse import functools import json from typing import Dict, List, Optional from promptflow._cli._params import ( add_param_all_results, add_param_archived_only, add_param_include_archived, add_param_max_results, add_param_output, add_param_output_format, add_param_overwrite, add_param_run_name, add_param_set, base_params, ) from promptflow._cli._pf._run import _parse_metadata_args, add_run_create_common, create_run from promptflow._cli._pf_azure._utils import _get_azure_pf_client from promptflow._cli._utils import ( _output_result_list_with_format, _set_workspace_argument_for_subparsers, activate_action, exception_handler, pretty_print_dataframe_as_table, ) from promptflow._sdk._constants import MAX_SHOW_DETAILS_RESULTS, ListViewType from promptflow._sdk._errors import InvalidRunStatusError from promptflow._sdk._utils import print_red_error from promptflow.azure._restclient.flow_service_caller import FlowRequestException def add_parser_run(subparsers): """Add run parser to the pfazure subparsers.""" run_parser = subparsers.add_parser( "run", description="A CLI tool to manage cloud runs for prompt flow.", help="Manage prompt flow runs." 
) subparsers = run_parser.add_subparsers() add_run_create_cloud(subparsers) add_parser_run_list(subparsers) add_parser_run_stream(subparsers) add_parser_run_show(subparsers) add_parser_run_show_details(subparsers) add_parser_run_show_metrics(subparsers) add_parser_run_cancel(subparsers) add_parser_run_visualize(subparsers) add_parser_run_archive(subparsers) add_parser_run_restore(subparsers) add_parser_run_update(subparsers) add_parser_run_download(subparsers) run_parser.set_defaults(action="run") def add_run_create_cloud(subparsers): epilog = """ Example: # Create a run with YAML file: pfazure run create -f <yaml-filename> # Create a run from flow directory and reference a run: pfazure run create --flow <path-to-flow-directory> --data <path-to-data-file> --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run <run-name> --variant "${summarize_text_content.variant_0}" --stream # Create a run from existing workspace flow pfazure run create --flow azureml:<flow-name> --data <path-to-data-file> --column-mapping <key-value-pair> # Create a run from existing registry flow pfazure run create --flow azureml://registries/<registry-name>/models/<flow-name>/versions/<version> --data <path-to-data-file> --column-mapping <key-value-pair> """ # noqa: E501 def add_param_data(parser): # cloud pf can also accept remote data parser.add_argument( "--data", type=str, help="Local path to the data file or remote data. e.g. azureml:name:version." 
) add_param_runtime = lambda parser: parser.add_argument("--runtime", type=str, help=argparse.SUPPRESS) # noqa: E731 add_param_reset = lambda parser: parser.add_argument( # noqa: E731 "--reset-runtime", action="store_true", help=argparse.SUPPRESS ) add_run_create_common( subparsers, [add_param_data, add_param_runtime, add_param_reset, _set_workspace_argument_for_subparsers], epilog=epilog, ) def add_parser_run_list(subparsers): """Add run list parser to the pfazure subparsers.""" epilog = """ Examples: # List runs status: pfazure run list # List most recent 10 runs status: pfazure run list --max-results 10 # List active and archived runs status: pfazure run list --include-archived # List archived runs status only: pfazure run list --archived-only # List all runs status as table: pfazure run list --output table """ add_params = [ add_param_max_results, add_param_archived_only, add_param_include_archived, add_param_output_format, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="list", description="A CLI tool to List all runs.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="List runs in a workspace.", action_param_name="sub_action", ) def add_parser_run_stream(subparsers): """Add run stream parser to the pfazure subparsers.""" epilog = """ Example: # Stream run logs: pfazure run stream --name <name> """ add_params = [ add_param_run_name, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="stream", description="A CLI tool to stream run logs to the console.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Stream run logs to the console.", action_param_name="sub_action", ) def add_parser_run_show(subparsers): """Add run show parser to the pfazure subparsers.""" epilog = """ Example: # Show the status of a run: pfazure run show --name <name> """ add_params = [ add_param_run_name, _set_workspace_argument_for_subparsers, ] + base_params activate_action( 
name="show", description="A CLI tool to show a run.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Show a run.", action_param_name="sub_action", ) def add_parser_run_show_details(subparsers): """Add run show details parser to the pfazure subparsers.""" epilog = """ Example: # View input(s) and output(s) of a run: pfazure run show-details --name <name> """ add_param_max_results = lambda parser: parser.add_argument( # noqa: E731 "-r", "--max-results", dest="max_results", type=int, default=MAX_SHOW_DETAILS_RESULTS, help=f"Number of lines to show. Default is {MAX_SHOW_DETAILS_RESULTS}.", ) add_params = [ add_param_max_results, add_param_run_name, add_param_all_results, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="show-details", description="A CLI tool to show a run details.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Show a run details.", action_param_name="sub_action", ) def add_parser_run_show_metrics(subparsers): """Add run show metrics parser to the pfazure subparsers.""" epilog = """ Example: # View metrics of a run: pfazure run show-metrics --name <name> """ add_params = [ add_param_run_name, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="show-metrics", description="A CLI tool to show run metrics.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Show run metrics.", action_param_name="sub_action", ) def add_parser_run_cancel(subparsers): """Add run cancel parser to the pfazure subparsers.""" epilog = """ Example: # Cancel a run: pfazure run cancel --name <name> """ add_params = [ add_param_run_name, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="cancel", description="A CLI tool to cancel a run.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Cancel a run.", action_param_name="sub_action", ) def add_parser_run_visualize(subparsers): """Add run 
visualize parser to the pfazure subparsers.""" epilog = """ Examples: # Visualize a run: pfazure run visualize -n <name> # Visualize runs: pfazure run visualize --names "<name1,name2>" pfazure run visualize --names "<name1>, <name2>" """ add_param_name = lambda parser: parser.add_argument( # noqa: E731 "-n", "--names", type=str, required=True, help="Name of the runs, comma separated." ) add_param_html_path = lambda parser: parser.add_argument( # noqa: E731 "--html-path", type=str, default=None, help=argparse.SUPPRESS ) add_params = [ add_param_name, add_param_html_path, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="visualize", description="A CLI tool to visualize a run.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Visualize a run.", action_param_name="sub_action", ) def add_parser_run_archive(subparsers): """Add run archive parser to the pfazure subparsers.""" epilog = """ Examples: # Archive a run: pfazure run archive -n <name> """ add_params = [ add_param_run_name, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="archive", description="A CLI tool to archive a run.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Archive a run.", action_param_name="sub_action", ) def add_parser_run_restore(subparsers): """Add run restore parser to the pfazure subparsers.""" epilog = """ Examples: # Restore an archived run: pfazure run restore -n <name> """ add_params = [ add_param_run_name, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="restore", description="A CLI tool to restore a run.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Restore a run.", action_param_name="sub_action", ) def add_parser_run_update(subparsers): """Add run update parser to the pfazure subparsers.""" epilog = """ Example: # Update a run metadata: pfazure run update --name <name> --set display_name="<display-name>" 
description="<description>" tags.key="<value>" """ add_params = [ add_param_run_name, add_param_set, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="update", description="A CLI tool to update a run.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Update a run.", action_param_name="sub_action", ) def add_parser_run_download(subparsers): """Add run download parser to the pfazure subparsers.""" epilog = """ Example: # Download a run data to local: pfazure run download --name <name> --output <output-folder-path> """ add_params = [ add_param_run_name, add_param_output, add_param_overwrite, _set_workspace_argument_for_subparsers, ] + base_params activate_action( name="download", description="A CLI tool to download a run.", epilog=epilog, add_params=add_params, subparsers=subparsers, help_message="Download a run.", action_param_name="sub_action", ) def dispatch_run_commands(args: argparse.Namespace): if args.sub_action == "create": pf = _get_azure_pf_client(args.subscription, args.resource_group, args.workspace_name, debug=args.debug) create_run( create_func=functools.partial( pf.runs.create_or_update, runtime=args.runtime, reset_runtime=args.reset_runtime ), args=args, ) elif args.sub_action == "list": list_runs( args.subscription, args.resource_group, args.workspace_name, args.max_results, args.archived_only, args.include_archived, args.output, ) elif args.sub_action == "show": show_run(args.subscription, args.resource_group, args.workspace_name, args.name) elif args.sub_action == "show-details": show_run_details( args.subscription, args.resource_group, args.workspace_name, args.name, args.max_results, args.all_results, args.debug, ) elif args.sub_action == "show-metrics": show_metrics(args.subscription, args.resource_group, args.workspace_name, args.name) elif args.sub_action == "stream": stream_run(args.subscription, args.resource_group, args.workspace_name, args.name, args.debug) elif args.sub_action == 
"visualize": visualize( args.subscription, args.resource_group, args.workspace_name, args.names, args.html_path, args.debug, ) elif args.sub_action == "archive": archive_run(args.subscription, args.resource_group, args.workspace_name, args.name) elif args.sub_action == "restore": restore_run(args.subscription, args.resource_group, args.workspace_name, args.name) elif args.sub_action == "update": update_run(args.subscription, args.resource_group, args.workspace_name, args.name, params=args.params_override) elif args.sub_action == "download": download_run(args) elif args.sub_action == "cancel": cancel_run(args) @exception_handler("List runs") def list_runs( subscription_id, resource_group, workspace_name, max_results, archived_only, include_archived, output, ): """List all runs from cloud.""" if max_results < 1: raise ValueError(f"'max_results' must be a positive integer, got {max_results!r}") # Default list_view_type is ACTIVE_ONLY if archived_only and include_archived: raise ValueError("Cannot specify both 'archived_only' and 'include_archived'") list_view_type = ListViewType.ACTIVE_ONLY if archived_only: list_view_type = ListViewType.ARCHIVED_ONLY if include_archived: list_view_type = ListViewType.ALL pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name) runs = pf.runs.list(max_results=max_results, list_view_type=list_view_type) # hide additional info, debug info and properties in run list for better user experience run_list = [ run._to_dict(exclude_additional_info=True, exclude_debug_info=True, exclude_properties=True) for run in runs ] _output_result_list_with_format(result_list=run_list, output_format=output) return runs @exception_handler("Show run") def show_run(subscription_id, resource_group, workspace_name, run_name): """Show a run from cloud.""" pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name) run = pf.runs.get(run=run_name) print(json.dumps(run._to_dict(), indent=4)) @exception_handler("Show run details") 
def show_run_details(subscription_id, resource_group, workspace_name, run_name, max_results, all_results, debug=False): """Show a run details from cloud.""" pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name, debug=debug) details = pf.runs.get_details(run=run_name, max_results=max_results, all_results=all_results) details.fillna(value="(Failed)", inplace=True) # replace nan with explicit prompt pretty_print_dataframe_as_table(details) @exception_handler("Show run metrics") def show_metrics(subscription_id, resource_group, workspace_name, run_name): """Show run metrics from cloud.""" pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name) metrics = pf.runs.get_metrics(run=run_name) print(json.dumps(metrics, indent=4)) @exception_handler("Stream run") def stream_run(subscription_id, resource_group, workspace_name, run_name, debug=False): """Stream run logs from cloud.""" pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name, debug=debug) run = pf.runs.stream(run_name) print("\n") print(json.dumps(run._to_dict(), indent=4)) @exception_handler("Visualize run") def visualize( subscription_id: str, resource_group: str, workspace_name: str, names: str, html_path: Optional[str] = None, debug: bool = False, ): run_names = [name.strip() for name in names.split(",")] pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name, debug=debug) try: pf.runs.visualize(run_names, html_path=html_path) except FlowRequestException as e: error_message = f"Visualize failed, request service error: {str(e)}" print_red_error(error_message) except InvalidRunStatusError as e: error_message = f"Visualize failed: {str(e)}" print_red_error(error_message) @exception_handler("Archive run") def archive_run( subscription_id: str, resource_group: str, workspace_name: str, run_name: str, ): pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name) run = pf.runs.archive(run=run_name) 
print(json.dumps(run._to_dict(), indent=4)) @exception_handler("Restore run") def restore_run( subscription_id: str, resource_group: str, workspace_name: str, run_name: str, ): pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name) run = pf.runs.restore(run=run_name) print(json.dumps(run._to_dict(), indent=4)) @exception_handler("Update run") def update_run( subscription_id: str, resource_group: str, workspace_name: str, run_name: str, params: List[Dict[str, str]], ): # params_override can have multiple items when user specifies with # `--set key1=value1 key2=value` # so we need to merge them first. display_name, description, tags = _parse_metadata_args(params) pf = _get_azure_pf_client(subscription_id, resource_group, workspace_name) run = pf.runs.update(run=run_name, display_name=display_name, description=description, tags=tags) print(json.dumps(run._to_dict(), indent=4)) @exception_handler("Download run") def download_run(args: argparse.Namespace): pf = _get_azure_pf_client(args.subscription, args.resource_group, args.workspace_name, debug=args.debug) pf.runs.download(run=args.name, output=args.output, overwrite=args.overwrite) @exception_handler("Cancel run") def cancel_run(args: argparse.Namespace): pf = _get_azure_pf_client(args.subscription, args.resource_group, args.workspace_name, debug=args.debug) pf.runs.cancel(run=args.name)
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/_pf_azure/_utils.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow._cli._utils import get_client_for_cli
from promptflow.azure import PFClient


def _get_azure_pf_client(subscription_id, resource_group, workspace_name, debug=False):
    """Build a :class:`PFClient` bound to the given Azure ML workspace.

    :param subscription_id: Azure subscription id of the target workspace.
    :param resource_group: Resource group that contains the workspace.
    :param workspace_name: Name of the Azure ML workspace.
    :param debug: When True, enable verbose SDK logging on the client.
    :return: A PFClient wired to the resolved workspace ML client.
    """
    workspace_ml_client = get_client_for_cli(
        subscription_id=subscription_id,
        resource_group_name=resource_group,
        workspace_name=workspace_name,
    )
    return PFClient(ml_client=workspace_ml_client, logging_enable=debug)
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/data.jsonl
{"groundtruth": "App", "prediction": "App"}
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/line_process.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow import tool


@tool
def line_process(groundtruth: str, prediction: str):
    """Grade a single prediction against its groundtruth.

    The comparison is case-insensitive; any other difference counts as a miss.

    :param groundtruth: Expected answer for this line.
    :param prediction: Model output for this line.
    :return: "Correct" on a case-insensitive match, otherwise "Incorrect".
    """
    if groundtruth.lower() == prediction.lower():
        return "Correct"
    return "Incorrect"
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: groundtruth: type: string prediction: type: string outputs: results: type: string reference: ${line_process.output} nodes: - name: line_process type: python source: type: code path: line_process.py inputs: groundtruth: ${inputs.groundtruth} prediction: ${inputs.prediction} - name: aggregate type: python source: type: code path: aggregate.py inputs: processed_results: ${line_process.output} aggregation: true environment: python_requirements_txt: requirements.txt
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/aggregate.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from typing import List

from promptflow import log_metric, tool


@tool
def aggregate(processed_results: List[str]):
    """Aggregate per-line grading results into an accuracy metric.

    Accuracy is the fraction of entries equal to "Correct", rounded to two
    decimal places. An empty input yields 0.0 instead of raising
    ZeroDivisionError (the original code divided by ``len(processed_results)``
    unconditionally).

    :param processed_results: Output of the line_process node for every line.
    :return: The accuracy as a float in [0.0, 1.0].
    """
    if processed_results:
        aggregated_result = round(processed_results.count("Correct") / len(processed_results), 2)
    else:
        # Guard the empty-run case: no lines means zero accuracy, not a crash.
        aggregated_result = 0.0

    # Surface the accuracy so it appears in the run's metrics view.
    log_metric(key="accuracy", value=aggregated_result)

    return aggregated_result
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/.promptflow/flow.tools.json
{ "package": {}, "code": { "line_process.py": { "type": "python", "inputs": { "groundtruth": { "type": [ "string" ] }, "prediction": { "type": [ "string" ] } }, "function": "line_process" }, "aggregate.py": { "type": "python", "inputs": { "processed_results": { "type": [ "object" ] } }, "function": "aggregate" } } }
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/entry_flow/flow.tools.json.jinja2
{ "package": {}, "code": { {% for key, prompt_obj in prompt_params.items() %} "{{ key }}": { "type": "prompt", "inputs": { {% for input_name, value in prompt_obj.get("inputs", {}).items() %} "{{ input_name }}": { "type": [ {% for typ in value["type"] %} "{{ typ.value }}" {% endfor %} ] }{{ "," if not loop.last else "" }} {% endfor %} }, "source": "{{ prompt_obj.source }}" }, {% endfor %} "{{ tool_file }}": { "type": "python", "inputs": { {% for arg, typ in tool_meta_args.items() %} "{{ arg }}": { "type": [ "{{ typ }}" ] }, {% endfor %} "connection": { "type": [ "CustomConnection" ] } }, "function": "{{ tool_function }}", "source": "{{ tool_file }}" } } }
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/entry_flow/requirements_txt
promptflow
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/entry_flow/flow.dag.yaml.jinja2
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: {% for arg, typ in flow_inputs.items() %} {{ arg }}: type: {{ typ }} {% endfor %} outputs: output: type: string reference: {% raw %}${{% endraw %}{{ main_node_name }}.output} nodes: {% for param_name, file in prompt_params.items() %} - name: {{ param_name }} type: prompt source: type: code path: {{ file }} inputs: # Please check the generated prompt inputs {% for arg in prompt_inputs[param_name].keys() %} {{ arg }}: ${inputs.{{ arg }}} {% endfor %} {% endfor %} - name: {{ main_node_name }} type: python source: type: code path: {{ tool_file }} inputs: {# Below are node inputs link to flow inputs #} {% for arg in func_params.keys() %} {{ arg }}: ${inputs.{{ arg }}} {% endfor %} {# Below are node prompt template inputs from prompt nodes #} {% for param_name, file in prompt_params.items() %} {{ param_name }}: {% raw %}${{% endraw %}{{ param_name }}.output} {% endfor %} connection: custom_connection {% if setup_sh or python_requirements_txt %} environment: {% if setup_sh %} setup_sh: {{ setup_sh }} {% endif %} {% if python_requirements_txt %} python_requirements_txt: {{ python_requirements_txt }} {% endif %} {% endif %}
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/entry_flow/tool.py.jinja2
import os from promptflow import tool from promptflow.connections import CustomConnection {{ function_import }} @tool def {{ tool_function }}( {% for arg in tool_arg_list %} {{ arg.name }}, {% endfor %} connection: CustomConnection) -> str: # set environment variables for key, value in dict(connection).items(): os.environ[key] = value # call the entry function return {{ entry_function }}( {% for arg in tool_arg_list %} {{ arg.name }}={{ arg.name }}, {% endfor %} )
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/entry_flow/gitignore
.env __pycache__/ .promptflow/* !.promptflow/flow.tools.json .runs/
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/standard_flow/data.jsonl
{"text": "Hello World!"}
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/standard_flow/hello.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow import tool


# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signature per need
@tool
def my_python_tool(input1: str) -> str:
    """Echo the rendered prompt back, tagged with a "Prompt: " prefix."""
    prefix = "Prompt: "
    return prefix + input1
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/standard_flow/flow.dag.yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: text: type: string outputs: output_prompt: type: string reference: ${echo_my_prompt.output} nodes: - name: hello_prompt type: prompt source: type: code path: hello.jinja2 inputs: text: ${inputs.text} - name: echo_my_prompt type: python source: type: code path: hello.py inputs: input1: ${hello_prompt.output} environment: python_requirements_txt: requirements.txt
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/standard_flow/hello.jinja2
{# Please replace the template with your own prompt. #} Write a simple {{text}} program that displays the greeting message when executed.
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/standard_flow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/standard_flow/.promptflow/flow.tools.json
{ "package": {}, "code": { "hello.jinja2": { "type": "prompt", "inputs": { "text": { "type": [ "string" ] } } }, "hello.py": { "type": "python", "inputs": { "input1": { "type": [ "string" ] } }, "function": "my_python_tool" } } }
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/package_tool/setup.py.jinja2
from setuptools import find_packages, setup PACKAGE_NAME = "{{ package_name }}" setup( name=PACKAGE_NAME, version="0.0.1", description="This is my tools package", packages=find_packages(), entry_points={ "package_tools": ["{{ package_name }} = {{ package_name }}.utils:list_package_tools"], }, install_requires=[ "promptflow", ] )
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/package_tool/README.md.jinja2
The directory structure in the package tool is as follows: ```python {{ package_name }} │ setup.py # This file contains metadata about your project like the name, version. │ │ MANIFEST.in # This file is used to determine which files to include in the distribution of the project. │ └───{{ package_name }}{{" " * (24 - package_name|length)}}# This is the source directory. All of your project’s source code should be placed in this directory. {{ tool_name }}.py{{ " " * (17 - tool_name|length)}}# The source code of tools. Using the @tool decorator to identify the function as a tool. utils.py # Utility functions for the package. A method for listing all tools defined in the package is generated in this file. __init__.py ``` Please refer to [tool doc](https://microsoft.github.io/promptflow/how-to-guides/develop-a-tool/index.html) for more details about how to develop a tool.
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/package_tool/init.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/package_tool/utils.py.jinja2
from pathlib import Path import importlib.util from promptflow import PFClient package_name = "{{ package_name }}" def list_package_tools(raise_error=False): """ List the meta of all tools in the package. The key of meta dict is the module name of tools and value is the meta data of the tool. """ # This function is auto generated by pf CLI, please do not modify manually. tools = {} pf_client = PFClient() tools = pf_client.tools._list_tools_in_package(package_name, raise_error=raise_error) return tools
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/package_tool/tool.py.jinja2
{% if icon %} from pathlib import Path {% endif %} from promptflow import tool from promptflow.connections import CustomConnection @tool( name="{{ tool_name }}", description="This is {{ tool_name }} tool", {% if icon %} icon={{ icon }}, {% endif %} {% for key, value in extra_info.items() %} {{ key }}={{ value }}, {% endfor %} ) def {{ tool_name }}(connection: CustomConnection, input_text: str) -> str: # Replace with your tool code. # Usually connection contains configs to connect to an API. # Use CustomConnection is a dict. You can use it like: connection.api_key, connection.api_base # Not all tools need a connection. You can remove it if you don't need it. return "Hello " + input_text
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow/flow_files/README.md
# Chat flow Chat flow is designed for conversational application development, building upon the capabilities of standard flow and providing enhanced support for chat inputs/outputs and chat history management. With chat flow, you can easily create a chatbot that handles chat input and output. ## Create connection for LLM tool to use You can follow these steps to create a connection required by a LLM tool. Currently, there are two connection types supported by LLM tool: "AzureOpenAI" and "OpenAI". If you want to use "AzureOpenAI" connection type, you need to create an Azure OpenAI service first. Please refer to [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/cognitive-services/openai-service/) for more details. If you want to use "OpenAI" connection type, you need to create an OpenAI account first. Please refer to [OpenAI](https://platform.openai.com/) for more details. ```bash # Override keys with --set to avoid yaml file changes # Create open ai connection pf connection create --file openai.yaml --set api_key=<your_api_key> --name open_ai_connection # Create azure open ai connection # pf connection create --file azure_openai.yaml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection ``` Note in [flow.dag.yaml](flow.dag.yaml) we are using connection named `open_ai_connection`. ```bash # show registered connection pf connection show --name open_ai_connection ``` Please refer to connections [document](https://promptflow.azurewebsites.net/community/local/manage-connections.html) and [example](https://github.com/microsoft/promptflow/tree/main/examples/connections) for more details. ## Develop a chat flow The most important elements that differentiate a chat flow from a standard flow are **Chat Input**, **Chat History**, and **Chat Output**. - **Chat Input**: Chat input refers to the messages or queries submitted by users to the chatbot. 
Effectively handling chat input is crucial for a successful conversation, as it involves understanding user intentions, extracting relevant information, and triggering appropriate responses. - **Chat History**: Chat history is the record of all interactions between the user and the chatbot, including both user inputs and AI-generated outputs. Maintaining chat history is essential for keeping track of the conversation context and ensuring the AI can generate contextually relevant responses. Chat History is a special type of chat flow input, that stores chat messages in a structured format. - **Chat Output**: Chat output refers to the AI-generated messages that are sent to the user in response to their inputs. Generating contextually appropriate and engaging chat outputs is vital for a positive user experience. A chat flow can have multiple inputs, but Chat History and Chat Input are required inputs in chat flow. ## Interact with chat flow Promptflow CLI provides a way to start an interactive chat session for chat flow. Customer can use below command to start an interactive chat session: ``` pf flow test --flow <flow_folder> --interactive ``` After executing this command, customer can interact with the chat flow in the terminal. Customer can press **Enter** to send the message to chat flow. And customer can quit with **ctrl+C**. Promptflow CLI will distinguish the output of different roles by color, <span style="color:Green">User input</span>, <span style="color:Gold">Bot output</span>, <span style="color:Blue">Flow script output</span>, <span style="color:Cyan">Node output</span>. 
> =========================================<br> > Welcome to chat flow, <You-flow-name>.<br> > Press Enter to send your message.<br> > You can quit with ctrl+C.<br> > =========================================<br> > <span style="color:Green">User:</span> What types of container software there are<br> > <span style="color:Gold">Bot:</span> There are several types of container software available, including:<br> > 1. Docker: This is one of the most popular containerization software that allows developers to package their applications into containers and deploy them across different environments.<br> > 2. Kubernetes: This is an open-source container orchestration platform that automates the deployment, scaling, and management of containerized applications.<br> > > <span style="color:Green">User:</span> What's the different between them<br> > <span style="color:Gold">Bot:</span> The main difference between the various container software systems is their functionality and purpose. Here are some key differences between them:<br> > 1. Docker is more focused on container packaging and deployment, while Kubernetes is more focused on container orchestration and management.<br> > 2. Kubernetes: Kubernetes is a container orchestration tool that helps manage and deploy containers at scale. It automates the deployment, scaling, and management of containerized applications across multiple hosts.<br> If customer adds "--verbose" in the pf command, the output of each step will be displayed. > =========================================<br> > Welcome to chat flow, Template Chat Flow.<br> > Press Enter to send your message.<br> > You can quit with ctrl+C.<br> > =========================================<br> > <span style="color:Green">User:</span> What types of container software there are<br> > <span style="color:Cyan">chat:</span> There are several types of container software available, including:<br> > 1. 
Docker: A popular container platform that is widely used in the industry.<br> > 2. Kubernetes: A container orchestration tool that helps manage and deploy containers at scale.<br> > > <span style="color:Gold">Bot:</span> There are several types of container software available, including:<br> > 1. Docker: A popular container platform that is widely used in the industry.<br> > 2. Kubernetes: A container orchestration tool that helps manage and deploy containers at scale.<br> > > <span style="color:Green">User:</span> What's the different between them<br> > <span style="color:Cyan">chat:</span> The main differences between container software are in their architecture, feature sets, and use cases. Here are some brief explanations of the differences between the examples I listed:<br> > 1. Docker: Docker is a container platform that is widely used for building, packaging, and deploying containerized applications. It is known for its ease of use, portability, and large ecosystem of tools and services.<br> > 2. Kubernetes: Kubernetes is a container orchestration tool that helps manage and deploy containers at scale. It automates the deployment, scaling, and management of containerized applications across multiple hosts.<br> > > <span style="color:Gold">Bot:</span> The main differences between container software are in their architecture, feature sets, and use cases. Here are some brief explanations of the differences between the examples I listed:<br> > 1. Docker: Docker is a container platform that is widely used for building, packaging, and deploying containerized applications. It is known for its ease of use, portability, and large ecosystem of tools and services.<br> > 2. Kubernetes: Kubernetes is a container orchestration tool that helps manage and deploy containers at scale. It automates the deployment, scaling, and management of containerized applications across multiple hosts.<br>
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow/flow_files/chat.jinja2
system: You are a helpful assistant. {% for item in chat_history %} user: {{item.inputs.question}} assistant: {{item.outputs.answer}} {% endfor %} user: {{question}}
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow/flow_files
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow/flow_files/.promptflow/flow.tools.json
{ "package": {}, "code": { "chat.jinja2": { "type": "llm", "inputs": { "question": { "type": [ "string" ] }, "chat_history": { "type": [ "string" ] } }, "description": "Chat with Chatbot" } } }
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow/template/openai.yaml.jinja2
# Jinja template for an OpenAI connection spec.
# "{{ connection }}" is substituted with the connection name at scaffold time;
# the "<user-input>" placeholder is filled in by the user afterwards.
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
name: {{ connection }}
type: open_ai
api_key: "<user-input>"
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow/template/azure_openai.yaml.jinja2
# Jinja template for an Azure OpenAI connection spec.
# "{{ connection }}" is substituted with the connection name at scaffold time;
# the "<user-input>" placeholders are filled in by the user afterwards.
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/AzureOpenAIConnection.schema.json
name: {{ connection }}
type: azure_open_ai
api_key: "<user-input>"
api_base: "<user-input>"
api_type: "azure"
0
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow
promptflow_repo/promptflow/src/promptflow/promptflow/_cli/data/chat_flow/template/flow.dag.yaml.jinja2
# Jinja template for the scaffolded chat flow definition (flow.dag.yaml).
# "{{ deployment }}" and "{{ connection }}" are substituted when the flow is created.
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
  chat_history:
    type: list
    is_chat_history: true
    default: []
  question:
    type: string
    is_chat_input: true
outputs:
  answer:
    type: string
    reference: ${chat.output}
    is_chat_output: true
nodes:
- name: chat
  type: llm
  source:
    type: code
    path: chat.jinja2
  inputs:
    # max_tokens/temperature are quoted strings in the scaffold; the runtime
    # coerces them for the LLM call.
    deployment_name: {{ deployment }}
    max_tokens: '256'
    temperature: '0.7'
    chat_history: ${inputs.chat_history}
    question: ${inputs.question}
  api: chat
  connection: {{ connection }}
environment:
  python_requirements_txt: requirements.txt
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/operations/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore from promptflow._sdk.operations._connection_operations import ConnectionOperations from promptflow._sdk.operations._flow_operations import FlowOperations from promptflow._sdk.operations._run_operations import RunOperations __all__ = ["ConnectionOperations", "FlowOperations", "RunOperations"]
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/connections/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- from dataclasses import dataclass, is_dataclass from promptflow._core.tools_manager import register_connections from promptflow._sdk.entities import ( AzureContentSafetyConnection, AzureOpenAIConnection, CognitiveSearchConnection, CustomConnection, FormRecognizerConnection, OpenAIConnection, SerpConnection, CustomStrongTypeConnection, ) from promptflow._sdk.entities._connection import _Connection from promptflow.contracts.types import Secret @dataclass class BingConnection: api_key: Secret url: str = "https://api.bing.microsoft.com/v7.0/search" # We should use unified connection class everywhere. # Do not add new connection class definition directly here. # !!!Attention!!!: Do not add external package connections here. __all__ = [ "OpenAIConnection", "AzureOpenAIConnection", "AzureContentSafetyConnection", "SerpConnection", "CognitiveSearchConnection", "FormRecognizerConnection", "CustomConnection", "CustomStrongTypeConnection", ] register_connections( [v for v in globals().values() if is_dataclass(v) or (isinstance(v, type) and issubclass(v, _Connection))] )
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/storage/_cache_storage.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import dataclass
from datetime import datetime

from promptflow.contracts.run_info import RunInfo


@dataclass
class CacheRecord:
    """A persisted cache entry linking a run to its cache lookup key."""

    run_id: str
    hash_id: str  # cache lookup key, matched by get_cache_record_list
    flow_run_id: str
    flow_id: str
    cache_string: str
    end_time: datetime


class AbstractCacheStorage:
    """Interface for cache storage backends.

    Subclasses should override both methods; the base implementations are no-ops.
    """

    # Fix: both methods were declared without ``self``, so calling them on an
    # instance would have bound the first real argument to the instance.
    def get_cache_record_list(self, hash_id: str) -> CacheRecord:
        """Return the cache record(s) previously persisted under *hash_id*."""
        pass

    def persist_cache_result(self, run_info: RunInfo):
        """Persist the cache result of a finished run.

        :param run_info: The run info whose result should be cached.
        :type run_info: ~promptflow.contracts.run_info.RunInfo
        """
        pass
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/storage/_run_storage.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from functools import partial
from pathlib import Path
from typing import Union

from promptflow._utils.multimedia_utils import _process_recursively, get_file_reference_encoder
from promptflow.contracts.multimedia import Image
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo


class AbstractRunStorage:
    """Interface for persisting node and flow run info during execution."""

    def persist_node_run(self, run_info: NodeRunInfo):
        """Write the node run info to somewhere immediately after the node is executed.

        :param run_info: The run info of the node.
        :type run_info: ~promptflow.contracts.run_info.RunInfo
        """
        raise NotImplementedError("AbstractRunStorage is an abstract class, no implementation for persist_node_run.")

    def persist_flow_run(self, run_info: FlowRunInfo):
        """Write the flow run info to somewhere immediately after one line data is executed for the flow.

        :param run_info: The run info of the flow.
        :type run_info: ~promptflow.contracts.run_info.FlowRunInfo
        """
        raise NotImplementedError("AbstractRunStorage is an abstract class, no implementation for persist_flow_run.")


class DummyRunStorage(AbstractRunStorage):
    """No-op storage, used when run info does not need to be persisted."""

    def persist_node_run(self, run_info: NodeRunInfo):
        """Dummy implementation for persist_node_run

        :param run_info: The run info of the node.
        :type run_info: ~promptflow.contracts.run_info.RunInfo
        """
        pass

    def persist_flow_run(self, run_info: FlowRunInfo):
        """Dummy implementation for persist_flow_run

        :param run_info: The run info of the flow.
        :type run_info: ~promptflow.contracts.run_info.FlowRunInfo
        """
        pass


class DefaultRunStorage(AbstractRunStorage):
    """Storage that serializes multimedia (Image) payloads in run info to disk."""

    def __init__(self, base_dir: Path = None, sub_dir: Path = None):
        """Initialize the default run storage.

        :param base_dir: The base directory to store the multimedia data.
        :type base_dir: Path
        :param sub_dir: The sub directory to store the multimedia data.
        :type sub_dir: Path
        """
        self._base_dir = base_dir
        self._sub_dir = sub_dir

    def persist_run_info(self, run_info: Union[FlowRunInfo, NodeRunInfo]):
        """Persist the multimedia data in run info after execution.

        :param run_info: The run info of the node or flow.
        :type run_info: ~promptflow.contracts.run_info.RunInfo or ~promptflow.contracts.run_info.FlowRunInfo
        """
        # Persist and convert images in inputs to path dictionaries.
        # This replaces any image objects with their corresponding file path dictionaries.
        if run_info.inputs:
            run_info.inputs = self._persist_and_convert_images_to_path_dicts(run_info.inputs)

        # Persist and convert images in output to path dictionaries.
        # This replaces any image objects with their corresponding file path dictionaries.
        # Both `output` and `result` are set to the same serialized value.
        if run_info.output:
            serialized_output = self._persist_and_convert_images_to_path_dicts(run_info.output)
            run_info.output = serialized_output
            run_info.result = serialized_output

        # Persist and convert images in api_calls to path dictionaries.
        # The `inplace=True` parameter is used here to ensure that the original list structure holding generator
        # outputs is maintained. This allows us to keep tracking the list as it dynamically changes when the
        # generator is consumed. It is crucial to process the api_calls list in place to avoid losing the reference
        # to the list that holds the generator items, which is essential for tracing generator execution.
        if run_info.api_calls:
            run_info.api_calls = self._persist_and_convert_images_to_path_dicts(run_info.api_calls, inplace=True)

    def persist_node_run(self, run_info: NodeRunInfo):
        """Persist the multimedia data in node run info after the node is executed.

        This method now delegates to the shared persist_run_info method.

        :param run_info: The run info of the node.
        :type run_info: NodeRunInfo
        """
        self.persist_run_info(run_info)

    def persist_flow_run(self, run_info: FlowRunInfo):
        """Persist the multimedia data in flow run info after one line data is executed for the flow.

        This method now delegates to the shared persist_run_info method.

        :param run_info: The run info of the flow.
        :type run_info: FlowRunInfo
        """
        self.persist_run_info(run_info)

    def _persist_and_convert_images_to_path_dicts(self, value, inplace=False):
        """Persist image objects within a Python object to disk and convert them to path dictionaries.

        This function recursively processes a given Python object, which can be a list, a dictionary, or a nested
        combination of these, searching for image objects. Each image object encountered is serialized and saved to
        disk in a pre-defined location using the `_base_dir` and `_sub_dir` attributes. The image object within the
        original data structure is then replaced with a dictionary that indicates the file path of the serialized
        image, following the format: `{'data:image/<ext>;path': '.promptflow/intermediate/<image_uuid>.<ext>'}`.

        The operation can be performed in-place on the original object or on a new copy, depending on the value of
        the `inplace` parameter. When `inplace` is set to `True`, the original object is modified; when set to
        `False`, a new object with the converted path dictionaries is returned.

        :param value: The Python object to be processed, potentially containing image objects.
        :type value: Any
        :param inplace: Whether to modify the original object in place (True) or to create a new object with
            converted path dictionaries (False).
        :type inplace: bool
        :return: The original object with converted path dictionaries if `inplace` is True, otherwise a new object
            with the conversions.
        :rtype: Any
        """
        # Without a base_dir there is nowhere to write files, so no encoder is
        # supplied and Image.serialize falls back to its default behavior.
        if self._base_dir:
            pfbytes_file_reference_encoder = get_file_reference_encoder(
                folder_path=self._base_dir,
                relative_path=self._sub_dir,
            )
        else:
            pfbytes_file_reference_encoder = None
        serialization_funcs = {Image: partial(Image.serialize, **{"encoder": pfbytes_file_reference_encoder})}
        return _process_recursively(value, process_funcs=serialization_funcs, inplace=inplace)
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/storage/run_records.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
from dataclasses import asdict, dataclass

from promptflow._utils.dataclass_serializer import serialize
from promptflow.contracts.run_info import FlowRunInfo, RunInfo


@dataclass
class NodeRunRecord:
    """Run record of one node during a single line execution of the flow.

    Timestamps are stored as ISO-8601 strings (see ``from_run_info``) so the
    record can be JSON-serialized directly by ``serialize``.
    (Fix: the fields were previously annotated ``datetime`` although string
    values are stored; ``json.dumps`` would fail on real datetimes.)

    :param str node_name: The name of the node
    :param int line_number: The index of the input line this node run belongs to
    :param str run_info: The serialized run info of the node
    :param str start_time: The time the node started running, ISO-8601 format
    :param str end_time: The time the node finished running, ISO-8601 format
    :param str status: The status of the node run
    """

    node_name: str
    line_number: int
    run_info: str
    start_time: str
    end_time: str
    status: str

    @staticmethod
    def from_run_info(run_info: RunInfo) -> "NodeRunRecord":
        """Create a NodeRunRecord from a RunInfo object.

        :param RunInfo run_info: The run info to create the NodeRunRecord from
        :return: The created NodeRunRecord
        :rtype: NodeRunRecord
        """
        return NodeRunRecord(
            node_name=run_info.node,
            line_number=run_info.index,
            run_info=serialize(run_info),
            start_time=run_info.start_time.isoformat(),
            end_time=run_info.end_time.isoformat(),
            status=run_info.status.value,
        )

    def serialize(self) -> str:
        """Serialize the NodeRunRecord for storage in blob.

        :return: The serialized result
        :rtype: str
        """
        return json.dumps(asdict(self))


@dataclass
class LineRunRecord:
    """Run record of a single line execution on the flow.

    Timestamps are stored as ISO-8601 strings (see ``from_run_info``), matching
    NodeRunRecord.

    :param int line_number: The index of the input line
    :param str run_info: The serialized run info of the line run
    :param str start_time: The time the line started executing, ISO-8601 format
    :param str end_time: The time the line finished executing, ISO-8601 format
    :param str name: The name of the line run
    :param str description: The description of the line run
    :param str status: The status of the line execution
    :param str tags: The tags associated with the line run
    """

    line_number: int
    run_info: str
    start_time: str
    end_time: str
    name: str
    description: str
    status: str
    tags: str

    @staticmethod
    def from_run_info(run_info: FlowRunInfo) -> "LineRunRecord":
        """Create a LineRunRecord from a FlowRunInfo object.

        :param FlowRunInfo run_info: The run info to create the LineRunRecord from
        :return: The created LineRunRecord
        :rtype: LineRunRecord
        """
        return LineRunRecord(
            line_number=run_info.index,
            run_info=serialize(run_info),
            start_time=run_info.start_time.isoformat(),
            end_time=run_info.end_time.isoformat(),
            name=run_info.name,
            description=run_info.description,
            status=run_info.status.value,
            tags=run_info.tags,
        )

    def serialize(self) -> str:
        """Serialize the LineRunRecord for storage in a blob.

        :return: The serialized result
        :rtype: str
        """
        return json.dumps(asdict(self))
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/storage/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- from ._cache_storage import AbstractCacheStorage # noqa: F401 from ._run_storage import AbstractRunStorage # noqa: F401 __all__ = ["AbstractCacheStorage", "AbstractRunStorage"]
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/storage/_errors.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- class DuplicatedPrimaryKeyException(Exception): pass class NotFoundException(Exception): pass
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/batch/_base_executor_proxy.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import asyncio
from datetime import datetime
from json import JSONDecodeError
from pathlib import Path
from typing import Any, Mapping, Optional

import httpx

from promptflow._constants import LINE_TIMEOUT_SEC
from promptflow._core._errors import UnexpectedError
from promptflow._utils.exception_utils import ErrorResponse, ExceptionPresenter
from promptflow._utils.logger_utils import bulk_logger
from promptflow._utils.utils import load_json
from promptflow.batch._errors import ExecutorServiceUnhealthy
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.exceptions import ErrorTarget, ValidationException
from promptflow.executor._result import AggregationResult, LineResult
from promptflow.storage._run_storage import AbstractRunStorage

EXECUTOR_UNHEALTHY_MESSAGE = "The executor service is currently not in a healthy state"


class AbstractExecutorProxy:
    """Base interface for executor proxies used by the batch engine."""

    @classmethod
    def get_tool_metadata(cls, flow_file: Path, working_dir: Optional[Path] = None) -> dict:
        """Generate tool metadata file for the specified flow."""
        # Default working_dir to the flow file's parent directory.
        return cls._get_tool_metadata(flow_file, working_dir or flow_file.parent)

    @classmethod
    def _get_tool_metadata(cls, flow_file: Path, working_dir: Path) -> dict:
        # Subclasses provide the language-specific metadata generation.
        raise NotImplementedError()

    @classmethod
    async def create(
        cls,
        flow_file: Path,
        working_dir: Optional[Path] = None,
        *,
        connections: Optional[dict] = None,
        storage: Optional[AbstractRunStorage] = None,
        **kwargs,
    ) -> "AbstractExecutorProxy":
        """Create a new executor"""
        raise NotImplementedError()

    async def destroy(self):
        """Destroy the executor"""
        pass

    async def exec_line_async(
        self,
        inputs: Mapping[str, Any],
        index: Optional[int] = None,
        run_id: Optional[str] = None,
    ) -> LineResult:
        """Execute a line"""
        raise NotImplementedError()

    async def exec_aggregation_async(
        self,
        batch_inputs: Mapping[str, Any],
        aggregation_inputs: Mapping[str, Any],
        run_id: Optional[str] = None,
    ) -> AggregationResult:
        """Execute aggregation nodes"""
        raise NotImplementedError()

    async def ensure_executor_health(self):
        """Ensure the executor service is healthy before execution"""
        pass


class APIBasedExecutorProxy(AbstractExecutorProxy):
    """Executor proxy that talks to an out-of-process executor over HTTP."""

    @property
    def api_endpoint(self) -> str:
        """The basic API endpoint of the executor service.

        The executor proxy calls the executor service to get the
        line results and aggregation result through this endpoint.
        """
        raise NotImplementedError()

    async def exec_line_async(
        self,
        inputs: Mapping[str, Any],
        index: Optional[int] = None,
        run_id: Optional[str] = None,
    ) -> LineResult:
        """Execute one line by POSTing to the service's /execution endpoint."""
        start_time = datetime.utcnow()
        # call execution api to get line results
        url = self.api_endpoint + "/execution"
        payload = {"run_id": run_id, "line_number": index, "inputs": inputs}
        async with httpx.AsyncClient() as client:
            response = await client.post(url, json=payload, timeout=LINE_TIMEOUT_SEC)
        # process the response
        result = self._process_http_response(response)
        if response.status_code != 200:
            # On failure, wrap the error dict into a failed FlowRunInfo so the
            # batch engine still receives a LineResult for this line.
            run_info = FlowRunInfo.create_with_error(start_time, inputs, index, run_id, result)
            return LineResult(output={}, aggregation_inputs={}, run_info=run_info, node_run_infos={})
        return LineResult.deserialize(result)

    async def exec_aggregation_async(
        self,
        batch_inputs: Mapping[str, Any],
        aggregation_inputs: Mapping[str, Any],
        run_id: Optional[str] = None,
    ) -> AggregationResult:
        """Execute aggregation nodes by POSTing to the service's /aggregation endpoint."""
        # call aggregation api to get aggregation result
        async with httpx.AsyncClient() as client:
            url = self.api_endpoint + "/aggregation"
            payload = {"run_id": run_id, "batch_inputs": batch_inputs, "aggregation_inputs": aggregation_inputs}
            response = await client.post(url, json=payload, timeout=LINE_TIMEOUT_SEC)
        result = self._process_http_response(response)
        return AggregationResult.deserialize(result)

    async def ensure_executor_startup(self, error_file):
        """Ensure the executor service is initialized before calling the API to get the results"""
        try:
            await self.ensure_executor_health()
        except ExecutorServiceUnhealthy as ex:
            # raise the init error if there is any
            startup_ex = self._check_startup_error_from_file(error_file) or ex
            bulk_logger.error(f"Failed to start up the executor due to an error: {str(startup_ex)}")
            await self.destroy()
            raise startup_ex

    async def ensure_executor_health(self):
        """Ensure the executor service is healthy before calling the API to get the results

        During testing, we observed that the executor service started quickly on Windows.
        However, there is a noticeable delay in booting on Linux.

        So we set a specific waiting period. If the executor service fails to return to normal
        within the allocated timeout, an exception is thrown to indicate a potential problem.
        """
        retry_count = 0
        max_retry_count = 20
        while retry_count < max_retry_count:
            # Stop polling early if the underlying process/service is gone.
            if not self._is_executor_active():
                bulk_logger.error("The executor service is not active. Please check the logs for more details.")
                break
            if await self._check_health():
                return
            # wait for 1s to prevent calling the API too frequently
            await asyncio.sleep(1)
            retry_count += 1
        raise ExecutorServiceUnhealthy(f"{EXECUTOR_UNHEALTHY_MESSAGE}. Please resubmit your flow and try again.")

    def _is_executor_active(self):
        """The interface function to check if the executor service is active"""
        # Base implementation assumes the service is active; subclasses may
        # check the real process state.
        return True

    async def _check_health(self):
        # Returns True when GET /health responds 200; any error or non-200
        # response is logged as a warning and reported as unhealthy.
        try:
            health_url = self.api_endpoint + "/health"
            async with httpx.AsyncClient() as client:
                response = await client.get(health_url)
            if response.status_code != 200:
                bulk_logger.warning(f"{EXECUTOR_UNHEALTHY_MESSAGE}. Response: {response.status_code} - {response.text}")
                return False
            return True
        except Exception as e:
            bulk_logger.warning(f"{EXECUTOR_UNHEALTHY_MESSAGE}. Error: {str(e)}")
            return False

    def _check_startup_error_from_file(self, error_file) -> Exception:
        # Reads an error JSON written by the executor during startup (if any)
        # and converts it into a ValidationException; returns None otherwise.
        error_dict = load_json(error_file)
        if error_dict:
            error_response = ErrorResponse.from_error_dict(error_dict)
            bulk_logger.error(
                "Error when starting the executor service: "
                f"[{error_response.innermost_error_code}] {error_response.message}"
            )
            return ValidationException(error_response.message, target=ErrorTarget.BATCH)
        return None

    def _process_http_response(self, response: httpx.Response):
        if response.status_code == 200:
            # if the status code is 200, the response is the json dict of a line result
            return response.json()
        else:
            # if the status code is not 200, log the error
            message_format = "Unexpected error when executing a line, status code: {status_code}, error: {error}"
            bulk_logger.error(message_format.format(status_code=response.status_code, error=response.text))
            # if response can be parsed as json, return the error dict
            # otherwise, wrap the error in an UnexpectedError and return the error dict
            try:
                error_dict = response.json()
                return error_dict["error"]
            except (JSONDecodeError, KeyError):
                unexpected_error = UnexpectedError(
                    message_format=message_format, status_code=response.status_code, error=response.text
                )
                return ExceptionPresenter.create(unexpected_error).to_dict()
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/batch/_result.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import dataclass
from datetime import datetime
from itertools import chain
from typing import Any, List, Mapping

from promptflow._utils.exception_utils import RootErrorCode
from promptflow._utils.openai_metrics_calculator import OpenAIMetricsCalculator
from promptflow.contracts.run_info import RunInfo, Status
from promptflow.executor._result import AggregationResult, LineResult


@dataclass
class LineError:
    """The error of a line in a batch run.

    It contains the line number and the error dict of a failed line in the batch run.
    The error dict is generated by ExceptionPresenter.to_dict().
    """

    line_number: int
    error: Mapping[str, Any]

    def to_dict(self):
        return {
            "line_number": self.line_number,
            "error": self.error,
        }


@dataclass
class ErrorSummary:
    """The summary of errors in a batch run.

    :param failed_user_error_lines: The number of lines that failed with user error.
    :type failed_user_error_lines: int
    :param failed_system_error_lines: The number of lines that failed with system error.
    :type failed_system_error_lines: int
    :param error_list: The line number and error dict of failed lines in the line results.
    :type error_list: List[~promptflow.batch._result.LineError]
    :param aggr_error_dict: The dict of node name and error dict of failed nodes in the aggregation result.
    :type aggr_error_dict: Mapping[str, Any]
    """

    failed_user_error_lines: int
    failed_system_error_lines: int
    error_list: List[LineError]
    aggr_error_dict: Mapping[str, Any]

    @staticmethod
    def create(line_results: List[LineResult], aggr_result: AggregationResult):
        """Build an ErrorSummary by scanning failed lines and failed aggregation nodes."""
        failed_user_error_lines = 0
        failed_system_error_lines = 0
        error_list: List[LineError] = []
        for line_result in line_results:
            if line_result.run_info.status != Status.Failed:
                continue
            flow_run = line_result.run_info
            # Classify each failed line as user or system error by its root error code.
            if flow_run.error.get("code", "") == RootErrorCode.USER_ERROR:
                failed_user_error_lines += 1
            else:
                failed_system_error_lines += 1
            line_error = LineError(
                line_number=flow_run.index,
                error=flow_run.error,
            )
            error_list.append(line_error)
        error_summary = ErrorSummary(
            failed_user_error_lines=failed_user_error_lines,
            failed_system_error_lines=failed_system_error_lines,
            error_list=sorted(error_list, key=lambda x: x.line_number),
            aggr_error_dict={
                node_name: node_run_info.error
                for node_name, node_run_info in aggr_result.node_run_infos.items()
                if node_run_info.status == Status.Failed
            },
        )
        return error_summary


@dataclass
class SystemMetrics:
    """The system metrics of a batch run."""

    total_tokens: int
    prompt_tokens: int
    completion_tokens: int
    duration: float  # in seconds

    @staticmethod
    def create(
        start_time: datetime, end_time: datetime, line_results: List[LineResult], aggr_results: AggregationResult
    ):
        """Aggregate token counts from all node runs and compute the total duration."""
        openai_metrics = SystemMetrics._get_openai_metrics(line_results, aggr_results)
        return SystemMetrics(
            total_tokens=openai_metrics.get("total_tokens", 0),
            prompt_tokens=openai_metrics.get("prompt_tokens", 0),
            completion_tokens=openai_metrics.get("completion_tokens", 0),
            duration=(end_time - start_time).total_seconds(),
        )

    @staticmethod
    def _get_openai_metrics(line_results: List[LineResult], aggr_results: AggregationResult):
        node_run_infos = _get_node_run_infos(line_results, aggr_results)
        total_metrics = {}
        calculator = OpenAIMetricsCalculator()
        for run_info in node_run_infos:
            # Prefer the metrics already recorded on the run; fall back to
            # recomputing them from the raw api_calls traces.
            metrics = SystemMetrics._try_get_openai_metrics(run_info)
            if metrics:
                calculator.merge_metrics_dict(total_metrics, metrics)
            else:
                api_calls = run_info.api_calls or []
                for call in api_calls:
                    metrics = calculator.get_openai_metrics_from_api_call(call)
                    calculator.merge_metrics_dict(total_metrics, metrics)
        return total_metrics

    # Fix: this method is invoked via the class (SystemMetrics._try_get_openai_metrics)
    # but was declared without @staticmethod; add the decorator to make the
    # calling convention explicit and correct for instance access too.
    @staticmethod
    def _try_get_openai_metrics(run_info: RunInfo):
        """Return the token metrics recorded on *run_info*, or a falsy value when incomplete."""
        openai_metrics = {}
        if run_info.system_metrics:
            for metric in ["total_tokens", "prompt_tokens", "completion_tokens"]:
                if metric not in run_info.system_metrics:
                    # Falsy sentinel kept for backward compatibility with callers
                    # that only truth-test the result.
                    return False
                openai_metrics[metric] = run_info.system_metrics[metric]
        return openai_metrics

    def to_dict(self):
        return {
            "total_tokens": self.total_tokens,
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "duration": self.duration,
        }


@dataclass
class BatchResult:
    """The result of a batch run."""

    status: Status
    total_lines: int
    completed_lines: int
    failed_lines: int
    node_status: Mapping[str, int]
    start_time: datetime
    end_time: datetime
    metrics: Mapping[str, str]
    system_metrics: SystemMetrics
    error_summary: ErrorSummary

    @classmethod
    def create(
        cls,
        start_time: datetime,
        end_time: datetime,
        line_results: List[LineResult],
        aggr_result: AggregationResult,
        status: Status = Status.Completed,
    ) -> "BatchResult":
        """Assemble a BatchResult from the per-line results and the aggregation result."""
        total_lines = len(line_results)
        completed_lines = sum(line_result.run_info.status == Status.Completed for line_result in line_results)
        failed_lines = total_lines - completed_lines
        return cls(
            status=status,
            total_lines=total_lines,
            completed_lines=completed_lines,
            failed_lines=failed_lines,
            node_status=BatchResult._get_node_status(line_results, aggr_result),
            start_time=start_time,
            end_time=end_time,
            metrics=aggr_result.metrics,
            system_metrics=SystemMetrics.create(start_time, end_time, line_results, aggr_result),
            error_summary=ErrorSummary.create(line_results, aggr_result),
        )

    @staticmethod
    def _get_node_status(line_results: List[LineResult], aggr_result: AggregationResult):
        """Count node runs per "<node>.<status>" key across line and aggregation results."""
        node_run_infos = _get_node_run_infos(line_results, aggr_result)
        node_status = {}
        for node_run_info in node_run_infos:
            key = f"{node_run_info.node}.{node_run_info.status.value.lower()}"
            node_status[key] = node_status.get(key, 0) + 1
        return node_status


def _get_node_run_infos(line_results: List[LineResult], aggr_result: AggregationResult):
    """Chain all node run infos from the line results and the aggregation result."""
    line_node_run_infos = (
        node_run_info for line_result in line_results for node_run_info in line_result.node_run_infos.values()
    )
    aggr_node_run_infos = (node_run_info for node_run_info in aggr_result.node_run_infos.values())
    return chain(line_node_run_infos, aggr_node_run_infos)
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/batch/_python_executor_proxy.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
from typing import Any, List, Mapping, Optional

from promptflow._core._errors import UnexpectedError
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._utils.logger_utils import bulk_logger
from promptflow.batch._base_executor_proxy import AbstractExecutorProxy
from promptflow.contracts.run_mode import RunMode
from promptflow.executor import FlowExecutor
from promptflow.executor._line_execution_process_pool import LineExecutionProcessPool
from promptflow.executor._result import AggregationResult, LineResult
from promptflow.executor._script_executor import ScriptExecutor
from promptflow.storage._run_storage import AbstractRunStorage


class PythonExecutorProxy(AbstractExecutorProxy):
    """Executor proxy that runs Python flows in-process via FlowExecutor."""

    def __init__(self, flow_executor: FlowExecutor):
        # The wrapped in-process executor that does the actual work.
        self._flow_executor = flow_executor

    @classmethod
    async def create(
        cls,
        flow_file: Path,
        working_dir: Optional[Path] = None,
        *,
        connections: Optional[dict] = None,
        entry: Optional[str] = None,
        storage: Optional[AbstractRunStorage] = None,
        **kwargs,
    ) -> "PythonExecutorProxy":
        """Create a proxy around a FlowExecutor built for the given flow file.

        raise_ex=False: line failures are recorded in run info instead of raised.
        """
        flow_executor = FlowExecutor.create(
            flow_file, connections, working_dir, entry=entry, storage=storage, raise_ex=False
        )
        return cls(flow_executor)

    async def exec_aggregation_async(
        self,
        batch_inputs: Mapping[str, Any],
        aggregation_inputs: Mapping[str, Any],
        run_id: Optional[str] = None,
    ) -> AggregationResult:
        """Execute the flow's aggregation nodes for the whole batch."""
        # node_log_manager captures node-level stdout/stderr during execution.
        with self._flow_executor._run_tracker.node_log_manager:
            return self._flow_executor._exec_aggregation(batch_inputs, aggregation_inputs, run_id=run_id)

    def _exec_batch(
        self,
        batch_inputs: List[Mapping[str, Any]],
        output_dir: Path,
        run_id: Optional[str] = None,
        batch_timeout_sec: Optional[int] = None,
        line_timeout_sec: Optional[int] = None,
    ) -> List[LineResult]:
        """Execute all input lines through a process pool and return their LineResults.

        :param batch_inputs: One mapping per input line; each must contain a "line_number" key.
        :param output_dir: Directory where per-line outputs are written by the pool.
        """
        # TODO: Refine the logic here since the script executor actually doesn't have the 'node' concept
        if isinstance(self._flow_executor, ScriptExecutor):
            run_tracker = RunTracker(self._flow_executor._storage)
        else:
            run_tracker = self._flow_executor._run_tracker
        with run_tracker.node_log_manager:
            OperationContext.get_instance().run_mode = RunMode.Batch.name
            if self._flow_executor._flow_file is None:
                raise UnexpectedError(
                    "Unexpected error occurred while init FlowExecutor. Error details: flow file is missing."
                )
            if batch_timeout_sec:
                bulk_logger.info(f"The timeout for the batch run is {batch_timeout_sec} seconds.")
            with LineExecutionProcessPool(
                self._flow_executor,
                len(batch_inputs),
                run_id,
                output_dir,
                batch_timeout_sec=batch_timeout_sec,
                line_timeout_sec=line_timeout_sec,
            ) as pool:
                line_number = [batch_input["line_number"] for batch_input in batch_inputs]
                line_results = pool.run(zip(line_number, batch_inputs))
            # For bulk run, currently we need to add line results to run_tracker
            self._flow_executor._add_line_results(line_results, run_tracker)
        return line_results

    def get_inputs_definition(self):
        """Expose the wrapped executor's flow input definitions."""
        return self._flow_executor.get_inputs_definition()

    @classmethod
    def _get_tool_metadata(cls, flow_file: Path, working_dir: Path) -> dict:
        # Local import to avoid pulling SDK utilities at module import time.
        from promptflow._sdk._utils import generate_flow_tools_json

        return generate_flow_tools_json(
            flow_directory=working_dir,
            dump=False,
            used_packages_only=True,
        )
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/batch/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- # flake8: noqa from ._base_executor_proxy import AbstractExecutorProxy, APIBasedExecutorProxy from ._batch_engine import BatchEngine from ._csharp_executor_proxy import CSharpExecutorProxy from ._python_executor_proxy import PythonExecutorProxy from ._result import BatchResult __all__ = [ "AbstractExecutorProxy", "APIBasedExecutorProxy", "BatchEngine", "CSharpExecutorProxy", "PythonExecutorProxy", "BatchResult", ]
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/batch/_batch_inputs_processor.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

import re
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional

from promptflow._constants import LINE_NUMBER_KEY
from promptflow._core._errors import UnexpectedError
from promptflow._utils.load_data import load_data
from promptflow._utils.logger_utils import logger
from promptflow._utils.multimedia_utils import resolve_multimedia_data_recursively
from promptflow._utils.utils import resolve_dir_to_absolute
from promptflow.batch._errors import EmptyInputsData, InputMappingError
from promptflow.contracts.flow import FlowInputDefinition


class BatchInputsProcessor:
    """Load batch-run input files and resolve them into per-line flow inputs.

    Raw data is read from the given input dirs/files, then the (possibly
    defaulted) inputs mapping is applied to produce one inputs dict per line.
    """

    def __init__(
        self,
        working_dir: Path,
        flow_inputs: Mapping[str, FlowInputDefinition],
        max_lines_count: Optional[int] = None,
    ):
        self._working_dir = working_dir
        self._max_lines_count = max_lines_count
        self._flow_inputs = flow_inputs
        # By default each flow input is read from the "data" input, e.g. ${data.question}.
        self._default_inputs_mapping = {key: f"${{data.{key}}}" for key in flow_inputs}

    def process_batch_inputs(self, input_dirs: Dict[str, str], inputs_mapping: Dict[str, str]):
        """Resolve input dirs to data, then apply the inputs mapping to every line.

        :raises EmptyInputsData: when none of the input dirs yields any data rows.
        """
        input_dicts = self._resolve_input_data(input_dirs)
        no_input_data = all(len(data) == 0 for data in input_dicts.values())
        if no_input_data:
            input_dirs_str = "\n".join(f"{input}: {Path(path).as_posix()}" for input, path in input_dirs.items())
            message_format = (
                "Couldn't find any inputs data at the given input paths. Please review the provided path "
                "and consider resubmitting.\n{input_dirs}"
            )
            raise EmptyInputsData(message_format=message_format, input_dirs=input_dirs_str)
        return self._validate_and_apply_inputs_mapping(input_dicts, inputs_mapping)

    def _resolve_input_data(self, input_dirs: Dict[str, str]):
        """Resolve input data from input dirs"""
        result = {}
        for input_key, input_dir in input_dirs.items():
            input_dir = resolve_dir_to_absolute(self._working_dir, input_dir)
            result[input_key] = self._resolve_data_from_input_path(input_dir)
        return result

    def _resolve_data_from_input_path(self, input_path: Path):
        """Resolve input data from a file or directory, honoring the max lines limit."""
        result = []
        if input_path.is_file():
            result.extend(resolve_multimedia_data_recursively(
                input_path.parent,
                load_data(local_path=input_path, max_rows_count=self._max_lines_count))
            )
        else:
            for input_file in input_path.rglob("*"):
                if input_file.is_file():
                    result.extend(resolve_multimedia_data_recursively(
                        input_file.parent,
                        load_data(local_path=input_file, max_rows_count=self._max_lines_count))
                    )
                    # Stop scanning further files once we have collected enough lines.
                    if self._max_lines_count and len(result) >= self._max_lines_count:
                        break
        if self._max_lines_count and len(result) >= self._max_lines_count:
            logger.warning(
                (
                    "The data provided exceeds the maximum lines limit. Currently, only the first "
                    f"{self._max_lines_count} lines are processed."
                )
            )
            return result[: self._max_lines_count]
        return result

    def _validate_and_apply_inputs_mapping(self, inputs, inputs_mapping) -> List[Dict[str, Any]]:
        """Validate and apply inputs mapping for all lines in the flow.

        :param inputs: The inputs to the flow.
        :type inputs: Any
        :param inputs_mapping: The mapping of input names to their corresponding values.
        :type inputs_mapping: Dict[str, Any]
        :return: A list of dictionaries containing the resolved inputs for each line in the flow.
        :rtype: List[Dict[str, Any]]
        """
        if not inputs_mapping:
            logger.warning(
                msg=(
                    "Starting run without column mapping may lead to unexpected results. "
                    "Please consult the following documentation for more information: https://aka.ms/pf/column-mapping"
                )
            )

        inputs_mapping = self._complete_inputs_mapping_by_default_value(inputs_mapping)
        resolved_inputs = self._apply_inputs_mapping_for_all_lines(inputs, inputs_mapping)
        return resolved_inputs

    def _complete_inputs_mapping_by_default_value(self, inputs_mapping):
        inputs_mapping = inputs_mapping or {}
        # Fix: copy the default mapping instead of aliasing it. The original code
        # bound result_mapping directly to self._default_inputs_mapping and then
        # mutated it (del/update below), so a second call on the same processor
        # instance would see a default mapping polluted by the previous call's
        # user-supplied entries.
        result_mapping = dict(self._default_inputs_mapping)
        # For input has default value, we don't try to read data from default mapping.
        # Default value is in higher priority than default mapping.
        for key, value in self._flow_inputs.items():
            if value and value.default is not None:
                del result_mapping[key]
        result_mapping.update(inputs_mapping)
        return result_mapping

    def _apply_inputs_mapping_for_all_lines(
        self,
        input_dict: Mapping[str, List[Mapping[str, Any]]],
        inputs_mapping: Mapping[str, str],
    ) -> List[Dict[str, Any]]:
        """Apply input mapping to all input lines.

        For example:
        input_dict = {
            'data': [{'question': 'q1', 'answer': 'ans1'}, {'question': 'q2', 'answer': 'ans2'}],
            'baseline': [{'answer': 'baseline_ans1'}, {'answer': 'baseline_ans2'}],
            'output': [{'answer': 'output_ans1', 'line_number': 0}, {'answer': 'output_ans2', 'line_number': 1}],
        }
        inputs_mapping: {
            "question": "${data.question}",  # Question from the data
            "groundtruth": "${data.answer}",  # Answer from the data
            "baseline": "${baseline.answer}",  # Answer from the baseline
            "deployment_name": "text-davinci-003",  # literal value
            "answer": "${output.answer}",  # Answer from the output
            "line_number": "${output.line_number}",  # Answer from the output
        }

        Returns:
        [{
            "question": "q1",
            "groundtruth": "ans1",
            "baseline": "baseline_ans1",
            "answer": "output_ans1",
            "deployment_name": "text-davinci-003",
            "line_number": 0,
        },
        {
            "question": "q2",
            "groundtruth": "ans2",
            "baseline": "baseline_ans2",
            "answer": "output_ans2",
            "deployment_name": "text-davinci-003",
            "line_number": 1,
        }]
        """
        if inputs_mapping is None:
            # This exception should not happen since developers need to use _default_inputs_mapping for None input.
            # So, this exception is one system error.
            raise UnexpectedError(
                message_format=(
                    "The input for batch run is incorrect. Please make sure to set up a proper input mapping before "
                    "proceeding. If you need additional help, feel free to contact support for further assistance."
                )
            )
        merged_list = self._merge_input_dicts_by_line(input_dict)
        if len(merged_list) == 0:
            raise InputMappingError(
                message_format=(
                    "The input for batch run is incorrect. Could not find one complete line on the provided input. "
                    "Please ensure that you supply data on the same line to resolve this issue."
                )
            )

        result = [apply_inputs_mapping(item, inputs_mapping) for item in merged_list]
        return result

    def _merge_input_dicts_by_line(
        self,
        input_dict: Mapping[str, List[Mapping[str, Any]]],
    ) -> List[Mapping[str, Mapping[str, Any]]]:
        for input_key, list_of_one_input in input_dict.items():
            if not list_of_one_input:
                raise InputMappingError(
                    message_format=(
                        "The input for batch run is incorrect. Input from key '{input_key}' is an empty list, "
                        "which means we cannot generate a single line input for the flow run. "
                        "Please rectify the input and try again."
                    ),
                    input_key=input_key,
                )

        # Check if line numbers are aligned.
        all_lengths_without_line_number = {
            input_key: len(list_of_one_input)
            for input_key, list_of_one_input in input_dict.items()
            if not any(LINE_NUMBER_KEY in one_item for one_item in list_of_one_input)
        }
        if len(set(all_lengths_without_line_number.values())) > 1:
            raise InputMappingError(
                message_format=(
                    "The input for batch run is incorrect. Line numbers are not aligned. "
                    "Some lists have dictionaries missing the 'line_number' key, "
                    "and the lengths of these lists are different. "
                    "List lengths are: {all_lengths_without_line_number}. "
                    "Please make sure these lists have the same length or add 'line_number' key to each dictionary."
                ),
                all_lengths_without_line_number=all_lengths_without_line_number,
            )

        # Collect each line item from each input.
        tmp_dict = {}
        for input_key, list_of_one_input in input_dict.items():
            if input_key in all_lengths_without_line_number:
                # Assume line_number start from 0.
                for index, one_line_item in enumerate(list_of_one_input):
                    if index not in tmp_dict:
                        tmp_dict[index] = {}
                    tmp_dict[index][input_key] = one_line_item
            else:
                for one_line_item in list_of_one_input:
                    if LINE_NUMBER_KEY in one_line_item:
                        index = one_line_item[LINE_NUMBER_KEY]
                        if index not in tmp_dict:
                            tmp_dict[index] = {}
                        tmp_dict[index][input_key] = one_line_item
        result = []
        for line, values_for_one_line in tmp_dict.items():
            # Missing input is not acceptable line.
            if len(values_for_one_line) != len(input_dict):
                continue
            values_for_one_line[LINE_NUMBER_KEY] = line
            result.append(values_for_one_line)
        return result


def apply_inputs_mapping(
    inputs: Mapping[str, Mapping[str, Any]],
    inputs_mapping: Mapping[str, str],
) -> Dict[str, Any]:
    """Apply input mapping to inputs for new contract.

    .. admonition:: Examples

        .. code-block:: python

            inputs: {
                "data": {"answer": "I'm fine, thank you.", "question": "How are you?"},
                "baseline": {"answer": "The weather is good."},
            }
            inputs_mapping: {
                "question": "${data.question}",
                "groundtruth": "${data.answer}",
                "baseline": "${baseline.answer}",
                "deployment_name": "literal_value",
            }

            Returns: {
                "question": "How are you?",
                "groundtruth": "I'm fine, thank you."
                "baseline": "The weather is good.",
                "deployment_name": "literal_value",
            }

    :param inputs: A mapping of input keys to their corresponding values.
    :type inputs: Mapping[str, Mapping[str, Any]]
    :param inputs_mapping: A mapping of input keys to their corresponding mapping expressions.
    :type inputs_mapping: Mapping[str, str]
    :return: A dictionary of input keys to their corresponding mapped values.
    :rtype: Dict[str, Any]
    :raises InputMappingError: If any of the input mapping relations are not found in the inputs.
    """
    result = {}
    notfound_mapping_relations = []
    for map_to_key, map_value in inputs_mapping.items():
        # Ignore reserved key configuration from input mapping.
        if map_to_key == LINE_NUMBER_KEY:
            continue
        if not isinstance(map_value, str):  # All non-string values are literal values.
            result[map_to_key] = map_value
            continue
        match = re.search(r"^\${([^{}]+)}$", map_value)
        if match is not None:
            pattern = match.group(1)
            # Could also try each pair of key value from inputs to match the pattern.
            # But split pattern by '.' is one deterministic way.
            # So, give key with less '.' higher priority.
            splitted_str = pattern.split(".")
            find_match = False
            for i in range(1, len(splitted_str)):
                key = ".".join(splitted_str[:i])
                source = ".".join(splitted_str[i:])
                if key in inputs and source in inputs[key]:
                    find_match = True
                    result[map_to_key] = inputs[key][source]
                    break
            if not find_match:
                notfound_mapping_relations.append(map_value)
        else:
            result[map_to_key] = map_value  # Literal value
    # Return all not found mapping relations in one exception to provide better debug experience.
    if notfound_mapping_relations:
        invalid_relations = ", ".join(notfound_mapping_relations)
        raise InputMappingError(
            message_format=(
                "The input for batch run is incorrect. Couldn't find these mapping relations: {invalid_relations}. "
                "Please make sure your input mapping keys and values match your YAML input section and input data. "
                "For more information, refer to the following documentation: https://aka.ms/pf/column-mapping"
            ),
            invalid_relations=invalid_relations,
        )
    # For PRS scenario, apply_inputs_mapping will be used for exec_line and line_number is not necessary.
    if LINE_NUMBER_KEY in inputs:
        result[LINE_NUMBER_KEY] = inputs[LINE_NUMBER_KEY]
    return result
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/batch/_batch_engine.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

import asyncio
import signal
import threading
import uuid
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional

from promptflow._constants import LINE_NUMBER_KEY, LINE_TIMEOUT_SEC, FlowLanguage
from promptflow._core._errors import UnexpectedError
from promptflow._core.operation_context import OperationContext
from promptflow._utils.async_utils import async_run_allowing_running_loop
from promptflow._utils.context_utils import _change_working_dir
from promptflow._utils.execution_utils import (
    apply_default_value_for_input,
    collect_lines,
    get_aggregation_inputs_properties,
    handle_line_failures,
)
from promptflow._utils.logger_utils import bulk_logger
from promptflow._utils.utils import (
    dump_list_to_jsonl,
    get_int_env_var,
    log_progress,
    resolve_dir_to_absolute,
    transpose,
)
from promptflow._utils.yaml_utils import load_yaml
from promptflow.batch._base_executor_proxy import AbstractExecutorProxy
from promptflow.batch._batch_inputs_processor import BatchInputsProcessor
from promptflow.batch._csharp_executor_proxy import CSharpExecutorProxy
from promptflow.batch._python_executor_proxy import PythonExecutorProxy
from promptflow.batch._result import BatchResult
from promptflow.contracts.flow import Flow
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, PromptflowException
from promptflow.executor._errors import InvalidFlowFileError
from promptflow.executor._line_execution_process_pool import signal_handler
from promptflow.executor._result import AggregationResult, LineResult
from promptflow.executor.flow_validator import FlowValidator
from promptflow.storage._run_storage import AbstractRunStorage

# File name used when persisting per-line outputs under the output directory.
OUTPUT_FILE_NAME = "output.jsonl"
# TODO: will remain consistent with PF_WORKER_COUNT in the future
DEFAULT_CONCURRENCY = 10


class BatchEngine:
    """This class is used to execute flows in batch mode"""

    # Maps a flow language (FlowLanguage) to the executor proxy class used to run it.
    executor_proxy_classes: Mapping[str, AbstractExecutorProxy] = {
        FlowLanguage.Python: PythonExecutorProxy,
        FlowLanguage.CSharp: CSharpExecutorProxy,
    }

    @classmethod
    def register_executor(cls, type: str, executor_proxy_cls: AbstractExecutorProxy):
        """Register an executor proxy class for a specific program language.

        This method allows users to register an executor proxy class for a particular
        programming language. The executor proxy class will be used when creating an instance
        of the BatchEngine for flows written in the specified language.

        :param type: The flow program language of the executor proxy,
        :type type: str
        :param executor_proxy_cls: The executor proxy class to be registered.
        :type executor_proxy_cls:  ~promptflow.batch.AbstractExecutorProxy
        """
        cls.executor_proxy_classes[type] = executor_proxy_cls

    def __init__(
        self,
        flow_file: Path,
        working_dir: Optional[Path] = None,
        *,
        connections: Optional[dict] = None,
        entry: Optional[str] = None,
        storage: Optional[AbstractRunStorage] = None,
        batch_timeout_sec: Optional[int] = None,
        **kwargs,
    ):
        """Create a new batch engine instance

        :param flow_file: The flow file path
        :type flow_file: Path
        :param working_dir: The flow working directory path
        :type working_dir: Optional[Path]
        :param connections: The connections used in the flow
        :type connections: Optional[dict]
        :param entry: The entry function of an eager flow (only used for non-DAG flows)
        :type entry: Optional[str]
        :param storage: The storage to store execution results
        :type storage: Optional[~promptflow.storage._run_storage.AbstractRunStorage]
        :param batch_timeout_sec: The timeout of batch run in seconds
        :type batch_timeout_sec: Optional[int]
        :param kwargs: The keyword arguments related to creating the executor proxy class
        :type kwargs: Any
        """
        self._flow_file = flow_file
        self._working_dir = Flow._resolve_working_dir(flow_file, working_dir)
        if self._is_eager_flow_yaml():
            # Eager ("entry"-style) flow: no DAG, always executed with the Python proxy.
            if Path(flow_file).suffix.lower() in [".yaml", ".yml"]:
                entry, path = self._parse_eager_flow_yaml()
                self._flow_file = Path(path)
            self._is_dag_yaml_flow = False
            self._program_language = FlowLanguage.Python
        elif Path(flow_file).suffix.lower() in [".yaml", ".yml"]:
            # DAG flow: load and validate the YAML; language comes from the flow itself.
            self._flow = Flow.from_yaml(flow_file, working_dir=self._working_dir)
            FlowValidator.ensure_flow_valid_in_batch_mode(self._flow)
            self._is_dag_yaml_flow = True
            self._program_language = self._flow.program_language
        else:
            raise InvalidFlowFileError(message_format="Unsupported flow file type: {flow_file}.", flow_file=flow_file)

        self._connections = connections
        self._entry = entry
        self._storage = storage
        self._kwargs = kwargs

        # Explicit argument wins; otherwise fall back to the PF_BATCH_TIMEOUT_SEC env var.
        self._batch_timeout_sec = (
            batch_timeout_sec if batch_timeout_sec else get_int_env_var("PF_BATCH_TIMEOUT_SEC", None)
        )
        self._line_timeout_sec = get_int_env_var("PF_LINE_TIMEOUT_SEC", LINE_TIMEOUT_SEC)

        # set it to True when the batch run is canceled
        self._is_canceled = False

    def run(
        self,
        input_dirs: Dict[str, str],
        inputs_mapping: Dict[str, str],
        output_dir: Path,
        run_id: Optional[str] = None,
        max_lines_count: Optional[int] = None,
        raise_on_line_failure: Optional[bool] = False,
    ) -> BatchResult:
        """Run flow in batch mode

        :param input_dirs: The directories path of input files
        :type input_dirs: Dict[str, str]
        :param inputs_mapping: The mapping of input names to their corresponding values.
        :type inputs_mapping: Dict[str, str]
        :param output_dir: output dir
        :type output_dir: The directory path of output files
        :param run_id: The run id of this run
        :type run_id: Optional[str]
        :param max_lines_count: The max count of inputs. If it is None, all inputs will be used.
        :type max_lines_count: Optional[int]
        :param raise_on_line_failure: Whether to raise exception when a line fails.
        :type raise_on_line_failure: Optional[bool]
        :return: The result of this batch run
        :rtype: ~promptflow.batch._result.BatchResult
        """
        try:
            self._start_time = datetime.utcnow()
            with _change_working_dir(self._working_dir):
                # create executor proxy instance according to the flow program language
                executor_proxy_cls = self.executor_proxy_classes[self._program_language]
                self._executor_proxy: AbstractExecutorProxy = async_run_allowing_running_loop(
                    executor_proxy_cls.create,
                    self._flow_file,
                    self._working_dir,
                    connections=self._connections,
                    entry=self._entry,
                    storage=self._storage,
                    **self._kwargs,
                )
                try:
                    # register signal handler for python flow in the main thread
                    # TODO: For all executor proxies that are executed locally, it might be necessary to
                    #  register a signal for Ctrl+C in order to customize some actions beyond just killing
                    #  the process, such as terminating the executor service.
                    if isinstance(self._executor_proxy, PythonExecutorProxy):
                        if threading.current_thread() is threading.main_thread():
                            signal.signal(signal.SIGINT, signal_handler)
                        else:
                            bulk_logger.info(
                                "Current thread is not main thread, skip signal handler registration in BatchEngine."
                            )

                    # set batch input source from input mapping
                    OperationContext.get_instance().set_batch_input_source_from_inputs_mapping(inputs_mapping)
                    # if using eager flow, the self._flow is none, so we need to get inputs definition from executor
                    inputs = (
                        self._flow.inputs if self._is_dag_yaml_flow else self._executor_proxy.get_inputs_definition()
                    )

                    # resolve input data from input dirs and apply inputs mapping
                    batch_input_processor = BatchInputsProcessor(self._working_dir, inputs, max_lines_count)
                    batch_inputs = batch_input_processor.process_batch_inputs(input_dirs, inputs_mapping)
                    # resolve output dir
                    output_dir = resolve_dir_to_absolute(self._working_dir, output_dir)
                    # run flow in batch mode
                    return async_run_allowing_running_loop(
                        self._exec_in_task, batch_inputs, run_id, output_dir, raise_on_line_failure
                    )
                finally:
                    # Always tear down the executor proxy, even when execution raised.
                    async_run_allowing_running_loop(self._executor_proxy.destroy)
        except Exception as e:
            bulk_logger.error(f"Error occurred while executing batch run. Exception: {str(e)}")
            if isinstance(e, PromptflowException):
                raise e
            else:
                # for unexpected error, we need to wrap it to SystemErrorException to allow us to see the stack trace.
                unexpected_error = UnexpectedError(
                    target=ErrorTarget.BATCH,
                    message_format=(
                        "Unexpected error occurred while executing the batch run. Error: {error_type_and_message}."
                    ),
                    error_type_and_message=f"({e.__class__.__name__}) {e}",
                )
                raise unexpected_error from e

    def cancel(self):
        """Cancel the batch run"""
        # Polled every second by _exec_in_task; setting the flag cancels the task there.
        self._is_canceled = True

    async def _exec_in_task(
        self,
        batch_inputs: List[Dict[str, Any]],
        run_id: str = None,
        output_dir: Path = None,
        raise_on_line_failure: bool = False,
    ) -> BatchResult:
        # if the batch run is canceled, asyncio.CancelledError will be raised and no results will be returned,
        # so we pass empty line results list and aggr results and update them in _exec so that when the batch
        # run is canceled we can get the current completed line results and aggr results.
        line_results: List[LineResult] = []
        aggr_result = AggregationResult({}, {}, {})
        task = asyncio.create_task(
            self._exec(line_results, aggr_result, batch_inputs, run_id, output_dir, raise_on_line_failure)
        )
        while not task.done():
            # check whether the task is completed or canceled every 1s
            await asyncio.sleep(1)
            if self._is_canceled:
                task.cancel()
                # use current completed line results and aggregation results to create a BatchResult
                return BatchResult.create(
                    self._start_time, datetime.utcnow(), line_results, aggr_result, status=Status.Canceled
                )
        return task.result()

    async def _exec(
        self,
        line_results: List[LineResult],
        aggr_result: AggregationResult,
        batch_inputs: List[Dict[str, Any]],
        run_id: str = None,
        output_dir: Path = None,
        raise_on_line_failure: bool = False,
    ) -> BatchResult:
        # ensure executor health before execution
        await self._executor_proxy.ensure_executor_health()
        # apply default value in early stage, so we can use it both in line and aggregation nodes execution.
        # if the flow is None, we don't need to apply default value for inputs.
        if self._is_dag_yaml_flow:
            batch_inputs = [
                apply_default_value_for_input(self._flow.inputs, each_line_input)
                for each_line_input in batch_inputs
            ]
        run_id = run_id or str(uuid.uuid4())

        # execute lines
        if isinstance(self._executor_proxy, PythonExecutorProxy):
            # Python flows run in a local process pool rather than via the async API.
            line_results.extend(
                self._executor_proxy._exec_batch(
                    batch_inputs,
                    output_dir,
                    run_id,
                    batch_timeout_sec=self._batch_timeout_sec,
                    line_timeout_sec=self._line_timeout_sec,
                )
            )
        else:
            await self._exec_batch(line_results, batch_inputs, run_id)
        handle_line_failures([r.run_info for r in line_results], raise_on_line_failure)

        # persist outputs to output dir
        outputs = [
            {LINE_NUMBER_KEY: r.run_info.index, **r.output}
            for r in line_results
            if r.run_info.status == Status.Completed
        ]
        outputs.sort(key=lambda x: x[LINE_NUMBER_KEY])
        self._persist_outputs(outputs, output_dir)

        # execute aggregation nodes
        aggr_exec_result = await self._exec_aggregation(batch_inputs, line_results, run_id)
        # use the execution result to update aggr_result to make sure we can get the aggr_result in _exec_in_task
        self._update_aggr_result(aggr_result, aggr_exec_result)
        # summary some infos from line results and aggr results to batch result
        return BatchResult.create(self._start_time, datetime.utcnow(), line_results, aggr_result)

    async def _exec_batch(
        self,
        line_results: List[LineResult],
        batch_inputs: List[Mapping[str, Any]],
        run_id: Optional[str] = None,
    ) -> List[LineResult]:
        # Bound concurrent line executions with a semaphore sized by PF_WORKER_COUNT.
        worker_count = get_int_env_var("PF_WORKER_COUNT", DEFAULT_CONCURRENCY)
        semaphore = asyncio.Semaphore(worker_count)
        pending = [
            asyncio.create_task(self._exec_line_under_semaphore(semaphore, line_inputs, i, run_id))
            for i, line_inputs in enumerate(batch_inputs)
        ]

        total_lines = len(batch_inputs)
        completed_line = 0
        while completed_line < total_lines:
            # Collect results as they finish so progress/persistence happen incrementally.
            done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
            completed_line_results = [task.result() for task in done]
            self._persist_run_info(completed_line_results)
            line_results.extend(completed_line_results)
            log_progress(
                self._start_time,
                bulk_logger,
                len(line_results),
                total_lines,
                last_log_count=completed_line,
            )
            completed_line = len(line_results)

    async def _exec_line_under_semaphore(
        self,
        semaphore,
        inputs: Mapping[str, Any],
        index: Optional[int] = None,
        run_id: Optional[str] = None,
    ):
        async with semaphore:
            return await self._executor_proxy.exec_line_async(inputs, index, run_id)

    async def _exec_aggregation(
        self,
        batch_inputs: List[dict],
        line_results: List[LineResult],
        run_id: Optional[str] = None,
    ) -> AggregationResult:
        # Eager flows have no aggregation nodes.
        if not self._is_dag_yaml_flow:
            return AggregationResult({}, {}, {})
        aggregation_nodes = {node.name for node in self._flow.nodes if node.aggregation}
        if not aggregation_nodes:
            return AggregationResult({}, {}, {})

        bulk_logger.info("Executing aggregation nodes...")

        run_infos = [r.run_info for r in line_results]
        succeeded = [i for i, r in enumerate(run_infos) if r.status == Status.Completed]

        # Only lines that completed successfully feed the aggregation nodes.
        succeeded_batch_inputs = [batch_inputs[i] for i in succeeded]
        resolved_succeeded_batch_inputs = [
            FlowValidator.ensure_flow_inputs_type(flow=self._flow, inputs=input) for input in succeeded_batch_inputs
        ]

        succeeded_inputs = transpose(resolved_succeeded_batch_inputs, keys=list(self._flow.inputs.keys()))

        aggregation_inputs = transpose(
            [result.aggregation_inputs for result in line_results],
            keys=get_aggregation_inputs_properties(self._flow),
        )
        succeeded_aggregation_inputs = collect_lines(succeeded, aggregation_inputs)
        try:
            aggr_result = await self._executor_proxy.exec_aggregation_async(
                succeeded_inputs, succeeded_aggregation_inputs, run_id
            )
            # if the flow language is python, we have already persisted node run infos during execution.
            # so we should persist node run infos in aggr_result for other languages.
            if not isinstance(self._executor_proxy, PythonExecutorProxy):
                for node_run in aggr_result.node_run_infos.values():
                    self._storage.persist_node_run(node_run)
            bulk_logger.info("Finish executing aggregation nodes.")
            return aggr_result
        except PromptflowException as e:
            # for PromptflowException, we already do classification, so throw directly.
            raise e
        except Exception as e:
            error_type_and_message = f"({e.__class__.__name__}) {e}"
            raise UnexpectedError(
                message_format=(
                    "Unexpected error occurred while executing the aggregated nodes. "
                    "Please fix or contact support for assistance. The error details: {error_type_and_message}."
                ),
                error_type_and_message=error_type_and_message,
            ) from e

    def _persist_run_info(self, line_results: List[LineResult]):
        """Persist node run infos and flow run info in line result to storage"""
        for line_result in line_results:
            for node_run in line_result.node_run_infos.values():
                self._storage.persist_node_run(node_run)
            self._storage.persist_flow_run(line_result.run_info)

    def _persist_outputs(self, outputs: List[Mapping[str, Any]], output_dir: Path):
        """Persist outputs to json line file in output directory"""
        output_file = output_dir / OUTPUT_FILE_NAME
        dump_list_to_jsonl(output_file, outputs)

    def _update_aggr_result(self, aggr_result: AggregationResult, aggr_exec_result: AggregationResult):
        """Update aggregation result with the aggregation execution result"""
        aggr_result.metrics = aggr_exec_result.metrics
        aggr_result.node_run_infos = aggr_exec_result.node_run_infos
        aggr_result.output = aggr_exec_result.output

    def _is_eager_flow_yaml(self):
        """Return True for a .py flow file or a YAML flow that declares an "entry" key."""
        if Path(self._flow_file).suffix.lower() == ".py":
            return True
        elif Path(self._flow_file).suffix.lower() in [".yaml", ".yml"]:
            flow_file = self._working_dir / self._flow_file if self._working_dir else self._flow_file
            with open(flow_file, "r", encoding="utf-8") as fin:
                flow_dag = load_yaml(fin)
            if "entry" in flow_dag:
                return True
        return False

    def _parse_eager_flow_yaml(self):
        """Read an eager-flow YAML and return its (entry, path) values, defaulting to ""."""
        flow_file = self._working_dir / self._flow_file if self._working_dir else self._flow_file
        with open(flow_file, "r", encoding="utf-8") as fin:
            flow_dag = load_yaml(fin)
        return flow_dag.get("entry", ""), flow_dag.get("path", "")
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/batch/_csharp_executor_proxy.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

import json
import socket
import subprocess
import uuid
from pathlib import Path
from typing import Any, Mapping, Optional

from promptflow._core._errors import MetaFileNotFound, MetaFileReadError
from promptflow._sdk._constants import DEFAULT_ENCODING, FLOW_TOOLS_JSON, PROMPT_FLOW_DIR_NAME
from promptflow.batch._base_executor_proxy import APIBasedExecutorProxy
from promptflow.executor._result import AggregationResult
from promptflow.storage._run_storage import AbstractRunStorage

EXECUTOR_SERVICE_DOMAIN = "http://localhost:"
EXECUTOR_SERVICE_DLL = "Promptflow.dll"


class CSharpExecutorProxy(APIBasedExecutorProxy):
    """Executor proxy that runs a C# flow by launching the dotnet executor
    service as a subprocess and talking to it over its local HTTP API."""

    def __init__(self, process: subprocess.Popen, port: str):
        # Handle to the dotnet executor service process and the port it listens on.
        self._process = process
        self._port = port

    @property
    def api_endpoint(self) -> str:
        return EXECUTOR_SERVICE_DOMAIN + self._port

    @classmethod
    async def create(
        cls,
        flow_file: Path,
        working_dir: Optional[Path] = None,
        *,
        connections: Optional[dict] = None,
        storage: Optional[AbstractRunStorage] = None,
        **kwargs,
    ) -> "CSharpExecutorProxy":
        """Create a new executor"""
        port = cls.find_available_port()
        log_path = kwargs.get("log_path", "")
        # The service writes startup errors into this temp file; it is read by
        # ensure_executor_startup and always deleted afterwards.
        init_error_file = Path(working_dir) / f"init_error_{str(uuid.uuid4())}.json"
        init_error_file.touch()
        command = [
            "dotnet",
            EXECUTOR_SERVICE_DLL,
            "-e",
            "-p",
            port,
            "--yaml_path",
            flow_file,
            "--assembly_folder",
            ".",
            "--log_path",
            log_path,
            "--log_level",
            "Warning",
            "--error_file_path",
            init_error_file,
        ]
        process = subprocess.Popen(command)
        executor_proxy = cls(process, port)
        try:
            await executor_proxy.ensure_executor_startup(init_error_file)
        finally:
            Path(init_error_file).unlink()
        return executor_proxy

    async def destroy(self):
        """Destroy the executor"""
        if self._process and self._process.poll() is None:
            # Graceful terminate first; escalate to kill if it hasn't exited in 5s.
            self._process.terminate()
            try:
                self._process.wait(timeout=5)
            except subprocess.TimeoutExpired:
                self._process.kill()

    async def exec_aggregation_async(
        self,
        batch_inputs: Mapping[str, Any],
        aggregation_inputs: Mapping[str, Any],
        run_id: Optional[str] = None,
    ) -> AggregationResult:
        # C# flows currently return an empty aggregation result.
        return AggregationResult({}, {}, {})

    def _is_executor_active(self):
        """Check if the process is still running and return False if it has exited"""
        # get the exit code of the process by poll() and if it is None, it means the process is still running
        return self._process.poll() is None

    @classmethod
    def _get_tool_metadata(cls, flow_file: Path, working_dir: Path) -> dict:
        """Load the pre-built flow tools meta json from the .promptflow directory.

        :raises MetaFileReadError: when the file exists but is not valid JSON.
        :raises MetaFileNotFound: when the file does not exist (flow not built).
        """
        flow_tools_json_path = working_dir / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
        if flow_tools_json_path.is_file():
            with open(flow_tools_json_path, mode="r", encoding=DEFAULT_ENCODING) as f:
                try:
                    return json.load(f)
                except json.JSONDecodeError:
                    raise MetaFileReadError(
                        message_format="Failed to fetch meta of tools: {file_path} is not a valid json file.",
                        file_path=flow_tools_json_path.absolute().as_posix(),
                    )
        raise MetaFileNotFound(
            message_format=(
                "Failed to fetch meta of tools: cannot find {file_path}, please build the flow project first."
            ),
            file_path=flow_tools_json_path.absolute().as_posix(),
        )

    @classmethod
    def find_available_port(cls) -> str:
        """Find an available port on localhost"""
        # Binding to port 0 lets the OS pick a free ephemeral port.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.bind(("localhost", 0))
            _, port = s.getsockname()
            return str(port)
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/batch/_errors.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException, ValidationException class InputMappingError(ValidationException): def __init__(self, target: ErrorTarget = ErrorTarget.EXECUTOR, **kwargs): super().__init__(target=target, **kwargs) class EmptyInputsData(UserErrorException): pass class ExecutorServiceUnhealthy(SystemErrorException): pass
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/entities/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- # isort: skip_file # skip to avoid circular import __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore from promptflow._sdk.entities._connection import ( AzureContentSafetyConnection, AzureOpenAIConnection, CognitiveSearchConnection, CustomConnection, OpenAIConnection, SerpConnection, QdrantConnection, FormRecognizerConnection, ) from promptflow._sdk.entities._run import Run from promptflow._core.tool import InputSetting, DynamicList from promptflow._sdk.entities._flow import FlowContext __all__ = [ # region Connection "AzureContentSafetyConnection", "AzureOpenAIConnection", "OpenAIConnection", "CustomConnection", "CognitiveSearchConnection", "SerpConnection", "QdrantConnection", "FormRecognizerConnection", # endregion # region Run "Run", # endregion # region Tool "InputSetting", "DynamicList", # endregion # region Flow "FlowContext", # endregion ]
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_internal/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore # flake8: noqa """Put some imports here for internal packages to minimize the effort of refactoring.""" from promptflow._constants import PROMPTFLOW_CONNECTIONS from promptflow._core._errors import GenerateMetaUserError, PackageToolNotFoundError, ToolExecutionError from promptflow._core.cache_manager import AbstractCacheManager, CacheManager, enable_cache from promptflow._core.connection_manager import ConnectionManager from promptflow._core.flow_execution_context import FlowExecutionContext from promptflow._core.log_manager import NodeLogManager, NodeLogWriter from promptflow._core.metric_logger import add_metric_logger from promptflow._core.openai_injector import inject_openai_api from promptflow._core.operation_context import OperationContext from promptflow._core.run_tracker import RunRecordNotFound, RunTracker from promptflow._core.tool import ToolInvoker, ToolProvider, tool from promptflow._core.tool_meta_generator import ( JinjaParsingError, MultipleToolsDefined, NoToolDefined, PythonParsingError, ReservedVariableCannotBeUsed, generate_prompt_meta, generate_python_meta, generate_tool_meta_dict_by_file, is_tool, ) from promptflow._core.tools_manager import ( BuiltinsManager, CustomPythonToolLoadError, EmptyCodeInCustomTool, MissingTargetFunction, ToolsManager, builtins, collect_package_tools, gen_dynamic_list, register_apis, register_builtins, register_connections, retrieve_tool_func_result, ) from promptflow._core.tracer import Tracer from promptflow._sdk._constants import LOCAL_MGMT_DB_PATH from promptflow._sdk._serving.response_creator import ResponseCreator from promptflow._sdk._serving.swagger import generate_swagger from promptflow._sdk._serving.utils import ( get_output_fields_to_remove, 
get_sample_json, handle_error_to_response, load_request_data, streaming_response_required, validate_request_data, ) from promptflow._sdk._utils import ( get_used_connection_names_from_environment_variables, setup_user_agent_to_operation_context, update_environment_variables_with_connections, ) from promptflow._utils.context_utils import _change_working_dir, inject_sys_path from promptflow._utils.credential_scrubber import CredentialScrubber from promptflow._utils.dataclass_serializer import deserialize_dataclass, serialize from promptflow._utils.exception_utils import ( ErrorResponse, ExceptionPresenter, JsonSerializedPromptflowException, RootErrorCode, infer_error_code_from_class, ) from promptflow._utils.execution_utils import handle_line_failures from promptflow._utils.feature_utils import Feature, FeatureState, get_feature_list from promptflow._utils.logger_utils import ( DATETIME_FORMAT, LOG_FORMAT, CredentialScrubberFormatter, FileHandler, FileHandlerConcurrentWrapper, LogContext, bulk_logger, flow_logger, get_logger, logger, update_log_path, ) from promptflow._utils.multimedia_data_converter import ( AbstractMultimediaInfoConverter, MultimediaConverter, MultimediaInfo, ResourceType, ) from promptflow._utils.multimedia_utils import ( _create_image_from_file, convert_multimedia_data_to_base64, is_multimedia_dict, persist_multimedia_data, resolve_multimedia_data_recursively, ) from promptflow._utils.utils import ( AttrDict, camel_to_snake, count_and_log_progress, load_json, reverse_transpose, set_context, transpose, ) from promptflow._version import VERSION from promptflow.batch._batch_inputs_processor import apply_inputs_mapping from promptflow.executor._errors import InputNotFound from promptflow.executor._tool_invoker import DefaultToolInvoker from promptflow.storage._run_storage import DefaultRunStorage
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_sdk/_user_agent.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- from promptflow._version import VERSION USER_AGENT = "{}/{}".format("promptflow-sdk", VERSION)
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_sdk/_constants.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import os
from enum import Enum
from pathlib import Path

LOGGER_NAME = "promptflow"

PROMPT_FLOW_HOME_DIR_ENV_VAR = "PF_HOME_DIRECTORY"
PROMPT_FLOW_DIR_NAME = ".promptflow"


def _prepare_home_dir() -> Path:
    """Prepare prompt flow home directory.

    User can configure it by setting environment variable: `PF_HOME_DIRECTORY`;
    if not configured, or configured value is not valid, use default value: "~/.promptflow/".

    :returns: the resolved, created home directory path.
    :raises Exception: when even the default "~/.promptflow/" cannot be created.
    """
    from promptflow._utils.logger_utils import get_cli_sdk_logger

    logger = get_cli_sdk_logger()
    if PROMPT_FLOW_HOME_DIR_ENV_VAR in os.environ:
        logger.debug(
            f"environment variable {PROMPT_FLOW_HOME_DIR_ENV_VAR!r} is set, honor it preparing home directory."
        )
        try:
            pf_home_dir = Path(os.getenv(PROMPT_FLOW_HOME_DIR_ENV_VAR)).resolve()
            pf_home_dir.mkdir(parents=True, exist_ok=True)
            return pf_home_dir
        except Exception as e:  # pylint: disable=broad-except
            # Configured path is unusable: warn and fall through to the default.
            _warning_message = (
                "Invalid configuration for prompt flow home directory: "
                f"{os.getenv(PROMPT_FLOW_HOME_DIR_ENV_VAR)!r}: {str(e)!r}.\n"
                'Fall back to use default value: "~/.promptflow/".'
            )
            logger.warning(_warning_message)

    try:
        logger.debug("preparing home directory with default value.")
        pf_home_dir = (Path.home() / PROMPT_FLOW_DIR_NAME).resolve()
        pf_home_dir.mkdir(parents=True, exist_ok=True)
        return pf_home_dir
    except Exception as e:  # pylint: disable=broad-except
        # BUGFIX: the original message referenced the module-level constant
        # HOME_PROMPT_FLOW_DIR here, but that global is only assigned *after*
        # this function returns successfully -- so formatting the message on
        # failure raised NameError instead of the intended error. Build the
        # default path locally instead.
        _error_message = (
            f"Cannot create prompt flow home directory: {str(e)!r}.\n"
            "Please check if you have proper permission to operate the directory "
            f"{(Path.home() / PROMPT_FLOW_DIR_NAME).as_posix()!r}; or configure it via "
            f"environment variable {PROMPT_FLOW_HOME_DIR_ENV_VAR!r}.\n"
        )
        logger.error(_error_message)
        raise Exception(_error_message)


HOME_PROMPT_FLOW_DIR = _prepare_home_dir()

DAG_FILE_NAME = "flow.dag.yaml"
NODE_VARIANTS = "node_variants"
VARIANTS = "variants"
NODES = "nodes"
NODE = "node"
INPUTS = "inputs"
USE_VARIANTS = "use_variants"
DEFAULT_VAR_ID = "default_variant_id"
FLOW_TOOLS_JSON = "flow.tools.json"
FLOW_TOOLS_JSON_GEN_TIMEOUT = 60
PROMPT_FLOW_RUNS_DIR_NAME = ".runs"
PROMPT_FLOW_EXP_DIR_NAME = ".exps"
SERVICE_CONFIG_FILE = "pf.yaml"
PF_SERVICE_PORT_FILE = "pfs.port"
PF_SERVICE_LOG_FILE = "pfs.log"

LOCAL_MGMT_DB_PATH = (HOME_PROMPT_FLOW_DIR / "pf.sqlite").resolve()
LOCAL_MGMT_DB_SESSION_ACQUIRE_LOCK_PATH = (HOME_PROMPT_FLOW_DIR / "pf.sqlite.lock").resolve()
SCHEMA_INFO_TABLENAME = "schema_info"
RUN_INFO_TABLENAME = "run_info"
RUN_INFO_CREATED_ON_INDEX_NAME = "idx_run_info_created_on"
CONNECTION_TABLE_NAME = "connection"
EXPERIMENT_TABLE_NAME = "experiment"
EXPERIMENT_CREATED_ON_INDEX_NAME = "idx_experiment_created_on"
BASE_PATH_CONTEXT_KEY = "base_path"
SCHEMA_KEYS_CONTEXT_CONFIG_KEY = "schema_configs_keys"
SCHEMA_KEYS_CONTEXT_SECRET_KEY = "schema_secrets_keys"
PARAMS_OVERRIDE_KEY = "params_override"
FILE_PREFIX = "file:"
KEYRING_SYSTEM = "promptflow"
KEYRING_ENCRYPTION_KEY_NAME = "encryption_key"
KEYRING_ENCRYPTION_LOCK_PATH = (HOME_PROMPT_FLOW_DIR / "encryption_key.lock").resolve()
REFRESH_CONNECTIONS_DIR_LOCK_PATH = (HOME_PROMPT_FLOW_DIR / "refresh_connections_dir.lock").resolve()
# Note: Use this only for show. Reading input should regard all '*' string as scrubbed, no matter the length.
SCRUBBED_VALUE = "******"
SCRUBBED_VALUE_NO_CHANGE = "<no-change>"
SCRUBBED_VALUE_USER_INPUT = "<user-input>"
CHAT_HISTORY = "chat_history"
WORKSPACE_LINKED_DATASTORE_NAME = "workspaceblobstore"
LINE_NUMBER = "line_number"
AZUREML_PF_RUN_PROPERTIES_LINEAGE = "azureml.promptflow.input_run_id"
AZURE_WORKSPACE_REGEX_FORMAT = (
    "^azureml:[/]{1,2}subscriptions/([^/]+)/resource(groups|Groups)/([^/]+)"
    "(/providers/Microsoft.MachineLearningServices)?/workspaces/([^/]+)$"
)
DEFAULT_ENCODING = "utf-8"
LOCAL_STORAGE_BATCH_SIZE = 1
LOCAL_SERVICE_PORT = 5000
BULK_RUN_ERRORS = "BulkRunErrors"
RUN_MACRO = "${run}"
VARIANT_ID_MACRO = "${variant_id}"
TIMESTAMP_MACRO = "${timestamp}"
DEFAULT_VARIANT = "variant_0"

# run visualize constants
VIS_HTML_TMPL = Path(__file__).parent / "data" / "visualize.j2"
VIS_JS_BUNDLE_FILENAME = "bulkTestDetails.min.js"
VIS_PORTAL_URL_TMPL = (
    "https://ml.azure.com/prompts/flow/bulkrun/runs/outputs"
    "?wsid=/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}"
    "/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}&runId={names}"
)

REMOTE_URI_PREFIX = "azureml:"
REGISTRY_URI_PREFIX = "azureml://registries/"
FLOW_RESOURCE_ID_PREFIX = "azureml://locations/"
FLOW_DIRECTORY_MACRO_IN_CONFIG = "${flow_directory}"

# Tool meta info
UIONLY_HIDDEN = "uionly_hidden"
SKIP_FUNC_PARAMS = ["subscription_id", "resource_group_name", "workspace_name"]
ICON_DARK = "icon_dark"
ICON_LIGHT = "icon_light"
ICON = "icon"
TOOL_SCHEMA = Path(__file__).parent / "data" / "tool.schema.json"


class CustomStrongTypeConnectionConfigs:
    # Reserved config keys carried on a custom strong-type connection; every
    # other key is treated as a user-defined ("custom") key.
    PREFIX = "promptflow.connection."
    TYPE = "custom_type"
    MODULE = "module"
    PACKAGE = "package"
    PACKAGE_VERSION = "package_version"
    PROMPTFLOW_TYPE_KEY = PREFIX + TYPE
    PROMPTFLOW_MODULE_KEY = PREFIX + MODULE
    PROMPTFLOW_PACKAGE_KEY = PREFIX + PACKAGE
    PROMPTFLOW_PACKAGE_VERSION_KEY = PREFIX + PACKAGE_VERSION

    @staticmethod
    def is_custom_key(key):
        """Return True when *key* is not one of the reserved promptflow.connection.* keys."""
        return key not in [
            CustomStrongTypeConnectionConfigs.PROMPTFLOW_TYPE_KEY,
            CustomStrongTypeConnectionConfigs.PROMPTFLOW_MODULE_KEY,
            CustomStrongTypeConnectionConfigs.PROMPTFLOW_PACKAGE_KEY,
            CustomStrongTypeConnectionConfigs.PROMPTFLOW_PACKAGE_VERSION_KEY,
        ]


class RunTypes:
    BATCH = "batch"
    EVALUATION = "evaluation"
    PAIRWISE_EVALUATE = "pairwise_evaluate"


class AzureRunTypes:
    """Run types for run entity from index service."""

    BATCH = "azureml.promptflow.FlowRun"
    EVALUATION = "azureml.promptflow.EvaluationRun"
    PAIRWISE_EVALUATE = "azureml.promptflow.PairwiseEvaluationRun"


class RestRunTypes:
    """Run types for run entity from MT service."""

    BATCH = "FlowRun"
    EVALUATION = "EvaluationRun"
    PAIRWISE_EVALUATE = "PairwiseEvaluationRun"


# run document statuses
class RunStatus(object):
    # Ordered by transition order
    QUEUED = "Queued"
    NOT_STARTED = "NotStarted"
    PREPARING = "Preparing"
    PROVISIONING = "Provisioning"
    STARTING = "Starting"
    RUNNING = "Running"
    CANCEL_REQUESTED = "CancelRequested"
    CANCELED = "Canceled"
    FINALIZING = "Finalizing"
    COMPLETED = "Completed"
    FAILED = "Failed"
    UNAPPROVED = "Unapproved"
    NOTRESPONDING = "NotResponding"
    PAUSING = "Pausing"
    PAUSED = "Paused"

    @classmethod
    def list(cls):
        """Return the list of supported run statuses."""
        return [
            cls.QUEUED,
            cls.PREPARING,
            cls.PROVISIONING,
            cls.STARTING,
            cls.RUNNING,
            cls.CANCEL_REQUESTED,
            cls.CANCELED,
            cls.FINALIZING,
            cls.COMPLETED,
            cls.FAILED,
            cls.NOT_STARTED,
            cls.UNAPPROVED,
            cls.NOTRESPONDING,
            cls.PAUSING,
            cls.PAUSED,
        ]

    @classmethod
    def get_running_statuses(cls):
        """Return the list of running statuses."""
        return [
            cls.NOT_STARTED,
            cls.QUEUED,
            cls.PREPARING,
            cls.PROVISIONING,
            cls.STARTING,
            cls.RUNNING,
            cls.UNAPPROVED,
            cls.NOTRESPONDING,
            cls.PAUSING,
            cls.PAUSED,
        ]

    @classmethod
    def get_post_processing_statuses(cls):
        """Return the list of post-processing statuses."""
        # DOCFIX: the original docstring said "running statuses", a copy-paste
        # of the method above; these are the post-processing statuses.
        return [cls.CANCEL_REQUESTED, cls.FINALIZING]


class FlowRunProperties:
    FLOW_PATH = "flow_path"
    OUTPUT_PATH = "output_path"
    NODE_VARIANT = "node_variant"
    RUN = "run"
    SYSTEM_METRICS = "system_metrics"


class CommonYamlFields:
    """Common yaml fields.

    Common yaml fields are used to define the common fields in yaml files. It can be one of the following values: type,
    name, $schema.
    """

    TYPE = "type"
    """Type."""

    NAME = "name"
    """Name."""

    SCHEMA = "$schema"
    """Schema."""


MAX_LIST_CLI_RESULTS = 50  # general list
MAX_RUN_LIST_RESULTS = 50  # run list
MAX_SHOW_DETAILS_RESULTS = 100  # show details


class CLIListOutputFormat:
    JSON = "json"
    TABLE = "table"


class LocalStorageFilenames:
    SNAPSHOT_FOLDER = "snapshot"
    DAG = DAG_FILE_NAME
    FLOW_TOOLS_JSON = FLOW_TOOLS_JSON
    INPUTS = "inputs.jsonl"
    OUTPUTS = "outputs.jsonl"
    DETAIL = "detail.json"
    METRICS = "metrics.json"
    LOG = "logs.txt"
    EXCEPTION = "error.json"
    META = "meta.json"


class ListViewType(str, Enum):
    ACTIVE_ONLY = "ActiveOnly"
    ARCHIVED_ONLY = "ArchivedOnly"
    ALL = "All"


def get_list_view_type(archived_only: bool, include_archived: bool) -> ListViewType:
    """Map the two mutually-exclusive archive-filter flags to a ListViewType.

    :raises Exception: when both flags are set at once.
    """
    if archived_only and include_archived:
        raise Exception("Cannot provide both archived-only and include-archived.")
    if include_archived:
        return ListViewType.ALL
    elif archived_only:
        return ListViewType.ARCHIVED_ONLY
    else:
        return ListViewType.ACTIVE_ONLY


class RunInfoSources(str, Enum):
    """Run sources."""

    LOCAL = "local"
    INDEX_SERVICE = "index_service"
    RUN_HISTORY = "run_history"
    MT_SERVICE = "mt_service"
    EXISTING_RUN = "existing_run"


class ConfigValueType(str, Enum):
    STRING = "String"
    SECRET = "Secret"


class ConnectionType(str, Enum):
    _NOT_SET = "NotSet"
    AZURE_OPEN_AI = "AzureOpenAI"
    OPEN_AI = "OpenAI"
    QDRANT = "Qdrant"
    COGNITIVE_SEARCH = "CognitiveSearch"
    SERP = "Serp"
    AZURE_CONTENT_SAFETY = "AzureContentSafety"
    FORM_RECOGNIZER = "FormRecognizer"
    WEAVIATE = "Weaviate"
    CUSTOM = "Custom"


# All "<Type>Connection" class names, excluding the NotSet sentinel.
ALL_CONNECTION_TYPES = set(
    map(lambda x: f"{x.value}Connection", filter(lambda x: x != ConnectionType._NOT_SET, ConnectionType))
)


class ConnectionFields(str, Enum):
    CONNECTION = "connection"
    DEPLOYMENT_NAME = "deployment_name"
    MODEL = "model"


SUPPORTED_CONNECTION_FIELDS = {
    ConnectionFields.CONNECTION.value,
    ConnectionFields.DEPLOYMENT_NAME.value,
    ConnectionFields.MODEL.value,
}


class RunDataKeys:
    PORTAL_URL = "portal_url"
    DATA = "data"
    RUN = "run"
    OUTPUT = "output"


class RunHistoryKeys:
    RunMetaData = "runMetadata"
    HIDDEN = "hidden"


class ConnectionProvider(str, Enum):
    LOCAL = "local"
    AZUREML = "azureml"


class FlowType:
    STANDARD = "standard"
    EVALUATION = "evaluation"
    CHAT = "chat"

    @staticmethod
    def get_all_values():
        """Return all upper-case string constants declared on FlowType."""
        values = [value for key, value in vars(FlowType).items() if isinstance(value, str) and key.isupper()]
        return values


CLIENT_FLOW_TYPE_2_SERVICE_FLOW_TYPE = {
    FlowType.STANDARD: "default",
    FlowType.EVALUATION: "evaluation",
    FlowType.CHAT: "chat",
}

SERVICE_FLOW_TYPE_2_CLIENT_FLOW_TYPE = {value: key for key, value in CLIENT_FLOW_TYPE_2_SERVICE_FLOW_TYPE.items()}


class AzureFlowSource:
    LOCAL = "local"
    PF_SERVICE = "pf_service"
    INDEX = "index"


class DownloadedRun:
    SNAPSHOT_FOLDER = LocalStorageFilenames.SNAPSHOT_FOLDER
    METRICS_FILE_NAME = LocalStorageFilenames.METRICS
    LOGS_FILE_NAME = LocalStorageFilenames.LOG
    RUN_METADATA_FILE_NAME = "run_metadata.json"


class ExperimentNodeType(object):
    FLOW = "flow"
    CODE = "code"


class ExperimentStatus(object):
    NOT_STARTED = "NotStarted"
    IN_PROGRESS = "InProgress"
    TERMINATED = "Terminated"
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_sdk/__init__.py
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_sdk/_visualize_functions.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import shutil
import tempfile
import webbrowser
from dataclasses import asdict
from pathlib import Path
from typing import Optional

from promptflow._sdk._constants import VIS_HTML_TMPL, VIS_JS_BUNDLE_FILENAME
from promptflow._sdk._utils import render_jinja_template
from promptflow.contracts._run_management import VisualizationRender


def generate_html_string(data: dict) -> str:
    """Render the run-visualization HTML page for *data* from the Jinja template."""
    visualization_render = VisualizationRender(data=data)
    return render_jinja_template(VIS_HTML_TMPL, **asdict(visualization_render))


def try_to_open_html(html_path: str) -> None:
    """Best-effort open of the generated HTML file in the default web browser.

    Prints guidance for manual viewing when the browser cannot be launched.
    """
    print(f"The HTML file is generated at {str(Path(html_path).resolve().absolute())!r}.")
    print("Trying to view the result in a web browser...")
    # FIX: dropped the dead `web_browser_opened = False` store that was
    # immediately overwritten by the call below.
    web_browser_opened = webbrowser.open(f"file://{html_path}")
    if not web_browser_opened:
        print(
            f"Failed to visualize from the web browser, the HTML file locates at {html_path!r}.\n"
            "You can manually open it with your web browser, or try SDK to visualize it."
        )
    else:
        print("Successfully visualized from the web browser.")


def dump_js_bundle(html_path: str) -> None:
    """Copy the packaged JS bundle next to *html_path* so the page can load it."""
    js_bundle_src_path = Path(__file__).parent / "data" / VIS_JS_BUNDLE_FILENAME
    js_bundle_dst_path = Path(html_path).parent / VIS_JS_BUNDLE_FILENAME
    shutil.copy(js_bundle_src_path, js_bundle_dst_path)


def dump_html(html_string: str, html_path: Optional[str] = None, open_html: bool = True) -> None:
    """Write *html_string* to *html_path* (or a temp file), ship the JS bundle, and optionally open it.

    :param html_string: full HTML document text to persist.
    :param html_path: destination path; when None a NamedTemporaryFile is created (kept, delete=False).
    :param open_html: when True, attempt to open the result in a browser.
    """
    if html_path is not None:
        # BUGFIX: write UTF-8 explicitly; the original used the locale-default
        # encoding here while the temp-file branch below already encodes UTF-8,
        # which could mangle non-ASCII content on e.g. Windows.
        with open(html_path, "w", encoding="utf-8") as f:
            f.write(html_string)
    else:
        with tempfile.NamedTemporaryFile(prefix="pf-visualize-detail-", suffix=".html", delete=False) as f:
            f.write(html_string.encode("utf-8"))
            html_path = f.name

    dump_js_bundle(html_path)

    if open_html:
        try_to_open_html(html_path)
0
promptflow_repo/promptflow/src/promptflow/promptflow
promptflow_repo/promptflow/src/promptflow/promptflow/_sdk/_run_functions.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from os import PathLike
from typing import IO, AnyStr, Union

from promptflow._sdk._load_functions import load_run
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities._run import Run


def _create_run(run: Run, **kwargs):
    """Submit *run* through a fresh PFClient and return the created/updated run."""
    return PFClient().runs.create_or_update(run=run, **kwargs)


def create_yaml_run(source: Union[str, PathLike, IO[AnyStr]], params_override: list = None, **kwargs):
    """Create a run from a yaml file. Should only call from CLI."""
    loaded_run = load_run(source, params_override=params_override, **kwargs)
    return _create_run(run=loaded_run, **kwargs)
0