text
stringlengths
8
1.72M
id
stringlengths
22
143
metadata
dict
__index_level_0__
int64
0
104
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
from azure.core.exceptions import HttpResponseError

from promptflow._sdk._orm import RunInfo
from promptflow.exceptions import _ErrorInfo, ErrorCategory, ErrorTarget, UserErrorException
from promptflow.executor import FlowValidator
from promptflow.executor._errors import InvalidNodeReference

FLOWS_DIR = "./tests/test_configs/flows/print_input_flow"


@pytest.mark.unittest
class TestExceptions:
    """Unit tests for ``_ErrorInfo.get_error_info`` classification of exceptions.

    Each test raises (or triggers) a specific exception and checks the
    five-tuple ``(category, type, target, message, detail)`` it is mapped to.
    """

    def test_error_category_with_unknow_error(self, pf):
        """An arbitrary built-in error (FileNotFoundError) maps to UNKNOWN."""
        ex = None
        try:
            pf.run("./exceptions/flows")
        except Exception as e:
            ex = e
        error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)

        assert error_category == ErrorCategory.UNKNOWN
        assert error_type == "FileNotFoundError"
        assert error_target == ErrorTarget.UNKNOWN
        assert error_message == ""
        # The detail records the raising module and source line.
        assert (
            "module=promptflow._sdk._pf_client, "
            'code=raise FileNotFoundError(f"flow path {flow} does not exist"), '
            "lineno="
        ) in error_detail

    def test_error_category_with_user_error(self, pf):
        """A promptflow user error (RunNotFoundError) maps to USER_ERROR."""
        ex = None
        try:
            RunInfo.get("run_name")
        except Exception as e:
            ex = e
        error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)

        assert error_category == ErrorCategory.USER_ERROR
        assert error_type == "RunNotFoundError"
        assert error_target == ErrorTarget.CONTROL_PLANE_SDK
        assert error_message == ""
        assert (
            "module=promptflow._sdk._orm.run_info, "
            'code=raise RunNotFoundError(f"Run name {name!r} cannot be found."), '
            "lineno="
        ) in error_detail

    def test_error_category_with_system_error(self):
        """An executor validation failure maps to SYSTEM_ERROR with a templated message."""
        ex = None
        try:
            FlowValidator._validate_aggregation_inputs({}, {"input1": "value1"})
        except Exception as e:
            ex = e
        error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)

        assert error_category == ErrorCategory.SYSTEM_ERROR
        assert error_type == "InvalidAggregationInput"
        assert error_target == ErrorTarget.UNKNOWN
        # The message keeps the *unformatted* template placeholders.
        assert error_message == (
            "The input for aggregation is incorrect. "
            "The value for aggregated reference input '{input_key}' should be a list, "
            "but received {value_type}. "
            "Please adjust the input value to match the expected format."
        )
        assert (
            "module=promptflow.executor.flow_validator, "
            "code=raise InvalidAggregationInput(, "
            "lineno="
        ) in error_detail

    def test_error_category_with_http_error(self, subscription_id, resource_group_name, workspace_name):
        """A plain HttpResponseError without a status code maps to UNKNOWN."""
        try:
            raise HttpResponseError(message="HttpResponseError")
        except Exception as e:
            ex = e
        error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)

        assert error_category == ErrorCategory.UNKNOWN
        assert error_type == "HttpResponseError"
        assert error_target == ErrorTarget.UNKNOWN
        assert error_message == ""
        assert error_detail == ""

    @pytest.mark.parametrize(
        "status_code, expected_error_category",
        [
            (203, ErrorCategory.UNKNOWN),
            (304, ErrorCategory.UNKNOWN),
            (400, ErrorCategory.UNKNOWN),
            (401, ErrorCategory.UNKNOWN),
            (429, ErrorCategory.UNKNOWN),
            (500, ErrorCategory.UNKNOWN),
        ],
    )
    def test_error_category_with_status_code(self, status_code, expected_error_category):
        """An exception carrying only a ``status_code`` attribute still maps to UNKNOWN."""
        try:
            raise Exception()
        except Exception as e:
            e.status_code = status_code
            ex = e
        error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)

        assert error_category == expected_error_category
        assert error_type == "Exception"
        assert error_target == ErrorTarget.UNKNOWN
        assert error_message == ""
        assert error_detail == ""

    def test_error_category_with_executor_error(self):
        """An executor-raised user error keeps its message template and EXECUTOR target."""
        try:
            msg_format = (
                "Invalid node definitions found in the flow graph. Non-aggregation node '{invalid_reference}' "
                "cannot be referenced in the activate config of the aggregation node '{node_name}'. Please "
                "review and rectify the node reference."
            )
            raise InvalidNodeReference(message_format=msg_format, invalid_reference=None, node_name="node_name")
        except Exception as e:
            ex = e
        error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)

        assert error_category == ErrorCategory.USER_ERROR
        assert error_type == "InvalidNodeReference"
        assert error_target == ErrorTarget.EXECUTOR
        assert error_message == (
            "Invalid node definitions found in the flow graph. Non-aggregation node '{invalid_reference}' "
            "cannot be referenced in the activate config of the aggregation node '{node_name}'. Please "
            "review and rectify the node reference."
        )
        assert error_detail == ""

    def test_error_category_with_cause_exception1(self):
        """cause exception is PromptflowException and e is PromptflowException, recording e."""
        # Case 1: re-raise with ``from e`` chaining.
        ex = None
        try:
            try:
                FlowValidator._validate_aggregation_inputs({}, {"input1": "value1"})
            except Exception as e:
                raise UserErrorException("FlowValidator._validate_aggregation_inputs failed") from e
        except Exception as e:
            ex = e
        error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)

        assert error_category == ErrorCategory.USER_ERROR
        assert error_type == "InvalidAggregationInput"
        assert error_target == ErrorTarget.UNKNOWN
        assert error_message == ""
        assert error_detail == ""

        # Case 2: wrap via the ``error=`` keyword instead of ``from e``.
        ex = None
        try:
            try:
                FlowValidator._validate_aggregation_inputs({}, {"input1": "value1"})
            except Exception as e:
                raise UserErrorException(message=str(e), error=e)
        except Exception as e:
            ex = e
        error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)

        assert error_category == ErrorCategory.USER_ERROR
        assert error_type == "InvalidAggregationInput"
        assert error_target == ErrorTarget.UNKNOWN
        assert error_message == ""
        assert error_detail == ""

    def test_error_category_with_cause_exception2(self):
        """cause exception is PromptflowException and e is not PromptflowException, recording cause exception."""
        ex = None
        try:
            try:
                FlowValidator._validate_aggregation_inputs({}, {"input1": "value1"})
            except Exception as e:
                raise Exception("FlowValidator._validate_aggregation_inputs failed") from e
        except Exception as e:
            ex = e
        error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)

        assert error_category == ErrorCategory.SYSTEM_ERROR
        assert error_type == "InvalidAggregationInput"
        assert error_target == ErrorTarget.UNKNOWN
        assert error_message == (
            "The input for aggregation is incorrect. The value for aggregated reference "
            "input '{input_key}' should be a list, but received {value_type}. Please "
            "adjust the input value to match the expected format."
        )
        assert (
            "module=promptflow.executor.flow_validator, "
            "code=raise InvalidAggregationInput(, "
            "lineno="
        ) in error_detail

    def test_error_category_with_cause_exception3(self, pf):
        """cause exception is not PromptflowException and e is not PromptflowException, recording e exception."""
        ex = None
        try:
            try:
                pf.run("./exceptions/flows")
            except Exception as e:
                raise Exception("pf run failed") from e
        except Exception as e:
            ex = e
        error_category, error_type, error_target, error_message, error_detail = _ErrorInfo.get_error_info(ex)

        assert error_category == ErrorCategory.UNKNOWN
        assert error_type == "Exception"
        assert error_target == ErrorTarget.UNKNOWN
        assert error_message == ""
        assert error_detail == ""
promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_exceptions.py/0
{ "file_path": "promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_exceptions.py", "repo_id": "promptflow", "token_count": 3605 }
53
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import tempfile
import uuid
from pathlib import Path

import mock
import pytest
from sdk_cli_azure_test.recording_utilities import is_replay

from promptflow import PFClient
from promptflow._sdk.entities import CustomConnection

from ..utils import PFSOperations, check_activity_end_telemetry


def create_custom_connection(client: PFClient) -> str:
    """Create a throw-away custom connection and return its (random) name."""
    name = str(uuid.uuid4())
    connection = CustomConnection(name=name, configs={"api_base": "test"}, secrets={"api_key": "test"})
    client.connections.create_or_update(connection)
    return name


@pytest.mark.e2etest
class TestConnectionAPIs:
    """End-to-end tests for the PFS connection REST APIs and their telemetry."""

    def test_list_connections(self, pf_client: PFClient, pfs_op: PFSOperations) -> None:
        """Listing returns at least the connection we just created."""
        create_custom_connection(pf_client)
        with check_activity_end_telemetry(activity_name="pf.connections.list"):
            connections = pfs_op.list_connections().json
        assert len(connections) >= 1

    def test_get_connection(self, pf_client: PFClient, pfs_op: PFSOperations) -> None:
        """Get returns scrubbed secrets; the with-secret variant returns them in clear."""
        name = create_custom_connection(pf_client)
        with check_activity_end_telemetry(activity_name="pf.connections.get"):
            conn_from_pfs = pfs_op.get_connection(name=name, status_code=200).json
        assert conn_from_pfs["name"] == name
        assert conn_from_pfs["configs"]["api_base"] == "test"
        assert "api_key" in conn_from_pfs["secrets"]

        # get connection with secret
        with check_activity_end_telemetry(activity_name="pf.connections.get"):
            conn_from_pfs = pfs_op.get_connection_with_secret(name=name, status_code=200).json
        # A clear-text secret is not masked with leading asterisks.
        assert not conn_from_pfs["secrets"]["api_key"].startswith("*")

    def test_delete_connection(self, pf_client: PFClient, pfs_op: PFSOperations) -> None:
        """Deleting a freshly created connection restores the original count."""
        len_connections = len(pfs_op.list_connections().json)
        name = create_custom_connection(pf_client)
        with check_activity_end_telemetry(
            expected_activities=[
                {"activity_name": "pf.connections.delete", "first_call": True},
            ]
        ):
            pfs_op.delete_connection(name=name, status_code=204)
        len_connections_after = len(pfs_op.list_connections().json)
        assert len_connections_after == len_connections

    def test_list_connection_with_invalid_user(self, pfs_op: PFSOperations) -> None:
        """An invalid user is rejected with 403."""
        # TODO: should we record telemetry for this case?
        with check_activity_end_telemetry(expected_activities=[]):
            conn_from_pfs = pfs_op.connection_operation_with_invalid_user()
        assert conn_from_pfs.status_code == 403

    def test_get_connection_specs(self, pfs_op: PFSOperations) -> None:
        """The specs endpoint returns more than one connection spec."""
        with check_activity_end_telemetry(expected_activities=[]):
            specs = pfs_op.get_connection_specs(status_code=200).json
        assert len(specs) > 1

    @pytest.mark.skipif(is_replay(), reason="connection provider test, skip in non-live mode.")
    def test_get_connection_by_provicer(self, pfs_op, subscription_id, resource_group_name, workspace_name):
        """Connections can be resolved through the azureml provider, configured
        either via the SDK configuration or via an ``.azureml/config.json`` file."""
        # Provider configured through the SDK Configuration object.
        target = "promptflow._sdk._pf_client.Configuration.get_connection_provider"
        provider_url_target = (
            "promptflow._sdk.operations._local_azure_connection_operations."
            "LocalAzureConnectionOperations._extract_workspace"
        )
        mock_provider_url = (subscription_id, resource_group_name, workspace_name)
        with mock.patch(target) as mocked_config, mock.patch(provider_url_target) as mocked_provider_url:
            mocked_config.return_value = "azureml"
            mocked_provider_url.return_value = mock_provider_url
            connections = pfs_op.list_connections(status_code=200).json
            assert len(connections) > 0

            connection = pfs_op.get_connection(name=connections[0]["name"], status_code=200).json
            assert connection["name"] == connections[0]["name"]

        # Provider configured through an .azureml/config.json in the working dir.
        target = "promptflow._sdk._pf_client.Configuration.get_config"
        with tempfile.TemporaryDirectory() as temp:
            config_file = Path(temp) / ".azureml" / "config.json"
            config_file.parent.mkdir(parents=True, exist_ok=True)
            with open(config_file, "w") as f:
                config = {
                    "subscription_id": subscription_id,
                    "resource_group": resource_group_name,
                    "workspace_name": workspace_name,
                }
                json.dump(config, f)
            with mock.patch(target) as mocked_config:
                mocked_config.return_value = "azureml"
                connections = pfs_op.list_connections_by_provider(working_dir=temp, status_code=200).json
                assert len(connections) > 0

                connection = pfs_op.get_connections_by_provider(
                    name=connections[0]["name"], working_dir=temp, status_code=200
                ).json
                assert connection["name"] == connections[0]["name"]

                # this test checked 2 cases:
                # 1. if the working directory is not exist, it should return 400
                # 2. working directory has been encoded and decoded correctly, so that previous call may pass validation
                error_message = pfs_op.list_connections_by_provider(
                    working_dir=temp + "not exist", status_code=400
                ).json
                assert error_message == {
                    "errors": {"working_directory": "Invalid working directory."},
                    "message": "Input payload validation failed",
                }
promptflow/src/promptflow/tests/sdk_pfs_test/e2etests/test_connection_apis.py/0
{ "file_path": "promptflow/src/promptflow/tests/sdk_pfs_test/e2etests/test_connection_apis.py", "repo_id": "promptflow", "token_count": 2392 }
54
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json name: my_open_ai_connection type: open_ai api_key: "<to-be-replaced>" organization: "org" base_url: ""
promptflow/src/promptflow/tests/test_configs/connections/openai_connection.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/connections/openai_connection.yaml", "repo_id": "promptflow", "token_count": 78 }
55
{"text":"data_5000"} {"text":"data_5001"} {"text":"data_5002"} {"text":"data_5003"} {"text":"data_5004"} {"text":"data_5005"} {"text":"data_5006"} {"text":"data_5007"} {"text":"data_5008"} {"text":"data_5009"} {"text":"data_5010"} {"text":"data_5011"} {"text":"data_5012"} {"text":"data_5013"} {"text":"data_5014"} {"text":"data_5015"} {"text":"data_5016"} {"text":"data_5017"} {"text":"data_5018"} {"text":"data_5019"} {"text":"data_5020"} {"text":"data_5021"} {"text":"data_5022"} {"text":"data_5023"} {"text":"data_5024"} {"text":"data_5025"} {"text":"data_5026"} {"text":"data_5027"} {"text":"data_5028"} {"text":"data_5029"} {"text":"data_5030"} {"text":"data_5031"} {"text":"data_5032"} {"text":"data_5033"} {"text":"data_5034"} {"text":"data_5035"} {"text":"data_5036"} {"text":"data_5037"} {"text":"data_5038"} {"text":"data_5039"} {"text":"data_5040"} {"text":"data_5041"} {"text":"data_5042"} {"text":"data_5043"} {"text":"data_5044"} {"text":"data_5045"} {"text":"data_5046"} {"text":"data_5047"} {"text":"data_5048"} {"text":"data_5049"} {"text":"data_5050"} {"text":"data_5051"} {"text":"data_5052"} {"text":"data_5053"} {"text":"data_5054"} {"text":"data_5055"} {"text":"data_5056"} {"text":"data_5057"} {"text":"data_5058"} {"text":"data_5059"} {"text":"data_5060"} {"text":"data_5061"} {"text":"data_5062"} {"text":"data_5063"} {"text":"data_5064"} {"text":"data_5065"} {"text":"data_5066"} {"text":"data_5067"} {"text":"data_5068"} {"text":"data_5069"} {"text":"data_5070"} {"text":"data_5071"} {"text":"data_5072"} {"text":"data_5073"} {"text":"data_5074"} {"text":"data_5075"} {"text":"data_5076"} {"text":"data_5077"} {"text":"data_5078"} {"text":"data_5079"} {"text":"data_5080"} {"text":"data_5081"} {"text":"data_5082"} {"text":"data_5083"} {"text":"data_5084"} {"text":"data_5085"} {"text":"data_5086"} {"text":"data_5087"} {"text":"data_5088"} {"text":"data_5089"} {"text":"data_5090"} {"text":"data_5091"} {"text":"data_5092"} {"text":"data_5093"} {"text":"data_5094"} 
{"text":"data_5095"} {"text":"data_5096"} {"text":"data_5097"} {"text":"data_5098"} {"text":"data_5099"} {"text":"data_5100"} {"text":"data_5101"} {"text":"data_5102"} {"text":"data_5103"} {"text":"data_5104"} {"text":"data_5105"} {"text":"data_5106"} {"text":"data_5107"} {"text":"data_5108"} {"text":"data_5109"} {"text":"data_5110"} {"text":"data_5111"} {"text":"data_5112"} {"text":"data_5113"} {"text":"data_5114"} {"text":"data_5115"} {"text":"data_5116"} {"text":"data_5117"} {"text":"data_5118"} {"text":"data_5119"} {"text":"data_5120"} {"text":"data_5121"} {"text":"data_5122"} {"text":"data_5123"} {"text":"data_5124"} {"text":"data_5125"} {"text":"data_5126"} {"text":"data_5127"} {"text":"data_5128"} {"text":"data_5129"} {"text":"data_5130"} {"text":"data_5131"} {"text":"data_5132"} {"text":"data_5133"} {"text":"data_5134"} {"text":"data_5135"} {"text":"data_5136"} {"text":"data_5137"} {"text":"data_5138"} {"text":"data_5139"} {"text":"data_5140"} {"text":"data_5141"} {"text":"data_5142"} {"text":"data_5143"} {"text":"data_5144"} {"text":"data_5145"} {"text":"data_5146"} {"text":"data_5147"} {"text":"data_5148"} {"text":"data_5149"} {"text":"data_5150"} {"text":"data_5151"} {"text":"data_5152"} {"text":"data_5153"} {"text":"data_5154"} {"text":"data_5155"} {"text":"data_5156"} {"text":"data_5157"} {"text":"data_5158"} {"text":"data_5159"} {"text":"data_5160"} {"text":"data_5161"} {"text":"data_5162"} {"text":"data_5163"} {"text":"data_5164"} {"text":"data_5165"} {"text":"data_5166"} {"text":"data_5167"} {"text":"data_5168"} {"text":"data_5169"} {"text":"data_5170"} {"text":"data_5171"} {"text":"data_5172"} {"text":"data_5173"} {"text":"data_5174"} {"text":"data_5175"} {"text":"data_5176"} {"text":"data_5177"} {"text":"data_5178"} {"text":"data_5179"} {"text":"data_5180"} {"text":"data_5181"} {"text":"data_5182"} {"text":"data_5183"} {"text":"data_5184"} {"text":"data_5185"} {"text":"data_5186"} {"text":"data_5187"} {"text":"data_5188"} {"text":"data_5189"} 
{"text":"data_5190"} {"text":"data_5191"} {"text":"data_5192"} {"text":"data_5193"} {"text":"data_5194"} {"text":"data_5195"} {"text":"data_5196"} {"text":"data_5197"} {"text":"data_5198"} {"text":"data_5199"} {"text":"data_5200"} {"text":"data_5201"} {"text":"data_5202"} {"text":"data_5203"} {"text":"data_5204"} {"text":"data_5205"} {"text":"data_5206"} {"text":"data_5207"} {"text":"data_5208"} {"text":"data_5209"} {"text":"data_5210"} {"text":"data_5211"} {"text":"data_5212"} {"text":"data_5213"} {"text":"data_5214"} {"text":"data_5215"} {"text":"data_5216"} {"text":"data_5217"} {"text":"data_5218"} {"text":"data_5219"} {"text":"data_5220"} {"text":"data_5221"} {"text":"data_5222"} {"text":"data_5223"} {"text":"data_5224"} {"text":"data_5225"} {"text":"data_5226"} {"text":"data_5227"} {"text":"data_5228"} {"text":"data_5229"} {"text":"data_5230"} {"text":"data_5231"} {"text":"data_5232"} {"text":"data_5233"} {"text":"data_5234"} {"text":"data_5235"} {"text":"data_5236"} {"text":"data_5237"} {"text":"data_5238"} {"text":"data_5239"} {"text":"data_5240"} {"text":"data_5241"} {"text":"data_5242"} {"text":"data_5243"} {"text":"data_5244"} {"text":"data_5245"} {"text":"data_5246"} {"text":"data_5247"} {"text":"data_5248"} {"text":"data_5249"} {"text":"data_5250"} {"text":"data_5251"} {"text":"data_5252"} {"text":"data_5253"} {"text":"data_5254"} {"text":"data_5255"} {"text":"data_5256"} {"text":"data_5257"} {"text":"data_5258"} {"text":"data_5259"} {"text":"data_5260"} {"text":"data_5261"} {"text":"data_5262"} {"text":"data_5263"} {"text":"data_5264"} {"text":"data_5265"} {"text":"data_5266"} {"text":"data_5267"} {"text":"data_5268"} {"text":"data_5269"} {"text":"data_5270"} {"text":"data_5271"} {"text":"data_5272"} {"text":"data_5273"} {"text":"data_5274"} {"text":"data_5275"} {"text":"data_5276"} {"text":"data_5277"} {"text":"data_5278"} {"text":"data_5279"} {"text":"data_5280"} {"text":"data_5281"} {"text":"data_5282"} {"text":"data_5283"} {"text":"data_5284"} 
{"text":"data_5285"} {"text":"data_5286"} {"text":"data_5287"} {"text":"data_5288"} {"text":"data_5289"} {"text":"data_5290"} {"text":"data_5291"} {"text":"data_5292"} {"text":"data_5293"} {"text":"data_5294"} {"text":"data_5295"} {"text":"data_5296"} {"text":"data_5297"} {"text":"data_5298"} {"text":"data_5299"} {"text":"data_5300"} {"text":"data_5301"} {"text":"data_5302"} {"text":"data_5303"} {"text":"data_5304"} {"text":"data_5305"} {"text":"data_5306"} {"text":"data_5307"} {"text":"data_5308"} {"text":"data_5309"} {"text":"data_5310"} {"text":"data_5311"} {"text":"data_5312"} {"text":"data_5313"} {"text":"data_5314"} {"text":"data_5315"} {"text":"data_5316"} {"text":"data_5317"} {"text":"data_5318"} {"text":"data_5319"} {"text":"data_5320"} {"text":"data_5321"} {"text":"data_5322"} {"text":"data_5323"} {"text":"data_5324"} {"text":"data_5325"} {"text":"data_5326"} {"text":"data_5327"} {"text":"data_5328"} {"text":"data_5329"} {"text":"data_5330"} {"text":"data_5331"} {"text":"data_5332"} {"text":"data_5333"} {"text":"data_5334"} {"text":"data_5335"} {"text":"data_5336"} {"text":"data_5337"} {"text":"data_5338"} {"text":"data_5339"} {"text":"data_5340"} {"text":"data_5341"} {"text":"data_5342"} {"text":"data_5343"} {"text":"data_5344"} {"text":"data_5345"} {"text":"data_5346"} {"text":"data_5347"} {"text":"data_5348"} {"text":"data_5349"} {"text":"data_5350"} {"text":"data_5351"} {"text":"data_5352"} {"text":"data_5353"} {"text":"data_5354"} {"text":"data_5355"} {"text":"data_5356"} {"text":"data_5357"} {"text":"data_5358"} {"text":"data_5359"} {"text":"data_5360"} {"text":"data_5361"} {"text":"data_5362"} {"text":"data_5363"} {"text":"data_5364"} {"text":"data_5365"} {"text":"data_5366"} {"text":"data_5367"} {"text":"data_5368"} {"text":"data_5369"} {"text":"data_5370"} {"text":"data_5371"} {"text":"data_5372"} {"text":"data_5373"} {"text":"data_5374"} {"text":"data_5375"} {"text":"data_5376"} {"text":"data_5377"} {"text":"data_5378"} {"text":"data_5379"} 
{"text":"data_5380"} {"text":"data_5381"} {"text":"data_5382"} {"text":"data_5383"} {"text":"data_5384"} {"text":"data_5385"} {"text":"data_5386"} {"text":"data_5387"} {"text":"data_5388"} {"text":"data_5389"} {"text":"data_5390"} {"text":"data_5391"} {"text":"data_5392"} {"text":"data_5393"} {"text":"data_5394"} {"text":"data_5395"} {"text":"data_5396"} {"text":"data_5397"} {"text":"data_5398"} {"text":"data_5399"} {"text":"data_5400"} {"text":"data_5401"} {"text":"data_5402"} {"text":"data_5403"} {"text":"data_5404"} {"text":"data_5405"} {"text":"data_5406"} {"text":"data_5407"} {"text":"data_5408"} {"text":"data_5409"} {"text":"data_5410"} {"text":"data_5411"} {"text":"data_5412"} {"text":"data_5413"} {"text":"data_5414"} {"text":"data_5415"} {"text":"data_5416"} {"text":"data_5417"} {"text":"data_5418"} {"text":"data_5419"} {"text":"data_5420"} {"text":"data_5421"} {"text":"data_5422"} {"text":"data_5423"} {"text":"data_5424"} {"text":"data_5425"} {"text":"data_5426"} {"text":"data_5427"} {"text":"data_5428"} {"text":"data_5429"} {"text":"data_5430"} {"text":"data_5431"} {"text":"data_5432"} {"text":"data_5433"} {"text":"data_5434"} {"text":"data_5435"} {"text":"data_5436"} {"text":"data_5437"} {"text":"data_5438"} {"text":"data_5439"} {"text":"data_5440"} {"text":"data_5441"} {"text":"data_5442"} {"text":"data_5443"} {"text":"data_5444"} {"text":"data_5445"} {"text":"data_5446"} {"text":"data_5447"} {"text":"data_5448"} {"text":"data_5449"} {"text":"data_5450"} {"text":"data_5451"} {"text":"data_5452"} {"text":"data_5453"} {"text":"data_5454"} {"text":"data_5455"} {"text":"data_5456"} {"text":"data_5457"} {"text":"data_5458"} {"text":"data_5459"} {"text":"data_5460"} {"text":"data_5461"} {"text":"data_5462"} {"text":"data_5463"} {"text":"data_5464"} {"text":"data_5465"} {"text":"data_5466"} {"text":"data_5467"} {"text":"data_5468"} {"text":"data_5469"} {"text":"data_5470"} {"text":"data_5471"} {"text":"data_5472"} {"text":"data_5473"} {"text":"data_5474"} 
{"text":"data_5475"} {"text":"data_5476"} {"text":"data_5477"} {"text":"data_5478"} {"text":"data_5479"} {"text":"data_5480"} {"text":"data_5481"} {"text":"data_5482"} {"text":"data_5483"} {"text":"data_5484"} {"text":"data_5485"} {"text":"data_5486"} {"text":"data_5487"} {"text":"data_5488"} {"text":"data_5489"} {"text":"data_5490"} {"text":"data_5491"} {"text":"data_5492"} {"text":"data_5493"} {"text":"data_5494"} {"text":"data_5495"} {"text":"data_5496"} {"text":"data_5497"} {"text":"data_5498"} {"text":"data_5499"} {"text":"data_5500"} {"text":"data_5501"} {"text":"data_5502"} {"text":"data_5503"} {"text":"data_5504"} {"text":"data_5505"} {"text":"data_5506"} {"text":"data_5507"} {"text":"data_5508"} {"text":"data_5509"} {"text":"data_5510"} {"text":"data_5511"} {"text":"data_5512"} {"text":"data_5513"} {"text":"data_5514"} {"text":"data_5515"} {"text":"data_5516"} {"text":"data_5517"} {"text":"data_5518"} {"text":"data_5519"} {"text":"data_5520"} {"text":"data_5521"} {"text":"data_5522"} {"text":"data_5523"} {"text":"data_5524"} {"text":"data_5525"} {"text":"data_5526"} {"text":"data_5527"} {"text":"data_5528"} {"text":"data_5529"} {"text":"data_5530"} {"text":"data_5531"} {"text":"data_5532"} {"text":"data_5533"} {"text":"data_5534"} {"text":"data_5535"} {"text":"data_5536"} {"text":"data_5537"} {"text":"data_5538"} {"text":"data_5539"} {"text":"data_5540"} {"text":"data_5541"} {"text":"data_5542"} {"text":"data_5543"} {"text":"data_5544"} {"text":"data_5545"} {"text":"data_5546"} {"text":"data_5547"} {"text":"data_5548"} {"text":"data_5549"} {"text":"data_5550"} {"text":"data_5551"} {"text":"data_5552"} {"text":"data_5553"} {"text":"data_5554"} {"text":"data_5555"} {"text":"data_5556"} {"text":"data_5557"} {"text":"data_5558"} {"text":"data_5559"} {"text":"data_5560"} {"text":"data_5561"} {"text":"data_5562"} {"text":"data_5563"} {"text":"data_5564"} {"text":"data_5565"} {"text":"data_5566"} {"text":"data_5567"} {"text":"data_5568"} {"text":"data_5569"} 
{"text":"data_5570"} {"text":"data_5571"} {"text":"data_5572"} {"text":"data_5573"} {"text":"data_5574"} {"text":"data_5575"} {"text":"data_5576"} {"text":"data_5577"} {"text":"data_5578"} {"text":"data_5579"} {"text":"data_5580"} {"text":"data_5581"} {"text":"data_5582"} {"text":"data_5583"} {"text":"data_5584"} {"text":"data_5585"} {"text":"data_5586"} {"text":"data_5587"} {"text":"data_5588"} {"text":"data_5589"} {"text":"data_5590"} {"text":"data_5591"} {"text":"data_5592"} {"text":"data_5593"} {"text":"data_5594"} {"text":"data_5595"} {"text":"data_5596"} {"text":"data_5597"} {"text":"data_5598"} {"text":"data_5599"} {"text":"data_5600"} {"text":"data_5601"} {"text":"data_5602"} {"text":"data_5603"} {"text":"data_5604"} {"text":"data_5605"} {"text":"data_5606"} {"text":"data_5607"} {"text":"data_5608"} {"text":"data_5609"} {"text":"data_5610"} {"text":"data_5611"} {"text":"data_5612"} {"text":"data_5613"} {"text":"data_5614"} {"text":"data_5615"} {"text":"data_5616"} {"text":"data_5617"} {"text":"data_5618"} {"text":"data_5619"} {"text":"data_5620"} {"text":"data_5621"} {"text":"data_5622"} {"text":"data_5623"} {"text":"data_5624"} {"text":"data_5625"} {"text":"data_5626"} {"text":"data_5627"} {"text":"data_5628"} {"text":"data_5629"} {"text":"data_5630"} {"text":"data_5631"} {"text":"data_5632"} {"text":"data_5633"} {"text":"data_5634"} {"text":"data_5635"} {"text":"data_5636"} {"text":"data_5637"} {"text":"data_5638"} {"text":"data_5639"} {"text":"data_5640"} {"text":"data_5641"} {"text":"data_5642"} {"text":"data_5643"} {"text":"data_5644"} {"text":"data_5645"} {"text":"data_5646"} {"text":"data_5647"} {"text":"data_5648"} {"text":"data_5649"} {"text":"data_5650"} {"text":"data_5651"} {"text":"data_5652"} {"text":"data_5653"} {"text":"data_5654"} {"text":"data_5655"} {"text":"data_5656"} {"text":"data_5657"} {"text":"data_5658"} {"text":"data_5659"} {"text":"data_5660"} {"text":"data_5661"} {"text":"data_5662"} {"text":"data_5663"} {"text":"data_5664"} 
{"text":"data_5665"} {"text":"data_5666"} {"text":"data_5667"} {"text":"data_5668"} {"text":"data_5669"} {"text":"data_5670"} {"text":"data_5671"} {"text":"data_5672"} {"text":"data_5673"} {"text":"data_5674"} {"text":"data_5675"} {"text":"data_5676"} {"text":"data_5677"} {"text":"data_5678"} {"text":"data_5679"} {"text":"data_5680"} {"text":"data_5681"} {"text":"data_5682"} {"text":"data_5683"} {"text":"data_5684"} {"text":"data_5685"} {"text":"data_5686"} {"text":"data_5687"} {"text":"data_5688"} {"text":"data_5689"} {"text":"data_5690"} {"text":"data_5691"} {"text":"data_5692"} {"text":"data_5693"} {"text":"data_5694"} {"text":"data_5695"} {"text":"data_5696"} {"text":"data_5697"} {"text":"data_5698"} {"text":"data_5699"} {"text":"data_5700"} {"text":"data_5701"} {"text":"data_5702"} {"text":"data_5703"} {"text":"data_5704"} {"text":"data_5705"} {"text":"data_5706"} {"text":"data_5707"} {"text":"data_5708"} {"text":"data_5709"} {"text":"data_5710"} {"text":"data_5711"} {"text":"data_5712"} {"text":"data_5713"} {"text":"data_5714"} {"text":"data_5715"} {"text":"data_5716"} {"text":"data_5717"} {"text":"data_5718"} {"text":"data_5719"} {"text":"data_5720"} {"text":"data_5721"} {"text":"data_5722"} {"text":"data_5723"} {"text":"data_5724"} {"text":"data_5725"} {"text":"data_5726"} {"text":"data_5727"} {"text":"data_5728"} {"text":"data_5729"} {"text":"data_5730"} {"text":"data_5731"} {"text":"data_5732"} {"text":"data_5733"} {"text":"data_5734"} {"text":"data_5735"} {"text":"data_5736"} {"text":"data_5737"} {"text":"data_5738"} {"text":"data_5739"} {"text":"data_5740"} {"text":"data_5741"} {"text":"data_5742"} {"text":"data_5743"} {"text":"data_5744"} {"text":"data_5745"} {"text":"data_5746"} {"text":"data_5747"} {"text":"data_5748"} {"text":"data_5749"} {"text":"data_5750"} {"text":"data_5751"} {"text":"data_5752"} {"text":"data_5753"} {"text":"data_5754"} {"text":"data_5755"} {"text":"data_5756"} {"text":"data_5757"} {"text":"data_5758"} {"text":"data_5759"} 
{"text":"data_5760"} {"text":"data_5761"} {"text":"data_5762"} {"text":"data_5763"} {"text":"data_5764"} {"text":"data_5765"} {"text":"data_5766"} {"text":"data_5767"} {"text":"data_5768"} {"text":"data_5769"} {"text":"data_5770"} {"text":"data_5771"} {"text":"data_5772"} {"text":"data_5773"} {"text":"data_5774"} {"text":"data_5775"} {"text":"data_5776"} {"text":"data_5777"} {"text":"data_5778"} {"text":"data_5779"} {"text":"data_5780"} {"text":"data_5781"} {"text":"data_5782"} {"text":"data_5783"} {"text":"data_5784"} {"text":"data_5785"} {"text":"data_5786"} {"text":"data_5787"} {"text":"data_5788"} {"text":"data_5789"} {"text":"data_5790"} {"text":"data_5791"} {"text":"data_5792"} {"text":"data_5793"} {"text":"data_5794"} {"text":"data_5795"} {"text":"data_5796"} {"text":"data_5797"} {"text":"data_5798"} {"text":"data_5799"} {"text":"data_5800"} {"text":"data_5801"} {"text":"data_5802"} {"text":"data_5803"} {"text":"data_5804"} {"text":"data_5805"} {"text":"data_5806"} {"text":"data_5807"} {"text":"data_5808"} {"text":"data_5809"} {"text":"data_5810"} {"text":"data_5811"} {"text":"data_5812"} {"text":"data_5813"} {"text":"data_5814"} {"text":"data_5815"} {"text":"data_5816"} {"text":"data_5817"} {"text":"data_5818"} {"text":"data_5819"} {"text":"data_5820"} {"text":"data_5821"} {"text":"data_5822"} {"text":"data_5823"} {"text":"data_5824"} {"text":"data_5825"} {"text":"data_5826"} {"text":"data_5827"} {"text":"data_5828"} {"text":"data_5829"} {"text":"data_5830"} {"text":"data_5831"} {"text":"data_5832"} {"text":"data_5833"} {"text":"data_5834"} {"text":"data_5835"} {"text":"data_5836"} {"text":"data_5837"} {"text":"data_5838"} {"text":"data_5839"} {"text":"data_5840"} {"text":"data_5841"} {"text":"data_5842"} {"text":"data_5843"} {"text":"data_5844"} {"text":"data_5845"} {"text":"data_5846"} {"text":"data_5847"} {"text":"data_5848"} {"text":"data_5849"} {"text":"data_5850"} {"text":"data_5851"} {"text":"data_5852"} {"text":"data_5853"} {"text":"data_5854"} 
{"text":"data_5855"} {"text":"data_5856"} {"text":"data_5857"} {"text":"data_5858"} {"text":"data_5859"} {"text":"data_5860"} {"text":"data_5861"} {"text":"data_5862"} {"text":"data_5863"} {"text":"data_5864"} {"text":"data_5865"} {"text":"data_5866"} {"text":"data_5867"} {"text":"data_5868"} {"text":"data_5869"} {"text":"data_5870"} {"text":"data_5871"} {"text":"data_5872"} {"text":"data_5873"} {"text":"data_5874"} {"text":"data_5875"} {"text":"data_5876"} {"text":"data_5877"} {"text":"data_5878"} {"text":"data_5879"} {"text":"data_5880"} {"text":"data_5881"} {"text":"data_5882"} {"text":"data_5883"} {"text":"data_5884"} {"text":"data_5885"} {"text":"data_5886"} {"text":"data_5887"} {"text":"data_5888"} {"text":"data_5889"} {"text":"data_5890"} {"text":"data_5891"} {"text":"data_5892"} {"text":"data_5893"} {"text":"data_5894"} {"text":"data_5895"} {"text":"data_5896"} {"text":"data_5897"} {"text":"data_5898"} {"text":"data_5899"} {"text":"data_5900"} {"text":"data_5901"} {"text":"data_5902"} {"text":"data_5903"} {"text":"data_5904"} {"text":"data_5905"} {"text":"data_5906"} {"text":"data_5907"} {"text":"data_5908"} {"text":"data_5909"} {"text":"data_5910"} {"text":"data_5911"} {"text":"data_5912"} {"text":"data_5913"} {"text":"data_5914"} {"text":"data_5915"} {"text":"data_5916"} {"text":"data_5917"} {"text":"data_5918"} {"text":"data_5919"} {"text":"data_5920"} {"text":"data_5921"} {"text":"data_5922"} {"text":"data_5923"} {"text":"data_5924"} {"text":"data_5925"} {"text":"data_5926"} {"text":"data_5927"} {"text":"data_5928"} {"text":"data_5929"} {"text":"data_5930"} {"text":"data_5931"} {"text":"data_5932"} {"text":"data_5933"} {"text":"data_5934"} {"text":"data_5935"} {"text":"data_5936"} {"text":"data_5937"} {"text":"data_5938"} {"text":"data_5939"} {"text":"data_5940"} {"text":"data_5941"} {"text":"data_5942"} {"text":"data_5943"} {"text":"data_5944"} {"text":"data_5945"} {"text":"data_5946"} {"text":"data_5947"} {"text":"data_5948"} {"text":"data_5949"} 
{"text":"data_5950"} {"text":"data_5951"} {"text":"data_5952"} {"text":"data_5953"} {"text":"data_5954"} {"text":"data_5955"} {"text":"data_5956"} {"text":"data_5957"} {"text":"data_5958"} {"text":"data_5959"} {"text":"data_5960"} {"text":"data_5961"} {"text":"data_5962"} {"text":"data_5963"} {"text":"data_5964"} {"text":"data_5965"} {"text":"data_5966"} {"text":"data_5967"} {"text":"data_5968"} {"text":"data_5969"} {"text":"data_5970"} {"text":"data_5971"} {"text":"data_5972"} {"text":"data_5973"} {"text":"data_5974"} {"text":"data_5975"} {"text":"data_5976"} {"text":"data_5977"} {"text":"data_5978"} {"text":"data_5979"} {"text":"data_5980"} {"text":"data_5981"} {"text":"data_5982"} {"text":"data_5983"} {"text":"data_5984"} {"text":"data_5985"} {"text":"data_5986"} {"text":"data_5987"} {"text":"data_5988"} {"text":"data_5989"} {"text":"data_5990"} {"text":"data_5991"} {"text":"data_5992"} {"text":"data_5993"} {"text":"data_5994"} {"text":"data_5995"} {"text":"data_5996"} {"text":"data_5997"} {"text":"data_5998"} {"text":"data_5999"} {"text":"data_6000"} {"text":"data_6001"} {"text":"data_6002"} {"text":"data_6003"} {"text":"data_6004"} {"text":"data_6005"} {"text":"data_6006"} {"text":"data_6007"} {"text":"data_6008"} {"text":"data_6009"} {"text":"data_6010"} {"text":"data_6011"} {"text":"data_6012"} {"text":"data_6013"} {"text":"data_6014"} {"text":"data_6015"} {"text":"data_6016"} {"text":"data_6017"} {"text":"data_6018"} {"text":"data_6019"} {"text":"data_6020"} {"text":"data_6021"} {"text":"data_6022"} {"text":"data_6023"} {"text":"data_6024"} {"text":"data_6025"} {"text":"data_6026"} {"text":"data_6027"} {"text":"data_6028"} {"text":"data_6029"} {"text":"data_6030"} {"text":"data_6031"} {"text":"data_6032"} {"text":"data_6033"} {"text":"data_6034"} {"text":"data_6035"} {"text":"data_6036"} {"text":"data_6037"} {"text":"data_6038"} {"text":"data_6039"} {"text":"data_6040"} {"text":"data_6041"} {"text":"data_6042"} {"text":"data_6043"} {"text":"data_6044"} 
{"text":"data_6045"} {"text":"data_6046"} {"text":"data_6047"} {"text":"data_6048"} {"text":"data_6049"} {"text":"data_6050"} {"text":"data_6051"} {"text":"data_6052"} {"text":"data_6053"} {"text":"data_6054"} {"text":"data_6055"} {"text":"data_6056"} {"text":"data_6057"} {"text":"data_6058"} {"text":"data_6059"} {"text":"data_6060"} {"text":"data_6061"} {"text":"data_6062"} {"text":"data_6063"} {"text":"data_6064"} {"text":"data_6065"} {"text":"data_6066"} {"text":"data_6067"} {"text":"data_6068"} {"text":"data_6069"} {"text":"data_6070"} {"text":"data_6071"} {"text":"data_6072"} {"text":"data_6073"} {"text":"data_6074"} {"text":"data_6075"} {"text":"data_6076"} {"text":"data_6077"} {"text":"data_6078"} {"text":"data_6079"} {"text":"data_6080"} {"text":"data_6081"} {"text":"data_6082"} {"text":"data_6083"} {"text":"data_6084"} {"text":"data_6085"} {"text":"data_6086"} {"text":"data_6087"} {"text":"data_6088"} {"text":"data_6089"} {"text":"data_6090"} {"text":"data_6091"} {"text":"data_6092"} {"text":"data_6093"} {"text":"data_6094"} {"text":"data_6095"} {"text":"data_6096"} {"text":"data_6097"} {"text":"data_6098"} {"text":"data_6099"} {"text":"data_6100"} {"text":"data_6101"} {"text":"data_6102"} {"text":"data_6103"} {"text":"data_6104"} {"text":"data_6105"} {"text":"data_6106"} {"text":"data_6107"} {"text":"data_6108"} {"text":"data_6109"} {"text":"data_6110"} {"text":"data_6111"} {"text":"data_6112"} {"text":"data_6113"} {"text":"data_6114"} {"text":"data_6115"} {"text":"data_6116"} {"text":"data_6117"} {"text":"data_6118"} {"text":"data_6119"} {"text":"data_6120"} {"text":"data_6121"} {"text":"data_6122"} {"text":"data_6123"} {"text":"data_6124"} {"text":"data_6125"} {"text":"data_6126"} {"text":"data_6127"} {"text":"data_6128"} {"text":"data_6129"} {"text":"data_6130"} {"text":"data_6131"} {"text":"data_6132"} {"text":"data_6133"} {"text":"data_6134"} {"text":"data_6135"} {"text":"data_6136"} {"text":"data_6137"} {"text":"data_6138"} {"text":"data_6139"} 
{"text":"data_6140"} {"text":"data_6141"} {"text":"data_6142"} {"text":"data_6143"} {"text":"data_6144"} {"text":"data_6145"} {"text":"data_6146"} {"text":"data_6147"} {"text":"data_6148"} {"text":"data_6149"} {"text":"data_6150"} {"text":"data_6151"} {"text":"data_6152"} {"text":"data_6153"} {"text":"data_6154"} {"text":"data_6155"} {"text":"data_6156"} {"text":"data_6157"} {"text":"data_6158"} {"text":"data_6159"} {"text":"data_6160"} {"text":"data_6161"} {"text":"data_6162"} {"text":"data_6163"} {"text":"data_6164"} {"text":"data_6165"} {"text":"data_6166"} {"text":"data_6167"} {"text":"data_6168"} {"text":"data_6169"} {"text":"data_6170"} {"text":"data_6171"} {"text":"data_6172"} {"text":"data_6173"} {"text":"data_6174"} {"text":"data_6175"} {"text":"data_6176"} {"text":"data_6177"} {"text":"data_6178"} {"text":"data_6179"} {"text":"data_6180"} {"text":"data_6181"} {"text":"data_6182"} {"text":"data_6183"} {"text":"data_6184"} {"text":"data_6185"} {"text":"data_6186"} {"text":"data_6187"} {"text":"data_6188"} {"text":"data_6189"} {"text":"data_6190"} {"text":"data_6191"} {"text":"data_6192"} {"text":"data_6193"} {"text":"data_6194"} {"text":"data_6195"} {"text":"data_6196"} {"text":"data_6197"} {"text":"data_6198"} {"text":"data_6199"} {"text":"data_6200"} {"text":"data_6201"} {"text":"data_6202"} {"text":"data_6203"} {"text":"data_6204"} {"text":"data_6205"} {"text":"data_6206"} {"text":"data_6207"} {"text":"data_6208"} {"text":"data_6209"} {"text":"data_6210"} {"text":"data_6211"} {"text":"data_6212"} {"text":"data_6213"} {"text":"data_6214"} {"text":"data_6215"} {"text":"data_6216"} {"text":"data_6217"} {"text":"data_6218"} {"text":"data_6219"} {"text":"data_6220"} {"text":"data_6221"} {"text":"data_6222"} {"text":"data_6223"} {"text":"data_6224"} {"text":"data_6225"} {"text":"data_6226"} {"text":"data_6227"} {"text":"data_6228"} {"text":"data_6229"} {"text":"data_6230"} {"text":"data_6231"} {"text":"data_6232"} {"text":"data_6233"} {"text":"data_6234"} 
{"text":"data_6235"} {"text":"data_6236"} {"text":"data_6237"} {"text":"data_6238"} {"text":"data_6239"} {"text":"data_6240"} {"text":"data_6241"} {"text":"data_6242"} {"text":"data_6243"} {"text":"data_6244"} {"text":"data_6245"} {"text":"data_6246"} {"text":"data_6247"} {"text":"data_6248"} {"text":"data_6249"} {"text":"data_6250"} {"text":"data_6251"} {"text":"data_6252"} {"text":"data_6253"} {"text":"data_6254"} {"text":"data_6255"} {"text":"data_6256"} {"text":"data_6257"} {"text":"data_6258"} {"text":"data_6259"} {"text":"data_6260"} {"text":"data_6261"} {"text":"data_6262"} {"text":"data_6263"} {"text":"data_6264"} {"text":"data_6265"} {"text":"data_6266"} {"text":"data_6267"} {"text":"data_6268"} {"text":"data_6269"} {"text":"data_6270"} {"text":"data_6271"} {"text":"data_6272"} {"text":"data_6273"} {"text":"data_6274"} {"text":"data_6275"} {"text":"data_6276"} {"text":"data_6277"} {"text":"data_6278"} {"text":"data_6279"} {"text":"data_6280"} {"text":"data_6281"} {"text":"data_6282"} {"text":"data_6283"} {"text":"data_6284"} {"text":"data_6285"} {"text":"data_6286"} {"text":"data_6287"} {"text":"data_6288"} {"text":"data_6289"} {"text":"data_6290"} {"text":"data_6291"} {"text":"data_6292"} {"text":"data_6293"} {"text":"data_6294"} {"text":"data_6295"} {"text":"data_6296"} {"text":"data_6297"} {"text":"data_6298"} {"text":"data_6299"} {"text":"data_6300"} {"text":"data_6301"} {"text":"data_6302"} {"text":"data_6303"} {"text":"data_6304"} {"text":"data_6305"} {"text":"data_6306"} {"text":"data_6307"} {"text":"data_6308"} {"text":"data_6309"} {"text":"data_6310"} {"text":"data_6311"} {"text":"data_6312"} {"text":"data_6313"} {"text":"data_6314"} {"text":"data_6315"} {"text":"data_6316"} {"text":"data_6317"} {"text":"data_6318"} {"text":"data_6319"} {"text":"data_6320"} {"text":"data_6321"} {"text":"data_6322"} {"text":"data_6323"} {"text":"data_6324"} {"text":"data_6325"} {"text":"data_6326"} {"text":"data_6327"} {"text":"data_6328"} {"text":"data_6329"} 
{"text":"data_6330"} {"text":"data_6331"} {"text":"data_6332"} {"text":"data_6333"} {"text":"data_6334"} {"text":"data_6335"} {"text":"data_6336"} {"text":"data_6337"} {"text":"data_6338"} {"text":"data_6339"} {"text":"data_6340"} {"text":"data_6341"} {"text":"data_6342"} {"text":"data_6343"} {"text":"data_6344"} {"text":"data_6345"} {"text":"data_6346"} {"text":"data_6347"} {"text":"data_6348"} {"text":"data_6349"} {"text":"data_6350"} {"text":"data_6351"} {"text":"data_6352"} {"text":"data_6353"} {"text":"data_6354"} {"text":"data_6355"} {"text":"data_6356"} {"text":"data_6357"} {"text":"data_6358"} {"text":"data_6359"} {"text":"data_6360"} {"text":"data_6361"} {"text":"data_6362"} {"text":"data_6363"} {"text":"data_6364"} {"text":"data_6365"} {"text":"data_6366"} {"text":"data_6367"} {"text":"data_6368"} {"text":"data_6369"} {"text":"data_6370"} {"text":"data_6371"} {"text":"data_6372"} {"text":"data_6373"} {"text":"data_6374"} {"text":"data_6375"} {"text":"data_6376"} {"text":"data_6377"} {"text":"data_6378"} {"text":"data_6379"} {"text":"data_6380"} {"text":"data_6381"} {"text":"data_6382"} {"text":"data_6383"} {"text":"data_6384"} {"text":"data_6385"} {"text":"data_6386"} {"text":"data_6387"} {"text":"data_6388"} {"text":"data_6389"} {"text":"data_6390"} {"text":"data_6391"} {"text":"data_6392"} {"text":"data_6393"} {"text":"data_6394"} {"text":"data_6395"} {"text":"data_6396"} {"text":"data_6397"} {"text":"data_6398"} {"text":"data_6399"} {"text":"data_6400"} {"text":"data_6401"} {"text":"data_6402"} {"text":"data_6403"} {"text":"data_6404"} {"text":"data_6405"} {"text":"data_6406"} {"text":"data_6407"} {"text":"data_6408"} {"text":"data_6409"} {"text":"data_6410"} {"text":"data_6411"} {"text":"data_6412"} {"text":"data_6413"} {"text":"data_6414"} {"text":"data_6415"} {"text":"data_6416"} {"text":"data_6417"} {"text":"data_6418"} {"text":"data_6419"} {"text":"data_6420"} {"text":"data_6421"} {"text":"data_6422"} {"text":"data_6423"} {"text":"data_6424"} 
{"text":"data_6425"} {"text":"data_6426"} {"text":"data_6427"} {"text":"data_6428"} {"text":"data_6429"} {"text":"data_6430"} {"text":"data_6431"} {"text":"data_6432"} {"text":"data_6433"} {"text":"data_6434"} {"text":"data_6435"} {"text":"data_6436"} {"text":"data_6437"} {"text":"data_6438"} {"text":"data_6439"} {"text":"data_6440"} {"text":"data_6441"} {"text":"data_6442"} {"text":"data_6443"} {"text":"data_6444"} {"text":"data_6445"} {"text":"data_6446"} {"text":"data_6447"} {"text":"data_6448"} {"text":"data_6449"} {"text":"data_6450"} {"text":"data_6451"} {"text":"data_6452"} {"text":"data_6453"} {"text":"data_6454"} {"text":"data_6455"} {"text":"data_6456"} {"text":"data_6457"} {"text":"data_6458"} {"text":"data_6459"} {"text":"data_6460"} {"text":"data_6461"} {"text":"data_6462"} {"text":"data_6463"} {"text":"data_6464"} {"text":"data_6465"} {"text":"data_6466"} {"text":"data_6467"} {"text":"data_6468"} {"text":"data_6469"} {"text":"data_6470"} {"text":"data_6471"} {"text":"data_6472"} {"text":"data_6473"} {"text":"data_6474"} {"text":"data_6475"} {"text":"data_6476"} {"text":"data_6477"} {"text":"data_6478"} {"text":"data_6479"} {"text":"data_6480"} {"text":"data_6481"} {"text":"data_6482"} {"text":"data_6483"} {"text":"data_6484"} {"text":"data_6485"} {"text":"data_6486"} {"text":"data_6487"} {"text":"data_6488"} {"text":"data_6489"} {"text":"data_6490"} {"text":"data_6491"} {"text":"data_6492"} {"text":"data_6493"} {"text":"data_6494"} {"text":"data_6495"} {"text":"data_6496"} {"text":"data_6497"} {"text":"data_6498"} {"text":"data_6499"} {"text":"data_6500"} {"text":"data_6501"} {"text":"data_6502"} {"text":"data_6503"} {"text":"data_6504"} {"text":"data_6505"} {"text":"data_6506"} {"text":"data_6507"} {"text":"data_6508"} {"text":"data_6509"} {"text":"data_6510"} {"text":"data_6511"} {"text":"data_6512"} {"text":"data_6513"} {"text":"data_6514"} {"text":"data_6515"} {"text":"data_6516"} {"text":"data_6517"} {"text":"data_6518"} {"text":"data_6519"} 
{"text":"data_6520"} {"text":"data_6521"} {"text":"data_6522"} {"text":"data_6523"} {"text":"data_6524"} {"text":"data_6525"} {"text":"data_6526"} {"text":"data_6527"} {"text":"data_6528"} {"text":"data_6529"} {"text":"data_6530"} {"text":"data_6531"} {"text":"data_6532"} {"text":"data_6533"} {"text":"data_6534"} {"text":"data_6535"} {"text":"data_6536"} {"text":"data_6537"} {"text":"data_6538"} {"text":"data_6539"} {"text":"data_6540"} {"text":"data_6541"} {"text":"data_6542"} {"text":"data_6543"} {"text":"data_6544"} {"text":"data_6545"} {"text":"data_6546"} {"text":"data_6547"} {"text":"data_6548"} {"text":"data_6549"} {"text":"data_6550"} {"text":"data_6551"} {"text":"data_6552"} {"text":"data_6553"} {"text":"data_6554"} {"text":"data_6555"} {"text":"data_6556"} {"text":"data_6557"} {"text":"data_6558"} {"text":"data_6559"} {"text":"data_6560"} {"text":"data_6561"} {"text":"data_6562"} {"text":"data_6563"} {"text":"data_6564"} {"text":"data_6565"} {"text":"data_6566"} {"text":"data_6567"} {"text":"data_6568"} {"text":"data_6569"} {"text":"data_6570"} {"text":"data_6571"} {"text":"data_6572"} {"text":"data_6573"} {"text":"data_6574"} {"text":"data_6575"} {"text":"data_6576"} {"text":"data_6577"} {"text":"data_6578"} {"text":"data_6579"} {"text":"data_6580"} {"text":"data_6581"} {"text":"data_6582"} {"text":"data_6583"} {"text":"data_6584"} {"text":"data_6585"} {"text":"data_6586"} {"text":"data_6587"} {"text":"data_6588"} {"text":"data_6589"} {"text":"data_6590"} {"text":"data_6591"} {"text":"data_6592"} {"text":"data_6593"} {"text":"data_6594"} {"text":"data_6595"} {"text":"data_6596"} {"text":"data_6597"} {"text":"data_6598"} {"text":"data_6599"} {"text":"data_6600"} {"text":"data_6601"} {"text":"data_6602"} {"text":"data_6603"} {"text":"data_6604"} {"text":"data_6605"} {"text":"data_6606"} {"text":"data_6607"} {"text":"data_6608"} {"text":"data_6609"} {"text":"data_6610"} {"text":"data_6611"} {"text":"data_6612"} {"text":"data_6613"} {"text":"data_6614"} 
{"text":"data_6615"} {"text":"data_6616"} {"text":"data_6617"} {"text":"data_6618"} {"text":"data_6619"} {"text":"data_6620"} {"text":"data_6621"} {"text":"data_6622"} {"text":"data_6623"} {"text":"data_6624"} {"text":"data_6625"} {"text":"data_6626"} {"text":"data_6627"} {"text":"data_6628"} {"text":"data_6629"} {"text":"data_6630"} {"text":"data_6631"} {"text":"data_6632"} {"text":"data_6633"} {"text":"data_6634"} {"text":"data_6635"} {"text":"data_6636"} {"text":"data_6637"} {"text":"data_6638"} {"text":"data_6639"} {"text":"data_6640"} {"text":"data_6641"} {"text":"data_6642"} {"text":"data_6643"} {"text":"data_6644"} {"text":"data_6645"} {"text":"data_6646"} {"text":"data_6647"} {"text":"data_6648"} {"text":"data_6649"} {"text":"data_6650"} {"text":"data_6651"} {"text":"data_6652"} {"text":"data_6653"} {"text":"data_6654"} {"text":"data_6655"} {"text":"data_6656"} {"text":"data_6657"} {"text":"data_6658"} {"text":"data_6659"} {"text":"data_6660"} {"text":"data_6661"} {"text":"data_6662"} {"text":"data_6663"} {"text":"data_6664"} {"text":"data_6665"} {"text":"data_6666"} {"text":"data_6667"} {"text":"data_6668"} {"text":"data_6669"} {"text":"data_6670"} {"text":"data_6671"} {"text":"data_6672"} {"text":"data_6673"} {"text":"data_6674"} {"text":"data_6675"} {"text":"data_6676"} {"text":"data_6677"} {"text":"data_6678"} {"text":"data_6679"} {"text":"data_6680"} {"text":"data_6681"} {"text":"data_6682"} {"text":"data_6683"} {"text":"data_6684"} {"text":"data_6685"} {"text":"data_6686"} {"text":"data_6687"} {"text":"data_6688"} {"text":"data_6689"} {"text":"data_6690"} {"text":"data_6691"} {"text":"data_6692"} {"text":"data_6693"} {"text":"data_6694"} {"text":"data_6695"} {"text":"data_6696"} {"text":"data_6697"} {"text":"data_6698"} {"text":"data_6699"} {"text":"data_6700"} {"text":"data_6701"} {"text":"data_6702"} {"text":"data_6703"} {"text":"data_6704"} {"text":"data_6705"} {"text":"data_6706"} {"text":"data_6707"} {"text":"data_6708"} {"text":"data_6709"} 
{"text":"data_6710"} {"text":"data_6711"} {"text":"data_6712"} {"text":"data_6713"} {"text":"data_6714"} {"text":"data_6715"} {"text":"data_6716"} {"text":"data_6717"} {"text":"data_6718"} {"text":"data_6719"} {"text":"data_6720"} {"text":"data_6721"} {"text":"data_6722"} {"text":"data_6723"} {"text":"data_6724"} {"text":"data_6725"} {"text":"data_6726"} {"text":"data_6727"} {"text":"data_6728"} {"text":"data_6729"} {"text":"data_6730"} {"text":"data_6731"} {"text":"data_6732"} {"text":"data_6733"} {"text":"data_6734"} {"text":"data_6735"} {"text":"data_6736"} {"text":"data_6737"} {"text":"data_6738"} {"text":"data_6739"} {"text":"data_6740"} {"text":"data_6741"} {"text":"data_6742"} {"text":"data_6743"} {"text":"data_6744"} {"text":"data_6745"} {"text":"data_6746"} {"text":"data_6747"} {"text":"data_6748"} {"text":"data_6749"} {"text":"data_6750"} {"text":"data_6751"} {"text":"data_6752"} {"text":"data_6753"} {"text":"data_6754"} {"text":"data_6755"} {"text":"data_6756"} {"text":"data_6757"} {"text":"data_6758"} {"text":"data_6759"} {"text":"data_6760"} {"text":"data_6761"} {"text":"data_6762"} {"text":"data_6763"} {"text":"data_6764"} {"text":"data_6765"} {"text":"data_6766"} {"text":"data_6767"} {"text":"data_6768"} {"text":"data_6769"} {"text":"data_6770"} {"text":"data_6771"} {"text":"data_6772"} {"text":"data_6773"} {"text":"data_6774"} {"text":"data_6775"} {"text":"data_6776"} {"text":"data_6777"} {"text":"data_6778"} {"text":"data_6779"} {"text":"data_6780"} {"text":"data_6781"} {"text":"data_6782"} {"text":"data_6783"} {"text":"data_6784"} {"text":"data_6785"} {"text":"data_6786"} {"text":"data_6787"} {"text":"data_6788"} {"text":"data_6789"} {"text":"data_6790"} {"text":"data_6791"} {"text":"data_6792"} {"text":"data_6793"} {"text":"data_6794"} {"text":"data_6795"} {"text":"data_6796"} {"text":"data_6797"} {"text":"data_6798"} {"text":"data_6799"} {"text":"data_6800"} {"text":"data_6801"} {"text":"data_6802"} {"text":"data_6803"} {"text":"data_6804"} 
{"text":"data_6805"} {"text":"data_6806"} {"text":"data_6807"} {"text":"data_6808"} {"text":"data_6809"} {"text":"data_6810"} {"text":"data_6811"} {"text":"data_6812"} {"text":"data_6813"} {"text":"data_6814"} {"text":"data_6815"} {"text":"data_6816"} {"text":"data_6817"} {"text":"data_6818"} {"text":"data_6819"} {"text":"data_6820"} {"text":"data_6821"} {"text":"data_6822"} {"text":"data_6823"} {"text":"data_6824"} {"text":"data_6825"} {"text":"data_6826"} {"text":"data_6827"} {"text":"data_6828"} {"text":"data_6829"} {"text":"data_6830"} {"text":"data_6831"} {"text":"data_6832"} {"text":"data_6833"} {"text":"data_6834"} {"text":"data_6835"} {"text":"data_6836"} {"text":"data_6837"} {"text":"data_6838"} {"text":"data_6839"} {"text":"data_6840"} {"text":"data_6841"} {"text":"data_6842"} {"text":"data_6843"} {"text":"data_6844"} {"text":"data_6845"} {"text":"data_6846"} {"text":"data_6847"} {"text":"data_6848"} {"text":"data_6849"} {"text":"data_6850"} {"text":"data_6851"} {"text":"data_6852"} {"text":"data_6853"} {"text":"data_6854"} {"text":"data_6855"} {"text":"data_6856"} {"text":"data_6857"} {"text":"data_6858"} {"text":"data_6859"} {"text":"data_6860"} {"text":"data_6861"} {"text":"data_6862"} {"text":"data_6863"} {"text":"data_6864"} {"text":"data_6865"} {"text":"data_6866"} {"text":"data_6867"} {"text":"data_6868"} {"text":"data_6869"} {"text":"data_6870"} {"text":"data_6871"} {"text":"data_6872"} {"text":"data_6873"} {"text":"data_6874"} {"text":"data_6875"} {"text":"data_6876"} {"text":"data_6877"} {"text":"data_6878"} {"text":"data_6879"} {"text":"data_6880"} {"text":"data_6881"} {"text":"data_6882"} {"text":"data_6883"} {"text":"data_6884"} {"text":"data_6885"} {"text":"data_6886"} {"text":"data_6887"} {"text":"data_6888"} {"text":"data_6889"} {"text":"data_6890"} {"text":"data_6891"} {"text":"data_6892"} {"text":"data_6893"} {"text":"data_6894"} {"text":"data_6895"} {"text":"data_6896"} {"text":"data_6897"} {"text":"data_6898"} {"text":"data_6899"} 
{"text":"data_6900"} {"text":"data_6901"} {"text":"data_6902"} {"text":"data_6903"} {"text":"data_6904"} {"text":"data_6905"} {"text":"data_6906"} {"text":"data_6907"} {"text":"data_6908"} {"text":"data_6909"} {"text":"data_6910"} {"text":"data_6911"} {"text":"data_6912"} {"text":"data_6913"} {"text":"data_6914"} {"text":"data_6915"} {"text":"data_6916"} {"text":"data_6917"} {"text":"data_6918"} {"text":"data_6919"} {"text":"data_6920"} {"text":"data_6921"} {"text":"data_6922"} {"text":"data_6923"} {"text":"data_6924"} {"text":"data_6925"} {"text":"data_6926"} {"text":"data_6927"} {"text":"data_6928"} {"text":"data_6929"} {"text":"data_6930"} {"text":"data_6931"} {"text":"data_6932"} {"text":"data_6933"} {"text":"data_6934"} {"text":"data_6935"} {"text":"data_6936"} {"text":"data_6937"} {"text":"data_6938"} {"text":"data_6939"} {"text":"data_6940"} {"text":"data_6941"} {"text":"data_6942"} {"text":"data_6943"} {"text":"data_6944"} {"text":"data_6945"} {"text":"data_6946"} {"text":"data_6947"} {"text":"data_6948"} {"text":"data_6949"} {"text":"data_6950"} {"text":"data_6951"} {"text":"data_6952"} {"text":"data_6953"} {"text":"data_6954"} {"text":"data_6955"} {"text":"data_6956"} {"text":"data_6957"} {"text":"data_6958"} {"text":"data_6959"} {"text":"data_6960"} {"text":"data_6961"} {"text":"data_6962"} {"text":"data_6963"} {"text":"data_6964"} {"text":"data_6965"} {"text":"data_6966"} {"text":"data_6967"} {"text":"data_6968"} {"text":"data_6969"} {"text":"data_6970"} {"text":"data_6971"} {"text":"data_6972"} {"text":"data_6973"} {"text":"data_6974"} {"text":"data_6975"} {"text":"data_6976"} {"text":"data_6977"} {"text":"data_6978"} {"text":"data_6979"} {"text":"data_6980"} {"text":"data_6981"} {"text":"data_6982"} {"text":"data_6983"} {"text":"data_6984"} {"text":"data_6985"} {"text":"data_6986"} {"text":"data_6987"} {"text":"data_6988"} {"text":"data_6989"} {"text":"data_6990"} {"text":"data_6991"} {"text":"data_6992"} {"text":"data_6993"} {"text":"data_6994"} 
{"text":"data_6995"} {"text":"data_6996"} {"text":"data_6997"} {"text":"data_6998"} {"text":"data_6999"} {"text":"data_7000"} {"text":"data_7001"} {"text":"data_7002"} {"text":"data_7003"} {"text":"data_7004"} {"text":"data_7005"} {"text":"data_7006"} {"text":"data_7007"} {"text":"data_7008"} {"text":"data_7009"} {"text":"data_7010"} {"text":"data_7011"} {"text":"data_7012"} {"text":"data_7013"} {"text":"data_7014"} {"text":"data_7015"} {"text":"data_7016"} {"text":"data_7017"} {"text":"data_7018"} {"text":"data_7019"} {"text":"data_7020"} {"text":"data_7021"} {"text":"data_7022"} {"text":"data_7023"} {"text":"data_7024"} {"text":"data_7025"} {"text":"data_7026"} {"text":"data_7027"} {"text":"data_7028"} {"text":"data_7029"} {"text":"data_7030"} {"text":"data_7031"} {"text":"data_7032"} {"text":"data_7033"} {"text":"data_7034"} {"text":"data_7035"} {"text":"data_7036"} {"text":"data_7037"} {"text":"data_7038"} {"text":"data_7039"} {"text":"data_7040"} {"text":"data_7041"} {"text":"data_7042"} {"text":"data_7043"} {"text":"data_7044"} {"text":"data_7045"} {"text":"data_7046"} {"text":"data_7047"} {"text":"data_7048"} {"text":"data_7049"} {"text":"data_7050"} {"text":"data_7051"} {"text":"data_7052"} {"text":"data_7053"} {"text":"data_7054"} {"text":"data_7055"} {"text":"data_7056"} {"text":"data_7057"} {"text":"data_7058"} {"text":"data_7059"} {"text":"data_7060"} {"text":"data_7061"} {"text":"data_7062"} {"text":"data_7063"} {"text":"data_7064"} {"text":"data_7065"} {"text":"data_7066"} {"text":"data_7067"} {"text":"data_7068"} {"text":"data_7069"} {"text":"data_7070"} {"text":"data_7071"} {"text":"data_7072"} {"text":"data_7073"} {"text":"data_7074"} {"text":"data_7075"} {"text":"data_7076"} {"text":"data_7077"} {"text":"data_7078"} {"text":"data_7079"} {"text":"data_7080"} {"text":"data_7081"} {"text":"data_7082"} {"text":"data_7083"} {"text":"data_7084"} {"text":"data_7085"} {"text":"data_7086"} {"text":"data_7087"} {"text":"data_7088"} {"text":"data_7089"} 
{"text":"data_7090"} {"text":"data_7091"} {"text":"data_7092"} {"text":"data_7093"} {"text":"data_7094"} {"text":"data_7095"} {"text":"data_7096"} {"text":"data_7097"} {"text":"data_7098"} {"text":"data_7099"} {"text":"data_7100"} {"text":"data_7101"} {"text":"data_7102"} {"text":"data_7103"} {"text":"data_7104"} {"text":"data_7105"} {"text":"data_7106"} {"text":"data_7107"} {"text":"data_7108"} {"text":"data_7109"} {"text":"data_7110"} {"text":"data_7111"} {"text":"data_7112"} {"text":"data_7113"} {"text":"data_7114"} {"text":"data_7115"} {"text":"data_7116"} {"text":"data_7117"} {"text":"data_7118"} {"text":"data_7119"} {"text":"data_7120"} {"text":"data_7121"} {"text":"data_7122"} {"text":"data_7123"} {"text":"data_7124"} {"text":"data_7125"} {"text":"data_7126"} {"text":"data_7127"} {"text":"data_7128"} {"text":"data_7129"} {"text":"data_7130"} {"text":"data_7131"} {"text":"data_7132"} {"text":"data_7133"} {"text":"data_7134"} {"text":"data_7135"} {"text":"data_7136"} {"text":"data_7137"} {"text":"data_7138"} {"text":"data_7139"} {"text":"data_7140"} {"text":"data_7141"} {"text":"data_7142"} {"text":"data_7143"} {"text":"data_7144"} {"text":"data_7145"} {"text":"data_7146"} {"text":"data_7147"} {"text":"data_7148"} {"text":"data_7149"} {"text":"data_7150"} {"text":"data_7151"} {"text":"data_7152"} {"text":"data_7153"} {"text":"data_7154"} {"text":"data_7155"} {"text":"data_7156"} {"text":"data_7157"} {"text":"data_7158"} {"text":"data_7159"} {"text":"data_7160"} {"text":"data_7161"} {"text":"data_7162"} {"text":"data_7163"} {"text":"data_7164"} {"text":"data_7165"} {"text":"data_7166"} {"text":"data_7167"} {"text":"data_7168"} {"text":"data_7169"} {"text":"data_7170"} {"text":"data_7171"} {"text":"data_7172"} {"text":"data_7173"} {"text":"data_7174"} {"text":"data_7175"} {"text":"data_7176"} {"text":"data_7177"} {"text":"data_7178"} {"text":"data_7179"} {"text":"data_7180"} {"text":"data_7181"} {"text":"data_7182"} {"text":"data_7183"} {"text":"data_7184"} 
{"text":"data_7185"} {"text":"data_7186"} {"text":"data_7187"} {"text":"data_7188"} {"text":"data_7189"} {"text":"data_7190"} {"text":"data_7191"} {"text":"data_7192"} {"text":"data_7193"} {"text":"data_7194"} {"text":"data_7195"} {"text":"data_7196"} {"text":"data_7197"} {"text":"data_7198"} {"text":"data_7199"} {"text":"data_7200"} {"text":"data_7201"} {"text":"data_7202"} {"text":"data_7203"} {"text":"data_7204"} {"text":"data_7205"} {"text":"data_7206"} {"text":"data_7207"} {"text":"data_7208"} {"text":"data_7209"} {"text":"data_7210"} {"text":"data_7211"} {"text":"data_7212"} {"text":"data_7213"} {"text":"data_7214"} {"text":"data_7215"} {"text":"data_7216"} {"text":"data_7217"} {"text":"data_7218"} {"text":"data_7219"} {"text":"data_7220"} {"text":"data_7221"} {"text":"data_7222"} {"text":"data_7223"} {"text":"data_7224"} {"text":"data_7225"} {"text":"data_7226"} {"text":"data_7227"} {"text":"data_7228"} {"text":"data_7229"} {"text":"data_7230"} {"text":"data_7231"} {"text":"data_7232"} {"text":"data_7233"} {"text":"data_7234"} {"text":"data_7235"} {"text":"data_7236"} {"text":"data_7237"} {"text":"data_7238"} {"text":"data_7239"} {"text":"data_7240"} {"text":"data_7241"} {"text":"data_7242"} {"text":"data_7243"} {"text":"data_7244"} {"text":"data_7245"} {"text":"data_7246"} {"text":"data_7247"} {"text":"data_7248"} {"text":"data_7249"} {"text":"data_7250"} {"text":"data_7251"} {"text":"data_7252"} {"text":"data_7253"} {"text":"data_7254"} {"text":"data_7255"} {"text":"data_7256"} {"text":"data_7257"} {"text":"data_7258"} {"text":"data_7259"} {"text":"data_7260"} {"text":"data_7261"} {"text":"data_7262"} {"text":"data_7263"} {"text":"data_7264"} {"text":"data_7265"} {"text":"data_7266"} {"text":"data_7267"} {"text":"data_7268"} {"text":"data_7269"} {"text":"data_7270"} {"text":"data_7271"} {"text":"data_7272"} {"text":"data_7273"} {"text":"data_7274"} {"text":"data_7275"} {"text":"data_7276"} {"text":"data_7277"} {"text":"data_7278"} {"text":"data_7279"} 
{"text":"data_7280"} {"text":"data_7281"} {"text":"data_7282"} {"text":"data_7283"} {"text":"data_7284"} {"text":"data_7285"} {"text":"data_7286"} {"text":"data_7287"} {"text":"data_7288"} {"text":"data_7289"} {"text":"data_7290"} {"text":"data_7291"} {"text":"data_7292"} {"text":"data_7293"} {"text":"data_7294"} {"text":"data_7295"} {"text":"data_7296"} {"text":"data_7297"} {"text":"data_7298"} {"text":"data_7299"} {"text":"data_7300"} {"text":"data_7301"} {"text":"data_7302"} {"text":"data_7303"} {"text":"data_7304"} {"text":"data_7305"} {"text":"data_7306"} {"text":"data_7307"} {"text":"data_7308"} {"text":"data_7309"} {"text":"data_7310"} {"text":"data_7311"} {"text":"data_7312"} {"text":"data_7313"} {"text":"data_7314"} {"text":"data_7315"} {"text":"data_7316"} {"text":"data_7317"} {"text":"data_7318"} {"text":"data_7319"} {"text":"data_7320"} {"text":"data_7321"} {"text":"data_7322"} {"text":"data_7323"} {"text":"data_7324"} {"text":"data_7325"} {"text":"data_7326"} {"text":"data_7327"} {"text":"data_7328"} {"text":"data_7329"} {"text":"data_7330"} {"text":"data_7331"} {"text":"data_7332"} {"text":"data_7333"} {"text":"data_7334"} {"text":"data_7335"} {"text":"data_7336"} {"text":"data_7337"} {"text":"data_7338"} {"text":"data_7339"} {"text":"data_7340"} {"text":"data_7341"} {"text":"data_7342"} {"text":"data_7343"} {"text":"data_7344"} {"text":"data_7345"} {"text":"data_7346"} {"text":"data_7347"} {"text":"data_7348"} {"text":"data_7349"} {"text":"data_7350"} {"text":"data_7351"} {"text":"data_7352"} {"text":"data_7353"} {"text":"data_7354"} {"text":"data_7355"} {"text":"data_7356"} {"text":"data_7357"} {"text":"data_7358"} {"text":"data_7359"} {"text":"data_7360"} {"text":"data_7361"} {"text":"data_7362"} {"text":"data_7363"} {"text":"data_7364"} {"text":"data_7365"} {"text":"data_7366"} {"text":"data_7367"} {"text":"data_7368"} {"text":"data_7369"} {"text":"data_7370"} {"text":"data_7371"} {"text":"data_7372"} {"text":"data_7373"} {"text":"data_7374"} 
{"text":"data_7375"} {"text":"data_7376"} {"text":"data_7377"} {"text":"data_7378"} {"text":"data_7379"} {"text":"data_7380"} {"text":"data_7381"} {"text":"data_7382"} {"text":"data_7383"} {"text":"data_7384"} {"text":"data_7385"} {"text":"data_7386"} {"text":"data_7387"} {"text":"data_7388"} {"text":"data_7389"} {"text":"data_7390"} {"text":"data_7391"} {"text":"data_7392"} {"text":"data_7393"} {"text":"data_7394"} {"text":"data_7395"} {"text":"data_7396"} {"text":"data_7397"} {"text":"data_7398"} {"text":"data_7399"} {"text":"data_7400"} {"text":"data_7401"} {"text":"data_7402"} {"text":"data_7403"} {"text":"data_7404"} {"text":"data_7405"} {"text":"data_7406"} {"text":"data_7407"} {"text":"data_7408"} {"text":"data_7409"} {"text":"data_7410"} {"text":"data_7411"} {"text":"data_7412"} {"text":"data_7413"} {"text":"data_7414"} {"text":"data_7415"} {"text":"data_7416"} {"text":"data_7417"} {"text":"data_7418"} {"text":"data_7419"} {"text":"data_7420"} {"text":"data_7421"} {"text":"data_7422"} {"text":"data_7423"} {"text":"data_7424"} {"text":"data_7425"} {"text":"data_7426"} {"text":"data_7427"} {"text":"data_7428"} {"text":"data_7429"} {"text":"data_7430"} {"text":"data_7431"} {"text":"data_7432"} {"text":"data_7433"} {"text":"data_7434"} {"text":"data_7435"} {"text":"data_7436"} {"text":"data_7437"} {"text":"data_7438"} {"text":"data_7439"} {"text":"data_7440"} {"text":"data_7441"} {"text":"data_7442"} {"text":"data_7443"} {"text":"data_7444"} {"text":"data_7445"} {"text":"data_7446"} {"text":"data_7447"} {"text":"data_7448"} {"text":"data_7449"} {"text":"data_7450"} {"text":"data_7451"} {"text":"data_7452"} {"text":"data_7453"} {"text":"data_7454"} {"text":"data_7455"} {"text":"data_7456"} {"text":"data_7457"} {"text":"data_7458"} {"text":"data_7459"} {"text":"data_7460"} {"text":"data_7461"} {"text":"data_7462"} {"text":"data_7463"} {"text":"data_7464"} {"text":"data_7465"} {"text":"data_7466"} {"text":"data_7467"} {"text":"data_7468"} {"text":"data_7469"} 
{"text":"data_7470"} {"text":"data_7471"} {"text":"data_7472"} {"text":"data_7473"} {"text":"data_7474"} {"text":"data_7475"} {"text":"data_7476"} {"text":"data_7477"} {"text":"data_7478"} {"text":"data_7479"} {"text":"data_7480"} {"text":"data_7481"} {"text":"data_7482"} {"text":"data_7483"} {"text":"data_7484"} {"text":"data_7485"} {"text":"data_7486"} {"text":"data_7487"} {"text":"data_7488"} {"text":"data_7489"} {"text":"data_7490"} {"text":"data_7491"} {"text":"data_7492"} {"text":"data_7493"} {"text":"data_7494"} {"text":"data_7495"} {"text":"data_7496"} {"text":"data_7497"} {"text":"data_7498"} {"text":"data_7499"} {"text":"data_7500"} {"text":"data_7501"} {"text":"data_7502"} {"text":"data_7503"} {"text":"data_7504"} {"text":"data_7505"} {"text":"data_7506"} {"text":"data_7507"} {"text":"data_7508"} {"text":"data_7509"} {"text":"data_7510"} {"text":"data_7511"} {"text":"data_7512"} {"text":"data_7513"} {"text":"data_7514"} {"text":"data_7515"} {"text":"data_7516"} {"text":"data_7517"} {"text":"data_7518"} {"text":"data_7519"} {"text":"data_7520"} {"text":"data_7521"} {"text":"data_7522"} {"text":"data_7523"} {"text":"data_7524"} {"text":"data_7525"} {"text":"data_7526"} {"text":"data_7527"} {"text":"data_7528"} {"text":"data_7529"} {"text":"data_7530"} {"text":"data_7531"} {"text":"data_7532"} {"text":"data_7533"} {"text":"data_7534"} {"text":"data_7535"} {"text":"data_7536"} {"text":"data_7537"} {"text":"data_7538"} {"text":"data_7539"} {"text":"data_7540"} {"text":"data_7541"} {"text":"data_7542"} {"text":"data_7543"} {"text":"data_7544"} {"text":"data_7545"} {"text":"data_7546"} {"text":"data_7547"} {"text":"data_7548"} {"text":"data_7549"} {"text":"data_7550"} {"text":"data_7551"} {"text":"data_7552"} {"text":"data_7553"} {"text":"data_7554"} {"text":"data_7555"} {"text":"data_7556"} {"text":"data_7557"} {"text":"data_7558"} {"text":"data_7559"} {"text":"data_7560"} {"text":"data_7561"} {"text":"data_7562"} {"text":"data_7563"} {"text":"data_7564"} 
{"text":"data_7565"} {"text":"data_7566"} {"text":"data_7567"} {"text":"data_7568"} {"text":"data_7569"} {"text":"data_7570"} {"text":"data_7571"} {"text":"data_7572"} {"text":"data_7573"} {"text":"data_7574"} {"text":"data_7575"} {"text":"data_7576"} {"text":"data_7577"} {"text":"data_7578"} {"text":"data_7579"} {"text":"data_7580"} {"text":"data_7581"} {"text":"data_7582"} {"text":"data_7583"} {"text":"data_7584"} {"text":"data_7585"} {"text":"data_7586"} {"text":"data_7587"} {"text":"data_7588"} {"text":"data_7589"} {"text":"data_7590"} {"text":"data_7591"} {"text":"data_7592"} {"text":"data_7593"} {"text":"data_7594"} {"text":"data_7595"} {"text":"data_7596"} {"text":"data_7597"} {"text":"data_7598"} {"text":"data_7599"} {"text":"data_7600"} {"text":"data_7601"} {"text":"data_7602"} {"text":"data_7603"} {"text":"data_7604"} {"text":"data_7605"} {"text":"data_7606"} {"text":"data_7607"} {"text":"data_7608"} {"text":"data_7609"} {"text":"data_7610"} {"text":"data_7611"} {"text":"data_7612"} {"text":"data_7613"} {"text":"data_7614"} {"text":"data_7615"} {"text":"data_7616"} {"text":"data_7617"} {"text":"data_7618"} {"text":"data_7619"} {"text":"data_7620"} {"text":"data_7621"} {"text":"data_7622"} {"text":"data_7623"} {"text":"data_7624"} {"text":"data_7625"} {"text":"data_7626"} {"text":"data_7627"} {"text":"data_7628"} {"text":"data_7629"} {"text":"data_7630"} {"text":"data_7631"} {"text":"data_7632"} {"text":"data_7633"} {"text":"data_7634"} {"text":"data_7635"} {"text":"data_7636"} {"text":"data_7637"} {"text":"data_7638"} {"text":"data_7639"} {"text":"data_7640"} {"text":"data_7641"} {"text":"data_7642"} {"text":"data_7643"} {"text":"data_7644"} {"text":"data_7645"} {"text":"data_7646"} {"text":"data_7647"} {"text":"data_7648"} {"text":"data_7649"} {"text":"data_7650"} {"text":"data_7651"} {"text":"data_7652"} {"text":"data_7653"} {"text":"data_7654"} {"text":"data_7655"} {"text":"data_7656"} {"text":"data_7657"} {"text":"data_7658"} {"text":"data_7659"} 
{"text":"data_7660"} {"text":"data_7661"} {"text":"data_7662"} {"text":"data_7663"} {"text":"data_7664"} {"text":"data_7665"} {"text":"data_7666"} {"text":"data_7667"} {"text":"data_7668"} {"text":"data_7669"} {"text":"data_7670"} {"text":"data_7671"} {"text":"data_7672"} {"text":"data_7673"} {"text":"data_7674"} {"text":"data_7675"} {"text":"data_7676"} {"text":"data_7677"} {"text":"data_7678"} {"text":"data_7679"} {"text":"data_7680"} {"text":"data_7681"} {"text":"data_7682"} {"text":"data_7683"} {"text":"data_7684"} {"text":"data_7685"} {"text":"data_7686"} {"text":"data_7687"} {"text":"data_7688"} {"text":"data_7689"} {"text":"data_7690"} {"text":"data_7691"} {"text":"data_7692"} {"text":"data_7693"} {"text":"data_7694"} {"text":"data_7695"} {"text":"data_7696"} {"text":"data_7697"} {"text":"data_7698"} {"text":"data_7699"} {"text":"data_7700"} {"text":"data_7701"} {"text":"data_7702"} {"text":"data_7703"} {"text":"data_7704"} {"text":"data_7705"} {"text":"data_7706"} {"text":"data_7707"} {"text":"data_7708"} {"text":"data_7709"} {"text":"data_7710"} {"text":"data_7711"} {"text":"data_7712"} {"text":"data_7713"} {"text":"data_7714"} {"text":"data_7715"} {"text":"data_7716"} {"text":"data_7717"} {"text":"data_7718"} {"text":"data_7719"} {"text":"data_7720"} {"text":"data_7721"} {"text":"data_7722"} {"text":"data_7723"} {"text":"data_7724"} {"text":"data_7725"} {"text":"data_7726"} {"text":"data_7727"} {"text":"data_7728"} {"text":"data_7729"} {"text":"data_7730"} {"text":"data_7731"} {"text":"data_7732"} {"text":"data_7733"} {"text":"data_7734"} {"text":"data_7735"} {"text":"data_7736"} {"text":"data_7737"} {"text":"data_7738"} {"text":"data_7739"} {"text":"data_7740"} {"text":"data_7741"} {"text":"data_7742"} {"text":"data_7743"} {"text":"data_7744"} {"text":"data_7745"} {"text":"data_7746"} {"text":"data_7747"} {"text":"data_7748"} {"text":"data_7749"} {"text":"data_7750"} {"text":"data_7751"} {"text":"data_7752"} {"text":"data_7753"} {"text":"data_7754"} 
{"text":"data_7755"} {"text":"data_7756"} {"text":"data_7757"} {"text":"data_7758"} {"text":"data_7759"} {"text":"data_7760"} {"text":"data_7761"} {"text":"data_7762"} {"text":"data_7763"} {"text":"data_7764"} {"text":"data_7765"} {"text":"data_7766"} {"text":"data_7767"} {"text":"data_7768"} {"text":"data_7769"} {"text":"data_7770"} {"text":"data_7771"} {"text":"data_7772"} {"text":"data_7773"} {"text":"data_7774"} {"text":"data_7775"} {"text":"data_7776"} {"text":"data_7777"} {"text":"data_7778"} {"text":"data_7779"} {"text":"data_7780"} {"text":"data_7781"} {"text":"data_7782"} {"text":"data_7783"} {"text":"data_7784"} {"text":"data_7785"} {"text":"data_7786"} {"text":"data_7787"} {"text":"data_7788"} {"text":"data_7789"} {"text":"data_7790"} {"text":"data_7791"} {"text":"data_7792"} {"text":"data_7793"} {"text":"data_7794"} {"text":"data_7795"} {"text":"data_7796"} {"text":"data_7797"} {"text":"data_7798"} {"text":"data_7799"} {"text":"data_7800"} {"text":"data_7801"} {"text":"data_7802"} {"text":"data_7803"} {"text":"data_7804"} {"text":"data_7805"} {"text":"data_7806"} {"text":"data_7807"} {"text":"data_7808"} {"text":"data_7809"} {"text":"data_7810"} {"text":"data_7811"} {"text":"data_7812"} {"text":"data_7813"} {"text":"data_7814"} {"text":"data_7815"} {"text":"data_7816"} {"text":"data_7817"} {"text":"data_7818"} {"text":"data_7819"} {"text":"data_7820"} {"text":"data_7821"} {"text":"data_7822"} {"text":"data_7823"} {"text":"data_7824"} {"text":"data_7825"} {"text":"data_7826"} {"text":"data_7827"} {"text":"data_7828"} {"text":"data_7829"} {"text":"data_7830"} {"text":"data_7831"} {"text":"data_7832"} {"text":"data_7833"} {"text":"data_7834"} {"text":"data_7835"} {"text":"data_7836"} {"text":"data_7837"} {"text":"data_7838"} {"text":"data_7839"} {"text":"data_7840"} {"text":"data_7841"} {"text":"data_7842"} {"text":"data_7843"} {"text":"data_7844"} {"text":"data_7845"} {"text":"data_7846"} {"text":"data_7847"} {"text":"data_7848"} {"text":"data_7849"} 
{"text":"data_7850"} {"text":"data_7851"} {"text":"data_7852"} {"text":"data_7853"} {"text":"data_7854"} {"text":"data_7855"} {"text":"data_7856"} {"text":"data_7857"} {"text":"data_7858"} {"text":"data_7859"} {"text":"data_7860"} {"text":"data_7861"} {"text":"data_7862"} {"text":"data_7863"} {"text":"data_7864"} {"text":"data_7865"} {"text":"data_7866"} {"text":"data_7867"} {"text":"data_7868"} {"text":"data_7869"} {"text":"data_7870"} {"text":"data_7871"} {"text":"data_7872"} {"text":"data_7873"} {"text":"data_7874"} {"text":"data_7875"} {"text":"data_7876"} {"text":"data_7877"} {"text":"data_7878"} {"text":"data_7879"} {"text":"data_7880"} {"text":"data_7881"} {"text":"data_7882"} {"text":"data_7883"} {"text":"data_7884"} {"text":"data_7885"} {"text":"data_7886"} {"text":"data_7887"} {"text":"data_7888"} {"text":"data_7889"} {"text":"data_7890"} {"text":"data_7891"} {"text":"data_7892"} {"text":"data_7893"} {"text":"data_7894"} {"text":"data_7895"} {"text":"data_7896"} {"text":"data_7897"} {"text":"data_7898"} {"text":"data_7899"} {"text":"data_7900"} {"text":"data_7901"} {"text":"data_7902"} {"text":"data_7903"} {"text":"data_7904"} {"text":"data_7905"} {"text":"data_7906"} {"text":"data_7907"} {"text":"data_7908"} {"text":"data_7909"} {"text":"data_7910"} {"text":"data_7911"} {"text":"data_7912"} {"text":"data_7913"} {"text":"data_7914"} {"text":"data_7915"} {"text":"data_7916"} {"text":"data_7917"} {"text":"data_7918"} {"text":"data_7919"} {"text":"data_7920"} {"text":"data_7921"} {"text":"data_7922"} {"text":"data_7923"} {"text":"data_7924"} {"text":"data_7925"} {"text":"data_7926"} {"text":"data_7927"} {"text":"data_7928"} {"text":"data_7929"} {"text":"data_7930"} {"text":"data_7931"} {"text":"data_7932"} {"text":"data_7933"} {"text":"data_7934"} {"text":"data_7935"} {"text":"data_7936"} {"text":"data_7937"} {"text":"data_7938"} {"text":"data_7939"} {"text":"data_7940"} {"text":"data_7941"} {"text":"data_7942"} {"text":"data_7943"} {"text":"data_7944"} 
{"text":"data_7945"} {"text":"data_7946"} {"text":"data_7947"} {"text":"data_7948"} {"text":"data_7949"} {"text":"data_7950"} {"text":"data_7951"} {"text":"data_7952"} {"text":"data_7953"} {"text":"data_7954"} {"text":"data_7955"} {"text":"data_7956"} {"text":"data_7957"} {"text":"data_7958"} {"text":"data_7959"} {"text":"data_7960"} {"text":"data_7961"} {"text":"data_7962"} {"text":"data_7963"} {"text":"data_7964"} {"text":"data_7965"} {"text":"data_7966"} {"text":"data_7967"} {"text":"data_7968"} {"text":"data_7969"} {"text":"data_7970"} {"text":"data_7971"} {"text":"data_7972"} {"text":"data_7973"} {"text":"data_7974"} {"text":"data_7975"} {"text":"data_7976"} {"text":"data_7977"} {"text":"data_7978"} {"text":"data_7979"} {"text":"data_7980"} {"text":"data_7981"} {"text":"data_7982"} {"text":"data_7983"} {"text":"data_7984"} {"text":"data_7985"} {"text":"data_7986"} {"text":"data_7987"} {"text":"data_7988"} {"text":"data_7989"} {"text":"data_7990"} {"text":"data_7991"} {"text":"data_7992"} {"text":"data_7993"} {"text":"data_7994"} {"text":"data_7995"} {"text":"data_7996"} {"text":"data_7997"} {"text":"data_7998"} {"text":"data_7999"} {"text":"data_8000"} {"text":"data_8001"} {"text":"data_8002"} {"text":"data_8003"} {"text":"data_8004"} {"text":"data_8005"} {"text":"data_8006"} {"text":"data_8007"} {"text":"data_8008"} {"text":"data_8009"} {"text":"data_8010"} {"text":"data_8011"} {"text":"data_8012"} {"text":"data_8013"} {"text":"data_8014"} {"text":"data_8015"} {"text":"data_8016"} {"text":"data_8017"} {"text":"data_8018"} {"text":"data_8019"} {"text":"data_8020"} {"text":"data_8021"} {"text":"data_8022"} {"text":"data_8023"} {"text":"data_8024"} {"text":"data_8025"} {"text":"data_8026"} {"text":"data_8027"} {"text":"data_8028"} {"text":"data_8029"} {"text":"data_8030"} {"text":"data_8031"} {"text":"data_8032"} {"text":"data_8033"} {"text":"data_8034"} {"text":"data_8035"} {"text":"data_8036"} {"text":"data_8037"} {"text":"data_8038"} {"text":"data_8039"} 
{"text":"data_8040"} {"text":"data_8041"} {"text":"data_8042"} {"text":"data_8043"} {"text":"data_8044"} {"text":"data_8045"} {"text":"data_8046"} {"text":"data_8047"} {"text":"data_8048"} {"text":"data_8049"} {"text":"data_8050"} {"text":"data_8051"} {"text":"data_8052"} {"text":"data_8053"} {"text":"data_8054"} {"text":"data_8055"} {"text":"data_8056"} {"text":"data_8057"} {"text":"data_8058"} {"text":"data_8059"} {"text":"data_8060"} {"text":"data_8061"} {"text":"data_8062"} {"text":"data_8063"} {"text":"data_8064"} {"text":"data_8065"} {"text":"data_8066"} {"text":"data_8067"} {"text":"data_8068"} {"text":"data_8069"} {"text":"data_8070"} {"text":"data_8071"} {"text":"data_8072"} {"text":"data_8073"} {"text":"data_8074"} {"text":"data_8075"} {"text":"data_8076"} {"text":"data_8077"} {"text":"data_8078"} {"text":"data_8079"} {"text":"data_8080"} {"text":"data_8081"} {"text":"data_8082"} {"text":"data_8083"} {"text":"data_8084"} {"text":"data_8085"} {"text":"data_8086"} {"text":"data_8087"} {"text":"data_8088"} {"text":"data_8089"} {"text":"data_8090"} {"text":"data_8091"} {"text":"data_8092"} {"text":"data_8093"} {"text":"data_8094"} {"text":"data_8095"} {"text":"data_8096"} {"text":"data_8097"} {"text":"data_8098"} {"text":"data_8099"} {"text":"data_8100"} {"text":"data_8101"} {"text":"data_8102"} {"text":"data_8103"} {"text":"data_8104"} {"text":"data_8105"} {"text":"data_8106"} {"text":"data_8107"} {"text":"data_8108"} {"text":"data_8109"} {"text":"data_8110"} {"text":"data_8111"} {"text":"data_8112"} {"text":"data_8113"} {"text":"data_8114"} {"text":"data_8115"} {"text":"data_8116"} {"text":"data_8117"} {"text":"data_8118"} {"text":"data_8119"} {"text":"data_8120"} {"text":"data_8121"} {"text":"data_8122"} {"text":"data_8123"} {"text":"data_8124"} {"text":"data_8125"} {"text":"data_8126"} {"text":"data_8127"} {"text":"data_8128"} {"text":"data_8129"} {"text":"data_8130"} {"text":"data_8131"} {"text":"data_8132"} {"text":"data_8133"} {"text":"data_8134"} 
{"text":"data_8135"} {"text":"data_8136"} {"text":"data_8137"} {"text":"data_8138"} {"text":"data_8139"} {"text":"data_8140"} {"text":"data_8141"} {"text":"data_8142"} {"text":"data_8143"} {"text":"data_8144"} {"text":"data_8145"} {"text":"data_8146"} {"text":"data_8147"} {"text":"data_8148"} {"text":"data_8149"} {"text":"data_8150"} {"text":"data_8151"} {"text":"data_8152"} {"text":"data_8153"} {"text":"data_8154"} {"text":"data_8155"} {"text":"data_8156"} {"text":"data_8157"} {"text":"data_8158"} {"text":"data_8159"} {"text":"data_8160"} {"text":"data_8161"} {"text":"data_8162"} {"text":"data_8163"} {"text":"data_8164"} {"text":"data_8165"} {"text":"data_8166"} {"text":"data_8167"} {"text":"data_8168"} {"text":"data_8169"} {"text":"data_8170"} {"text":"data_8171"} {"text":"data_8172"} {"text":"data_8173"} {"text":"data_8174"} {"text":"data_8175"} {"text":"data_8176"} {"text":"data_8177"} {"text":"data_8178"} {"text":"data_8179"} {"text":"data_8180"} {"text":"data_8181"} {"text":"data_8182"} {"text":"data_8183"} {"text":"data_8184"} {"text":"data_8185"} {"text":"data_8186"} {"text":"data_8187"} {"text":"data_8188"} {"text":"data_8189"} {"text":"data_8190"} {"text":"data_8191"} {"text":"data_8192"} {"text":"data_8193"} {"text":"data_8194"} {"text":"data_8195"} {"text":"data_8196"} {"text":"data_8197"} {"text":"data_8198"} {"text":"data_8199"} {"text":"data_8200"} {"text":"data_8201"} {"text":"data_8202"} {"text":"data_8203"} {"text":"data_8204"} {"text":"data_8205"} {"text":"data_8206"} {"text":"data_8207"} {"text":"data_8208"} {"text":"data_8209"} {"text":"data_8210"} {"text":"data_8211"} {"text":"data_8212"} {"text":"data_8213"} {"text":"data_8214"} {"text":"data_8215"} {"text":"data_8216"} {"text":"data_8217"} {"text":"data_8218"} {"text":"data_8219"} {"text":"data_8220"} {"text":"data_8221"} {"text":"data_8222"} {"text":"data_8223"} {"text":"data_8224"} {"text":"data_8225"} {"text":"data_8226"} {"text":"data_8227"} {"text":"data_8228"} {"text":"data_8229"} 
{"text":"data_8230"} {"text":"data_8231"} {"text":"data_8232"} {"text":"data_8233"} {"text":"data_8234"} {"text":"data_8235"} {"text":"data_8236"} {"text":"data_8237"} {"text":"data_8238"} {"text":"data_8239"} {"text":"data_8240"} {"text":"data_8241"} {"text":"data_8242"} {"text":"data_8243"} {"text":"data_8244"} {"text":"data_8245"} {"text":"data_8246"} {"text":"data_8247"} {"text":"data_8248"} {"text":"data_8249"} {"text":"data_8250"} {"text":"data_8251"} {"text":"data_8252"} {"text":"data_8253"} {"text":"data_8254"} {"text":"data_8255"} {"text":"data_8256"} {"text":"data_8257"} {"text":"data_8258"} {"text":"data_8259"} {"text":"data_8260"} {"text":"data_8261"} {"text":"data_8262"} {"text":"data_8263"} {"text":"data_8264"} {"text":"data_8265"} {"text":"data_8266"} {"text":"data_8267"} {"text":"data_8268"} {"text":"data_8269"} {"text":"data_8270"} {"text":"data_8271"} {"text":"data_8272"} {"text":"data_8273"} {"text":"data_8274"} {"text":"data_8275"} {"text":"data_8276"} {"text":"data_8277"} {"text":"data_8278"} {"text":"data_8279"} {"text":"data_8280"} {"text":"data_8281"} {"text":"data_8282"} {"text":"data_8283"} {"text":"data_8284"} {"text":"data_8285"} {"text":"data_8286"} {"text":"data_8287"} {"text":"data_8288"} {"text":"data_8289"} {"text":"data_8290"} {"text":"data_8291"} {"text":"data_8292"} {"text":"data_8293"} {"text":"data_8294"} {"text":"data_8295"} {"text":"data_8296"} {"text":"data_8297"} {"text":"data_8298"} {"text":"data_8299"} {"text":"data_8300"} {"text":"data_8301"} {"text":"data_8302"} {"text":"data_8303"} {"text":"data_8304"} {"text":"data_8305"} {"text":"data_8306"} {"text":"data_8307"} {"text":"data_8308"} {"text":"data_8309"} {"text":"data_8310"} {"text":"data_8311"} {"text":"data_8312"} {"text":"data_8313"} {"text":"data_8314"} {"text":"data_8315"} {"text":"data_8316"} {"text":"data_8317"} {"text":"data_8318"} {"text":"data_8319"} {"text":"data_8320"} {"text":"data_8321"} {"text":"data_8322"} {"text":"data_8323"} {"text":"data_8324"} 
{"text":"data_8325"} {"text":"data_8326"} {"text":"data_8327"} {"text":"data_8328"} {"text":"data_8329"} {"text":"data_8330"} {"text":"data_8331"} {"text":"data_8332"} {"text":"data_8333"} {"text":"data_8334"} {"text":"data_8335"} {"text":"data_8336"} {"text":"data_8337"} {"text":"data_8338"} {"text":"data_8339"} {"text":"data_8340"} {"text":"data_8341"} {"text":"data_8342"} {"text":"data_8343"} {"text":"data_8344"} {"text":"data_8345"} {"text":"data_8346"} {"text":"data_8347"} {"text":"data_8348"} {"text":"data_8349"} {"text":"data_8350"} {"text":"data_8351"} {"text":"data_8352"} {"text":"data_8353"} {"text":"data_8354"} {"text":"data_8355"} {"text":"data_8356"} {"text":"data_8357"} {"text":"data_8358"} {"text":"data_8359"} {"text":"data_8360"} {"text":"data_8361"} {"text":"data_8362"} {"text":"data_8363"} {"text":"data_8364"} {"text":"data_8365"} {"text":"data_8366"} {"text":"data_8367"} {"text":"data_8368"} {"text":"data_8369"} {"text":"data_8370"} {"text":"data_8371"} {"text":"data_8372"} {"text":"data_8373"} {"text":"data_8374"} {"text":"data_8375"} {"text":"data_8376"} {"text":"data_8377"} {"text":"data_8378"} {"text":"data_8379"} {"text":"data_8380"} {"text":"data_8381"} {"text":"data_8382"} {"text":"data_8383"} {"text":"data_8384"} {"text":"data_8385"} {"text":"data_8386"} {"text":"data_8387"} {"text":"data_8388"} {"text":"data_8389"} {"text":"data_8390"} {"text":"data_8391"} {"text":"data_8392"} {"text":"data_8393"} {"text":"data_8394"} {"text":"data_8395"} {"text":"data_8396"} {"text":"data_8397"} {"text":"data_8398"} {"text":"data_8399"} {"text":"data_8400"} {"text":"data_8401"} {"text":"data_8402"} {"text":"data_8403"} {"text":"data_8404"} {"text":"data_8405"} {"text":"data_8406"} {"text":"data_8407"} {"text":"data_8408"} {"text":"data_8409"} {"text":"data_8410"} {"text":"data_8411"} {"text":"data_8412"} {"text":"data_8413"} {"text":"data_8414"} {"text":"data_8415"} {"text":"data_8416"} {"text":"data_8417"} {"text":"data_8418"} {"text":"data_8419"} 
{"text":"data_8420"} {"text":"data_8421"} {"text":"data_8422"} {"text":"data_8423"} {"text":"data_8424"} {"text":"data_8425"} {"text":"data_8426"} {"text":"data_8427"} {"text":"data_8428"} {"text":"data_8429"} {"text":"data_8430"} {"text":"data_8431"} {"text":"data_8432"} {"text":"data_8433"} {"text":"data_8434"} {"text":"data_8435"} {"text":"data_8436"} {"text":"data_8437"} {"text":"data_8438"} {"text":"data_8439"} {"text":"data_8440"} {"text":"data_8441"} {"text":"data_8442"} {"text":"data_8443"} {"text":"data_8444"} {"text":"data_8445"} {"text":"data_8446"} {"text":"data_8447"} {"text":"data_8448"} {"text":"data_8449"} {"text":"data_8450"} {"text":"data_8451"} {"text":"data_8452"} {"text":"data_8453"} {"text":"data_8454"} {"text":"data_8455"} {"text":"data_8456"} {"text":"data_8457"} {"text":"data_8458"} {"text":"data_8459"} {"text":"data_8460"} {"text":"data_8461"} {"text":"data_8462"} {"text":"data_8463"} {"text":"data_8464"} {"text":"data_8465"} {"text":"data_8466"} {"text":"data_8467"} {"text":"data_8468"} {"text":"data_8469"} {"text":"data_8470"} {"text":"data_8471"} {"text":"data_8472"} {"text":"data_8473"} {"text":"data_8474"} {"text":"data_8475"} {"text":"data_8476"} {"text":"data_8477"} {"text":"data_8478"} {"text":"data_8479"} {"text":"data_8480"} {"text":"data_8481"} {"text":"data_8482"} {"text":"data_8483"} {"text":"data_8484"} {"text":"data_8485"} {"text":"data_8486"} {"text":"data_8487"} {"text":"data_8488"} {"text":"data_8489"} {"text":"data_8490"} {"text":"data_8491"} {"text":"data_8492"} {"text":"data_8493"} {"text":"data_8494"} {"text":"data_8495"} {"text":"data_8496"} {"text":"data_8497"} {"text":"data_8498"} {"text":"data_8499"} {"text":"data_8500"} {"text":"data_8501"} {"text":"data_8502"} {"text":"data_8503"} {"text":"data_8504"} {"text":"data_8505"} {"text":"data_8506"} {"text":"data_8507"} {"text":"data_8508"} {"text":"data_8509"} {"text":"data_8510"} {"text":"data_8511"} {"text":"data_8512"} {"text":"data_8513"} {"text":"data_8514"} 
{"text":"data_8515"} {"text":"data_8516"} {"text":"data_8517"} {"text":"data_8518"} {"text":"data_8519"} {"text":"data_8520"} {"text":"data_8521"} {"text":"data_8522"} {"text":"data_8523"} {"text":"data_8524"} {"text":"data_8525"} {"text":"data_8526"} {"text":"data_8527"} {"text":"data_8528"} {"text":"data_8529"} {"text":"data_8530"} {"text":"data_8531"} {"text":"data_8532"} {"text":"data_8533"} {"text":"data_8534"} {"text":"data_8535"} {"text":"data_8536"} {"text":"data_8537"} {"text":"data_8538"} {"text":"data_8539"} {"text":"data_8540"} {"text":"data_8541"} {"text":"data_8542"} {"text":"data_8543"} {"text":"data_8544"} {"text":"data_8545"} {"text":"data_8546"} {"text":"data_8547"} {"text":"data_8548"} {"text":"data_8549"} {"text":"data_8550"} {"text":"data_8551"} {"text":"data_8552"} {"text":"data_8553"} {"text":"data_8554"} {"text":"data_8555"} {"text":"data_8556"} {"text":"data_8557"} {"text":"data_8558"} {"text":"data_8559"} {"text":"data_8560"} {"text":"data_8561"} {"text":"data_8562"} {"text":"data_8563"} {"text":"data_8564"} {"text":"data_8565"} {"text":"data_8566"} {"text":"data_8567"} {"text":"data_8568"} {"text":"data_8569"} {"text":"data_8570"} {"text":"data_8571"} {"text":"data_8572"} {"text":"data_8573"} {"text":"data_8574"} {"text":"data_8575"} {"text":"data_8576"} {"text":"data_8577"} {"text":"data_8578"} {"text":"data_8579"} {"text":"data_8580"} {"text":"data_8581"} {"text":"data_8582"} {"text":"data_8583"} {"text":"data_8584"} {"text":"data_8585"} {"text":"data_8586"} {"text":"data_8587"} {"text":"data_8588"} {"text":"data_8589"} {"text":"data_8590"} {"text":"data_8591"} {"text":"data_8592"} {"text":"data_8593"} {"text":"data_8594"} {"text":"data_8595"} {"text":"data_8596"} {"text":"data_8597"} {"text":"data_8598"} {"text":"data_8599"} {"text":"data_8600"} {"text":"data_8601"} {"text":"data_8602"} {"text":"data_8603"} {"text":"data_8604"} {"text":"data_8605"} {"text":"data_8606"} {"text":"data_8607"} {"text":"data_8608"} {"text":"data_8609"} 
{"text":"data_8610"} {"text":"data_8611"} {"text":"data_8612"} {"text":"data_8613"} {"text":"data_8614"} {"text":"data_8615"} {"text":"data_8616"} {"text":"data_8617"} {"text":"data_8618"} {"text":"data_8619"} {"text":"data_8620"} {"text":"data_8621"} {"text":"data_8622"} {"text":"data_8623"} {"text":"data_8624"} {"text":"data_8625"} {"text":"data_8626"} {"text":"data_8627"} {"text":"data_8628"} {"text":"data_8629"} {"text":"data_8630"} {"text":"data_8631"} {"text":"data_8632"} {"text":"data_8633"} {"text":"data_8634"} {"text":"data_8635"} {"text":"data_8636"} {"text":"data_8637"} {"text":"data_8638"} {"text":"data_8639"} {"text":"data_8640"} {"text":"data_8641"} {"text":"data_8642"} {"text":"data_8643"} {"text":"data_8644"} {"text":"data_8645"} {"text":"data_8646"} {"text":"data_8647"} {"text":"data_8648"} {"text":"data_8649"} {"text":"data_8650"} {"text":"data_8651"} {"text":"data_8652"} {"text":"data_8653"} {"text":"data_8654"} {"text":"data_8655"} {"text":"data_8656"} {"text":"data_8657"} {"text":"data_8658"} {"text":"data_8659"} {"text":"data_8660"} {"text":"data_8661"} {"text":"data_8662"} {"text":"data_8663"} {"text":"data_8664"} {"text":"data_8665"} {"text":"data_8666"} {"text":"data_8667"} {"text":"data_8668"} {"text":"data_8669"} {"text":"data_8670"} {"text":"data_8671"} {"text":"data_8672"} {"text":"data_8673"} {"text":"data_8674"} {"text":"data_8675"} {"text":"data_8676"} {"text":"data_8677"} {"text":"data_8678"} {"text":"data_8679"} {"text":"data_8680"} {"text":"data_8681"} {"text":"data_8682"} {"text":"data_8683"} {"text":"data_8684"} {"text":"data_8685"} {"text":"data_8686"} {"text":"data_8687"} {"text":"data_8688"} {"text":"data_8689"} {"text":"data_8690"} {"text":"data_8691"} {"text":"data_8692"} {"text":"data_8693"} {"text":"data_8694"} {"text":"data_8695"} {"text":"data_8696"} {"text":"data_8697"} {"text":"data_8698"} {"text":"data_8699"} {"text":"data_8700"} {"text":"data_8701"} {"text":"data_8702"} {"text":"data_8703"} {"text":"data_8704"} 
{"text":"data_8705"} {"text":"data_8706"} {"text":"data_8707"} {"text":"data_8708"} {"text":"data_8709"} {"text":"data_8710"} {"text":"data_8711"} {"text":"data_8712"} {"text":"data_8713"} {"text":"data_8714"} {"text":"data_8715"} {"text":"data_8716"} {"text":"data_8717"} {"text":"data_8718"} {"text":"data_8719"} {"text":"data_8720"} {"text":"data_8721"} {"text":"data_8722"} {"text":"data_8723"} {"text":"data_8724"} {"text":"data_8725"} {"text":"data_8726"} {"text":"data_8727"} {"text":"data_8728"} {"text":"data_8729"} {"text":"data_8730"} {"text":"data_8731"} {"text":"data_8732"} {"text":"data_8733"} {"text":"data_8734"} {"text":"data_8735"} {"text":"data_8736"} {"text":"data_8737"} {"text":"data_8738"} {"text":"data_8739"} {"text":"data_8740"} {"text":"data_8741"} {"text":"data_8742"} {"text":"data_8743"} {"text":"data_8744"} {"text":"data_8745"} {"text":"data_8746"} {"text":"data_8747"} {"text":"data_8748"} {"text":"data_8749"} {"text":"data_8750"} {"text":"data_8751"} {"text":"data_8752"} {"text":"data_8753"} {"text":"data_8754"} {"text":"data_8755"} {"text":"data_8756"} {"text":"data_8757"} {"text":"data_8758"} {"text":"data_8759"} {"text":"data_8760"} {"text":"data_8761"} {"text":"data_8762"} {"text":"data_8763"} {"text":"data_8764"} {"text":"data_8765"} {"text":"data_8766"} {"text":"data_8767"} {"text":"data_8768"} {"text":"data_8769"} {"text":"data_8770"} {"text":"data_8771"} {"text":"data_8772"} {"text":"data_8773"} {"text":"data_8774"} {"text":"data_8775"} {"text":"data_8776"} {"text":"data_8777"} {"text":"data_8778"} {"text":"data_8779"} {"text":"data_8780"} {"text":"data_8781"} {"text":"data_8782"} {"text":"data_8783"} {"text":"data_8784"} {"text":"data_8785"} {"text":"data_8786"} {"text":"data_8787"} {"text":"data_8788"} {"text":"data_8789"} {"text":"data_8790"} {"text":"data_8791"} {"text":"data_8792"} {"text":"data_8793"} {"text":"data_8794"} {"text":"data_8795"} {"text":"data_8796"} {"text":"data_8797"} {"text":"data_8798"} {"text":"data_8799"} 
{"text":"data_8800"} {"text":"data_8801"} {"text":"data_8802"} {"text":"data_8803"} {"text":"data_8804"} {"text":"data_8805"} {"text":"data_8806"} {"text":"data_8807"} {"text":"data_8808"} {"text":"data_8809"} {"text":"data_8810"} {"text":"data_8811"} {"text":"data_8812"} {"text":"data_8813"} {"text":"data_8814"} {"text":"data_8815"} {"text":"data_8816"} {"text":"data_8817"} {"text":"data_8818"} {"text":"data_8819"} {"text":"data_8820"} {"text":"data_8821"} {"text":"data_8822"} {"text":"data_8823"} {"text":"data_8824"} {"text":"data_8825"} {"text":"data_8826"} {"text":"data_8827"} {"text":"data_8828"} {"text":"data_8829"} {"text":"data_8830"} {"text":"data_8831"} {"text":"data_8832"} {"text":"data_8833"} {"text":"data_8834"} {"text":"data_8835"} {"text":"data_8836"} {"text":"data_8837"} {"text":"data_8838"} {"text":"data_8839"} {"text":"data_8840"} {"text":"data_8841"} {"text":"data_8842"} {"text":"data_8843"} {"text":"data_8844"} {"text":"data_8845"} {"text":"data_8846"} {"text":"data_8847"} {"text":"data_8848"} {"text":"data_8849"} {"text":"data_8850"} {"text":"data_8851"} {"text":"data_8852"} {"text":"data_8853"} {"text":"data_8854"} {"text":"data_8855"} {"text":"data_8856"} {"text":"data_8857"} {"text":"data_8858"} {"text":"data_8859"} {"text":"data_8860"} {"text":"data_8861"} {"text":"data_8862"} {"text":"data_8863"} {"text":"data_8864"} {"text":"data_8865"} {"text":"data_8866"} {"text":"data_8867"} {"text":"data_8868"} {"text":"data_8869"} {"text":"data_8870"} {"text":"data_8871"} {"text":"data_8872"} {"text":"data_8873"} {"text":"data_8874"} {"text":"data_8875"} {"text":"data_8876"} {"text":"data_8877"} {"text":"data_8878"} {"text":"data_8879"} {"text":"data_8880"} {"text":"data_8881"} {"text":"data_8882"} {"text":"data_8883"} {"text":"data_8884"} {"text":"data_8885"} {"text":"data_8886"} {"text":"data_8887"} {"text":"data_8888"} {"text":"data_8889"} {"text":"data_8890"} {"text":"data_8891"} {"text":"data_8892"} {"text":"data_8893"} {"text":"data_8894"} 
{"text":"data_8895"} {"text":"data_8896"} {"text":"data_8897"} {"text":"data_8898"} {"text":"data_8899"} {"text":"data_8900"} {"text":"data_8901"} {"text":"data_8902"} {"text":"data_8903"} {"text":"data_8904"} {"text":"data_8905"} {"text":"data_8906"} {"text":"data_8907"} {"text":"data_8908"} {"text":"data_8909"} {"text":"data_8910"} {"text":"data_8911"} {"text":"data_8912"} {"text":"data_8913"} {"text":"data_8914"} {"text":"data_8915"} {"text":"data_8916"} {"text":"data_8917"} {"text":"data_8918"} {"text":"data_8919"} {"text":"data_8920"} {"text":"data_8921"} {"text":"data_8922"} {"text":"data_8923"} {"text":"data_8924"} {"text":"data_8925"} {"text":"data_8926"} {"text":"data_8927"} {"text":"data_8928"} {"text":"data_8929"} {"text":"data_8930"} {"text":"data_8931"} {"text":"data_8932"} {"text":"data_8933"} {"text":"data_8934"} {"text":"data_8935"} {"text":"data_8936"} {"text":"data_8937"} {"text":"data_8938"} {"text":"data_8939"} {"text":"data_8940"} {"text":"data_8941"} {"text":"data_8942"} {"text":"data_8943"} {"text":"data_8944"} {"text":"data_8945"} {"text":"data_8946"} {"text":"data_8947"} {"text":"data_8948"} {"text":"data_8949"} {"text":"data_8950"} {"text":"data_8951"} {"text":"data_8952"} {"text":"data_8953"} {"text":"data_8954"} {"text":"data_8955"} {"text":"data_8956"} {"text":"data_8957"} {"text":"data_8958"} {"text":"data_8959"} {"text":"data_8960"} {"text":"data_8961"} {"text":"data_8962"} {"text":"data_8963"} {"text":"data_8964"} {"text":"data_8965"} {"text":"data_8966"} {"text":"data_8967"} {"text":"data_8968"} {"text":"data_8969"} {"text":"data_8970"} {"text":"data_8971"} {"text":"data_8972"} {"text":"data_8973"} {"text":"data_8974"} {"text":"data_8975"} {"text":"data_8976"} {"text":"data_8977"} {"text":"data_8978"} {"text":"data_8979"} {"text":"data_8980"} {"text":"data_8981"} {"text":"data_8982"} {"text":"data_8983"} {"text":"data_8984"} {"text":"data_8985"} {"text":"data_8986"} {"text":"data_8987"} {"text":"data_8988"} {"text":"data_8989"} 
{"text":"data_8990"} {"text":"data_8991"} {"text":"data_8992"} {"text":"data_8993"} {"text":"data_8994"} {"text":"data_8995"} {"text":"data_8996"} {"text":"data_8997"} {"text":"data_8998"} {"text":"data_8999"} {"text":"data_9000"} {"text":"data_9001"} {"text":"data_9002"} {"text":"data_9003"} {"text":"data_9004"} {"text":"data_9005"} {"text":"data_9006"} {"text":"data_9007"} {"text":"data_9008"} {"text":"data_9009"} {"text":"data_9010"} {"text":"data_9011"} {"text":"data_9012"} {"text":"data_9013"} {"text":"data_9014"} {"text":"data_9015"} {"text":"data_9016"} {"text":"data_9017"} {"text":"data_9018"} {"text":"data_9019"} {"text":"data_9020"} {"text":"data_9021"} {"text":"data_9022"} {"text":"data_9023"} {"text":"data_9024"} {"text":"data_9025"} {"text":"data_9026"} {"text":"data_9027"} {"text":"data_9028"} {"text":"data_9029"} {"text":"data_9030"} {"text":"data_9031"} {"text":"data_9032"} {"text":"data_9033"} {"text":"data_9034"} {"text":"data_9035"} {"text":"data_9036"} {"text":"data_9037"} {"text":"data_9038"} {"text":"data_9039"} {"text":"data_9040"} {"text":"data_9041"} {"text":"data_9042"} {"text":"data_9043"} {"text":"data_9044"} {"text":"data_9045"} {"text":"data_9046"} {"text":"data_9047"} {"text":"data_9048"} {"text":"data_9049"} {"text":"data_9050"} {"text":"data_9051"} {"text":"data_9052"} {"text":"data_9053"} {"text":"data_9054"} {"text":"data_9055"} {"text":"data_9056"} {"text":"data_9057"} {"text":"data_9058"} {"text":"data_9059"} {"text":"data_9060"} {"text":"data_9061"} {"text":"data_9062"} {"text":"data_9063"} {"text":"data_9064"} {"text":"data_9065"} {"text":"data_9066"} {"text":"data_9067"} {"text":"data_9068"} {"text":"data_9069"} {"text":"data_9070"} {"text":"data_9071"} {"text":"data_9072"} {"text":"data_9073"} {"text":"data_9074"} {"text":"data_9075"} {"text":"data_9076"} {"text":"data_9077"} {"text":"data_9078"} {"text":"data_9079"} {"text":"data_9080"} {"text":"data_9081"} {"text":"data_9082"} {"text":"data_9083"} {"text":"data_9084"} 
{"text":"data_9085"} {"text":"data_9086"} {"text":"data_9087"} {"text":"data_9088"} {"text":"data_9089"} {"text":"data_9090"} {"text":"data_9091"} {"text":"data_9092"} {"text":"data_9093"} {"text":"data_9094"} {"text":"data_9095"} {"text":"data_9096"} {"text":"data_9097"} {"text":"data_9098"} {"text":"data_9099"} {"text":"data_9100"} {"text":"data_9101"} {"text":"data_9102"} {"text":"data_9103"} {"text":"data_9104"} {"text":"data_9105"} {"text":"data_9106"} {"text":"data_9107"} {"text":"data_9108"} {"text":"data_9109"} {"text":"data_9110"} {"text":"data_9111"} {"text":"data_9112"} {"text":"data_9113"} {"text":"data_9114"} {"text":"data_9115"} {"text":"data_9116"} {"text":"data_9117"} {"text":"data_9118"} {"text":"data_9119"} {"text":"data_9120"} {"text":"data_9121"} {"text":"data_9122"} {"text":"data_9123"} {"text":"data_9124"} {"text":"data_9125"} {"text":"data_9126"} {"text":"data_9127"} {"text":"data_9128"} {"text":"data_9129"} {"text":"data_9130"} {"text":"data_9131"} {"text":"data_9132"} {"text":"data_9133"} {"text":"data_9134"} {"text":"data_9135"} {"text":"data_9136"} {"text":"data_9137"} {"text":"data_9138"} {"text":"data_9139"} {"text":"data_9140"} {"text":"data_9141"} {"text":"data_9142"} {"text":"data_9143"} {"text":"data_9144"} {"text":"data_9145"} {"text":"data_9146"} {"text":"data_9147"} {"text":"data_9148"} {"text":"data_9149"} {"text":"data_9150"} {"text":"data_9151"} {"text":"data_9152"} {"text":"data_9153"} {"text":"data_9154"} {"text":"data_9155"} {"text":"data_9156"} {"text":"data_9157"} {"text":"data_9158"} {"text":"data_9159"} {"text":"data_9160"} {"text":"data_9161"} {"text":"data_9162"} {"text":"data_9163"} {"text":"data_9164"} {"text":"data_9165"} {"text":"data_9166"} {"text":"data_9167"} {"text":"data_9168"} {"text":"data_9169"} {"text":"data_9170"} {"text":"data_9171"} {"text":"data_9172"} {"text":"data_9173"} {"text":"data_9174"} {"text":"data_9175"} {"text":"data_9176"} {"text":"data_9177"} {"text":"data_9178"} {"text":"data_9179"} 
{"text":"data_9180"} {"text":"data_9181"} {"text":"data_9182"} {"text":"data_9183"} {"text":"data_9184"} {"text":"data_9185"} {"text":"data_9186"} {"text":"data_9187"} {"text":"data_9188"} {"text":"data_9189"} {"text":"data_9190"} {"text":"data_9191"} {"text":"data_9192"} {"text":"data_9193"} {"text":"data_9194"} {"text":"data_9195"} {"text":"data_9196"} {"text":"data_9197"} {"text":"data_9198"} {"text":"data_9199"} {"text":"data_9200"} {"text":"data_9201"} {"text":"data_9202"} {"text":"data_9203"} {"text":"data_9204"} {"text":"data_9205"} {"text":"data_9206"} {"text":"data_9207"} {"text":"data_9208"} {"text":"data_9209"} {"text":"data_9210"} {"text":"data_9211"} {"text":"data_9212"} {"text":"data_9213"} {"text":"data_9214"} {"text":"data_9215"} {"text":"data_9216"} {"text":"data_9217"} {"text":"data_9218"} {"text":"data_9219"} {"text":"data_9220"} {"text":"data_9221"} {"text":"data_9222"} {"text":"data_9223"} {"text":"data_9224"} {"text":"data_9225"} {"text":"data_9226"} {"text":"data_9227"} {"text":"data_9228"} {"text":"data_9229"} {"text":"data_9230"} {"text":"data_9231"} {"text":"data_9232"} {"text":"data_9233"} {"text":"data_9234"} {"text":"data_9235"} {"text":"data_9236"} {"text":"data_9237"} {"text":"data_9238"} {"text":"data_9239"} {"text":"data_9240"} {"text":"data_9241"} {"text":"data_9242"} {"text":"data_9243"} {"text":"data_9244"} {"text":"data_9245"} {"text":"data_9246"} {"text":"data_9247"} {"text":"data_9248"} {"text":"data_9249"} {"text":"data_9250"} {"text":"data_9251"} {"text":"data_9252"} {"text":"data_9253"} {"text":"data_9254"} {"text":"data_9255"} {"text":"data_9256"} {"text":"data_9257"} {"text":"data_9258"} {"text":"data_9259"} {"text":"data_9260"} {"text":"data_9261"} {"text":"data_9262"} {"text":"data_9263"} {"text":"data_9264"} {"text":"data_9265"} {"text":"data_9266"} {"text":"data_9267"} {"text":"data_9268"} {"text":"data_9269"} {"text":"data_9270"} {"text":"data_9271"} {"text":"data_9272"} {"text":"data_9273"} {"text":"data_9274"} 
{"text":"data_9275"} {"text":"data_9276"} {"text":"data_9277"} {"text":"data_9278"} {"text":"data_9279"} {"text":"data_9280"} {"text":"data_9281"} {"text":"data_9282"} {"text":"data_9283"} {"text":"data_9284"} {"text":"data_9285"} {"text":"data_9286"} {"text":"data_9287"} {"text":"data_9288"} {"text":"data_9289"} {"text":"data_9290"} {"text":"data_9291"} {"text":"data_9292"} {"text":"data_9293"} {"text":"data_9294"} {"text":"data_9295"} {"text":"data_9296"} {"text":"data_9297"} {"text":"data_9298"} {"text":"data_9299"} {"text":"data_9300"} {"text":"data_9301"} {"text":"data_9302"} {"text":"data_9303"} {"text":"data_9304"} {"text":"data_9305"} {"text":"data_9306"} {"text":"data_9307"} {"text":"data_9308"} {"text":"data_9309"} {"text":"data_9310"} {"text":"data_9311"} {"text":"data_9312"} {"text":"data_9313"} {"text":"data_9314"} {"text":"data_9315"} {"text":"data_9316"} {"text":"data_9317"} {"text":"data_9318"} {"text":"data_9319"} {"text":"data_9320"} {"text":"data_9321"} {"text":"data_9322"} {"text":"data_9323"} {"text":"data_9324"} {"text":"data_9325"} {"text":"data_9326"} {"text":"data_9327"} {"text":"data_9328"} {"text":"data_9329"} {"text":"data_9330"} {"text":"data_9331"} {"text":"data_9332"} {"text":"data_9333"} {"text":"data_9334"} {"text":"data_9335"} {"text":"data_9336"} {"text":"data_9337"} {"text":"data_9338"} {"text":"data_9339"} {"text":"data_9340"} {"text":"data_9341"} {"text":"data_9342"} {"text":"data_9343"} {"text":"data_9344"} {"text":"data_9345"} {"text":"data_9346"} {"text":"data_9347"} {"text":"data_9348"} {"text":"data_9349"} {"text":"data_9350"} {"text":"data_9351"} {"text":"data_9352"} {"text":"data_9353"} {"text":"data_9354"} {"text":"data_9355"} {"text":"data_9356"} {"text":"data_9357"} {"text":"data_9358"} {"text":"data_9359"} {"text":"data_9360"} {"text":"data_9361"} {"text":"data_9362"} {"text":"data_9363"} {"text":"data_9364"} {"text":"data_9365"} {"text":"data_9366"} {"text":"data_9367"} {"text":"data_9368"} {"text":"data_9369"} 
{"text":"data_9370"} {"text":"data_9371"} {"text":"data_9372"} {"text":"data_9373"} {"text":"data_9374"} {"text":"data_9375"} {"text":"data_9376"} {"text":"data_9377"} {"text":"data_9378"} {"text":"data_9379"} {"text":"data_9380"} {"text":"data_9381"} {"text":"data_9382"} {"text":"data_9383"} {"text":"data_9384"} {"text":"data_9385"} {"text":"data_9386"} {"text":"data_9387"} {"text":"data_9388"} {"text":"data_9389"} {"text":"data_9390"} {"text":"data_9391"} {"text":"data_9392"} {"text":"data_9393"} {"text":"data_9394"} {"text":"data_9395"} {"text":"data_9396"} {"text":"data_9397"} {"text":"data_9398"} {"text":"data_9399"} {"text":"data_9400"} {"text":"data_9401"} {"text":"data_9402"} {"text":"data_9403"} {"text":"data_9404"} {"text":"data_9405"} {"text":"data_9406"} {"text":"data_9407"} {"text":"data_9408"} {"text":"data_9409"} {"text":"data_9410"} {"text":"data_9411"} {"text":"data_9412"} {"text":"data_9413"} {"text":"data_9414"} {"text":"data_9415"} {"text":"data_9416"} {"text":"data_9417"} {"text":"data_9418"} {"text":"data_9419"} {"text":"data_9420"} {"text":"data_9421"} {"text":"data_9422"} {"text":"data_9423"} {"text":"data_9424"} {"text":"data_9425"} {"text":"data_9426"} {"text":"data_9427"} {"text":"data_9428"} {"text":"data_9429"} {"text":"data_9430"} {"text":"data_9431"} {"text":"data_9432"} {"text":"data_9433"} {"text":"data_9434"} {"text":"data_9435"} {"text":"data_9436"} {"text":"data_9437"} {"text":"data_9438"} {"text":"data_9439"} {"text":"data_9440"} {"text":"data_9441"} {"text":"data_9442"} {"text":"data_9443"} {"text":"data_9444"} {"text":"data_9445"} {"text":"data_9446"} {"text":"data_9447"} {"text":"data_9448"} {"text":"data_9449"} {"text":"data_9450"} {"text":"data_9451"} {"text":"data_9452"} {"text":"data_9453"} {"text":"data_9454"} {"text":"data_9455"} {"text":"data_9456"} {"text":"data_9457"} {"text":"data_9458"} {"text":"data_9459"} {"text":"data_9460"} {"text":"data_9461"} {"text":"data_9462"} {"text":"data_9463"} {"text":"data_9464"} 
{"text":"data_9465"} {"text":"data_9466"} {"text":"data_9467"} {"text":"data_9468"} {"text":"data_9469"} {"text":"data_9470"} {"text":"data_9471"} {"text":"data_9472"} {"text":"data_9473"} {"text":"data_9474"} {"text":"data_9475"} {"text":"data_9476"} {"text":"data_9477"} {"text":"data_9478"} {"text":"data_9479"} {"text":"data_9480"} {"text":"data_9481"} {"text":"data_9482"} {"text":"data_9483"} {"text":"data_9484"} {"text":"data_9485"} {"text":"data_9486"} {"text":"data_9487"} {"text":"data_9488"} {"text":"data_9489"} {"text":"data_9490"} {"text":"data_9491"} {"text":"data_9492"} {"text":"data_9493"} {"text":"data_9494"} {"text":"data_9495"} {"text":"data_9496"} {"text":"data_9497"} {"text":"data_9498"} {"text":"data_9499"} {"text":"data_9500"} {"text":"data_9501"} {"text":"data_9502"} {"text":"data_9503"} {"text":"data_9504"} {"text":"data_9505"} {"text":"data_9506"} {"text":"data_9507"} {"text":"data_9508"} {"text":"data_9509"} {"text":"data_9510"} {"text":"data_9511"} {"text":"data_9512"} {"text":"data_9513"} {"text":"data_9514"} {"text":"data_9515"} {"text":"data_9516"} {"text":"data_9517"} {"text":"data_9518"} {"text":"data_9519"} {"text":"data_9520"} {"text":"data_9521"} {"text":"data_9522"} {"text":"data_9523"} {"text":"data_9524"} {"text":"data_9525"} {"text":"data_9526"} {"text":"data_9527"} {"text":"data_9528"} {"text":"data_9529"} {"text":"data_9530"} {"text":"data_9531"} {"text":"data_9532"} {"text":"data_9533"} {"text":"data_9534"} {"text":"data_9535"} {"text":"data_9536"} {"text":"data_9537"} {"text":"data_9538"} {"text":"data_9539"} {"text":"data_9540"} {"text":"data_9541"} {"text":"data_9542"} {"text":"data_9543"} {"text":"data_9544"} {"text":"data_9545"} {"text":"data_9546"} {"text":"data_9547"} {"text":"data_9548"} {"text":"data_9549"} {"text":"data_9550"} {"text":"data_9551"} {"text":"data_9552"} {"text":"data_9553"} {"text":"data_9554"} {"text":"data_9555"} {"text":"data_9556"} {"text":"data_9557"} {"text":"data_9558"} {"text":"data_9559"} 
{"text":"data_9560"} {"text":"data_9561"} {"text":"data_9562"} {"text":"data_9563"} {"text":"data_9564"} {"text":"data_9565"} {"text":"data_9566"} {"text":"data_9567"} {"text":"data_9568"} {"text":"data_9569"} {"text":"data_9570"} {"text":"data_9571"} {"text":"data_9572"} {"text":"data_9573"} {"text":"data_9574"} {"text":"data_9575"} {"text":"data_9576"} {"text":"data_9577"} {"text":"data_9578"} {"text":"data_9579"} {"text":"data_9580"} {"text":"data_9581"} {"text":"data_9582"} {"text":"data_9583"} {"text":"data_9584"} {"text":"data_9585"} {"text":"data_9586"} {"text":"data_9587"} {"text":"data_9588"} {"text":"data_9589"} {"text":"data_9590"} {"text":"data_9591"} {"text":"data_9592"} {"text":"data_9593"} {"text":"data_9594"} {"text":"data_9595"} {"text":"data_9596"} {"text":"data_9597"} {"text":"data_9598"} {"text":"data_9599"} {"text":"data_9600"} {"text":"data_9601"} {"text":"data_9602"} {"text":"data_9603"} {"text":"data_9604"} {"text":"data_9605"} {"text":"data_9606"} {"text":"data_9607"} {"text":"data_9608"} {"text":"data_9609"} {"text":"data_9610"} {"text":"data_9611"} {"text":"data_9612"} {"text":"data_9613"} {"text":"data_9614"} {"text":"data_9615"} {"text":"data_9616"} {"text":"data_9617"} {"text":"data_9618"} {"text":"data_9619"} {"text":"data_9620"} {"text":"data_9621"} {"text":"data_9622"} {"text":"data_9623"} {"text":"data_9624"} {"text":"data_9625"} {"text":"data_9626"} {"text":"data_9627"} {"text":"data_9628"} {"text":"data_9629"} {"text":"data_9630"} {"text":"data_9631"} {"text":"data_9632"} {"text":"data_9633"} {"text":"data_9634"} {"text":"data_9635"} {"text":"data_9636"} {"text":"data_9637"} {"text":"data_9638"} {"text":"data_9639"} {"text":"data_9640"} {"text":"data_9641"} {"text":"data_9642"} {"text":"data_9643"} {"text":"data_9644"} {"text":"data_9645"} {"text":"data_9646"} {"text":"data_9647"} {"text":"data_9648"} {"text":"data_9649"} {"text":"data_9650"} {"text":"data_9651"} {"text":"data_9652"} {"text":"data_9653"} {"text":"data_9654"} 
{"text":"data_9655"} {"text":"data_9656"} {"text":"data_9657"} {"text":"data_9658"} {"text":"data_9659"} {"text":"data_9660"} {"text":"data_9661"} {"text":"data_9662"} {"text":"data_9663"} {"text":"data_9664"} {"text":"data_9665"} {"text":"data_9666"} {"text":"data_9667"} {"text":"data_9668"} {"text":"data_9669"} {"text":"data_9670"} {"text":"data_9671"} {"text":"data_9672"} {"text":"data_9673"} {"text":"data_9674"} {"text":"data_9675"} {"text":"data_9676"} {"text":"data_9677"} {"text":"data_9678"} {"text":"data_9679"} {"text":"data_9680"} {"text":"data_9681"} {"text":"data_9682"} {"text":"data_9683"} {"text":"data_9684"} {"text":"data_9685"} {"text":"data_9686"} {"text":"data_9687"} {"text":"data_9688"} {"text":"data_9689"} {"text":"data_9690"} {"text":"data_9691"} {"text":"data_9692"} {"text":"data_9693"} {"text":"data_9694"} {"text":"data_9695"} {"text":"data_9696"} {"text":"data_9697"} {"text":"data_9698"} {"text":"data_9699"} {"text":"data_9700"} {"text":"data_9701"} {"text":"data_9702"} {"text":"data_9703"} {"text":"data_9704"} {"text":"data_9705"} {"text":"data_9706"} {"text":"data_9707"} {"text":"data_9708"} {"text":"data_9709"} {"text":"data_9710"} {"text":"data_9711"} {"text":"data_9712"} {"text":"data_9713"} {"text":"data_9714"} {"text":"data_9715"} {"text":"data_9716"} {"text":"data_9717"} {"text":"data_9718"} {"text":"data_9719"} {"text":"data_9720"} {"text":"data_9721"} {"text":"data_9722"} {"text":"data_9723"} {"text":"data_9724"} {"text":"data_9725"} {"text":"data_9726"} {"text":"data_9727"} {"text":"data_9728"} {"text":"data_9729"} {"text":"data_9730"} {"text":"data_9731"} {"text":"data_9732"} {"text":"data_9733"} {"text":"data_9734"} {"text":"data_9735"} {"text":"data_9736"} {"text":"data_9737"} {"text":"data_9738"} {"text":"data_9739"} {"text":"data_9740"} {"text":"data_9741"} {"text":"data_9742"} {"text":"data_9743"} {"text":"data_9744"} {"text":"data_9745"} {"text":"data_9746"} {"text":"data_9747"} {"text":"data_9748"} {"text":"data_9749"} 
{"text":"data_9750"} {"text":"data_9751"} {"text":"data_9752"} {"text":"data_9753"} {"text":"data_9754"} {"text":"data_9755"} {"text":"data_9756"} {"text":"data_9757"} {"text":"data_9758"} {"text":"data_9759"} {"text":"data_9760"} {"text":"data_9761"} {"text":"data_9762"} {"text":"data_9763"} {"text":"data_9764"} {"text":"data_9765"} {"text":"data_9766"} {"text":"data_9767"} {"text":"data_9768"} {"text":"data_9769"} {"text":"data_9770"} {"text":"data_9771"} {"text":"data_9772"} {"text":"data_9773"} {"text":"data_9774"} {"text":"data_9775"} {"text":"data_9776"} {"text":"data_9777"} {"text":"data_9778"} {"text":"data_9779"} {"text":"data_9780"} {"text":"data_9781"} {"text":"data_9782"} {"text":"data_9783"} {"text":"data_9784"} {"text":"data_9785"} {"text":"data_9786"} {"text":"data_9787"} {"text":"data_9788"} {"text":"data_9789"} {"text":"data_9790"} {"text":"data_9791"} {"text":"data_9792"} {"text":"data_9793"} {"text":"data_9794"} {"text":"data_9795"} {"text":"data_9796"} {"text":"data_9797"} {"text":"data_9798"} {"text":"data_9799"} {"text":"data_9800"} {"text":"data_9801"} {"text":"data_9802"} {"text":"data_9803"} {"text":"data_9804"} {"text":"data_9805"} {"text":"data_9806"} {"text":"data_9807"} {"text":"data_9808"} {"text":"data_9809"} {"text":"data_9810"} {"text":"data_9811"} {"text":"data_9812"} {"text":"data_9813"} {"text":"data_9814"} {"text":"data_9815"} {"text":"data_9816"} {"text":"data_9817"} {"text":"data_9818"} {"text":"data_9819"} {"text":"data_9820"} {"text":"data_9821"} {"text":"data_9822"} {"text":"data_9823"} {"text":"data_9824"} {"text":"data_9825"} {"text":"data_9826"} {"text":"data_9827"} {"text":"data_9828"} {"text":"data_9829"} {"text":"data_9830"} {"text":"data_9831"} {"text":"data_9832"} {"text":"data_9833"} {"text":"data_9834"} {"text":"data_9835"} {"text":"data_9836"} {"text":"data_9837"} {"text":"data_9838"} {"text":"data_9839"} {"text":"data_9840"} {"text":"data_9841"} {"text":"data_9842"} {"text":"data_9843"} {"text":"data_9844"} 
{"text":"data_9845"} {"text":"data_9846"} {"text":"data_9847"} {"text":"data_9848"} {"text":"data_9849"} {"text":"data_9850"} {"text":"data_9851"} {"text":"data_9852"} {"text":"data_9853"} {"text":"data_9854"} {"text":"data_9855"} {"text":"data_9856"} {"text":"data_9857"} {"text":"data_9858"} {"text":"data_9859"} {"text":"data_9860"} {"text":"data_9861"} {"text":"data_9862"} {"text":"data_9863"} {"text":"data_9864"} {"text":"data_9865"} {"text":"data_9866"} {"text":"data_9867"} {"text":"data_9868"} {"text":"data_9869"} {"text":"data_9870"} {"text":"data_9871"} {"text":"data_9872"} {"text":"data_9873"} {"text":"data_9874"} {"text":"data_9875"} {"text":"data_9876"} {"text":"data_9877"} {"text":"data_9878"} {"text":"data_9879"} {"text":"data_9880"} {"text":"data_9881"} {"text":"data_9882"} {"text":"data_9883"} {"text":"data_9884"} {"text":"data_9885"} {"text":"data_9886"} {"text":"data_9887"} {"text":"data_9888"} {"text":"data_9889"} {"text":"data_9890"} {"text":"data_9891"} {"text":"data_9892"} {"text":"data_9893"} {"text":"data_9894"} {"text":"data_9895"} {"text":"data_9896"} {"text":"data_9897"} {"text":"data_9898"} {"text":"data_9899"} {"text":"data_9900"} {"text":"data_9901"} {"text":"data_9902"} {"text":"data_9903"} {"text":"data_9904"} {"text":"data_9905"} {"text":"data_9906"} {"text":"data_9907"} {"text":"data_9908"} {"text":"data_9909"} {"text":"data_9910"} {"text":"data_9911"} {"text":"data_9912"} {"text":"data_9913"} {"text":"data_9914"} {"text":"data_9915"} {"text":"data_9916"} {"text":"data_9917"} {"text":"data_9918"} {"text":"data_9919"} {"text":"data_9920"} {"text":"data_9921"} {"text":"data_9922"} {"text":"data_9923"} {"text":"data_9924"} {"text":"data_9925"} {"text":"data_9926"} {"text":"data_9927"} {"text":"data_9928"} {"text":"data_9929"} {"text":"data_9930"} {"text":"data_9931"} {"text":"data_9932"} {"text":"data_9933"} {"text":"data_9934"} {"text":"data_9935"} {"text":"data_9936"} {"text":"data_9937"} {"text":"data_9938"} {"text":"data_9939"} 
{"text":"data_9940"} {"text":"data_9941"} {"text":"data_9942"} {"text":"data_9943"} {"text":"data_9944"} {"text":"data_9945"} {"text":"data_9946"} {"text":"data_9947"} {"text":"data_9948"} {"text":"data_9949"} {"text":"data_9950"} {"text":"data_9951"} {"text":"data_9952"} {"text":"data_9953"} {"text":"data_9954"} {"text":"data_9955"} {"text":"data_9956"} {"text":"data_9957"} {"text":"data_9958"} {"text":"data_9959"} {"text":"data_9960"} {"text":"data_9961"} {"text":"data_9962"} {"text":"data_9963"} {"text":"data_9964"} {"text":"data_9965"} {"text":"data_9966"} {"text":"data_9967"} {"text":"data_9968"} {"text":"data_9969"} {"text":"data_9970"} {"text":"data_9971"} {"text":"data_9972"} {"text":"data_9973"} {"text":"data_9974"} {"text":"data_9975"} {"text":"data_9976"} {"text":"data_9977"} {"text":"data_9978"} {"text":"data_9979"} {"text":"data_9980"} {"text":"data_9981"} {"text":"data_9982"} {"text":"data_9983"} {"text":"data_9984"} {"text":"data_9985"} {"text":"data_9986"} {"text":"data_9987"} {"text":"data_9988"} {"text":"data_9989"} {"text":"data_9990"} {"text":"data_9991"} {"text":"data_9992"} {"text":"data_9993"} {"text":"data_9994"} {"text":"data_9995"} {"text":"data_9996"} {"text":"data_9997"} {"text":"data_9998"} {"text":"data_9999"}
promptflow/src/promptflow/tests/test_configs/datas/load_data_cases/10k/5k.2.jsonl/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/datas/load_data_cases/10k/5k.2.jsonl", "repo_id": "promptflow", "token_count": 45023 }
56
def my_flow(text: str): raise Exception("dummy exception")
promptflow/src/promptflow/tests/test_configs/eager_flows/dummy_flow_with_exception/entry.py/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/dummy_flow_with_exception/entry.py", "repo_id": "promptflow", "token_count": 21 }
57
entry: my_func
promptflow/src/promptflow/tests/test_configs/eager_flows/invalid_no_path/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/invalid_no_path/flow.dag.yaml", "repo_id": "promptflow", "token_count": 5 }
58
inputs: text: type: string default: hello outputs: output: type: string reference: ${nodeC.output} nodes: - name: nodeA type: python source: type: code path: pass_through.py inputs: input1: ${inputs.text} activate: when: ${inputs.text} is: hi - name: nodeB type: python source: type: code path: pass_through.py inputs: input1: ${inputs.text} activate: when: ${inputs.text} is: hi - name: nodeC type: python source: type: code path: summary_result.py inputs: input1: ${nodeA.output} input2: ${nodeB.output} activate: when: dummy is: dummy
promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/flow.dag.yaml", "repo_id": "promptflow", "token_count": 280 }
59
[ { "groundtruth": "Tomorrow's weather will be sunny.", "prediction": "The weather will be sunny tomorrow." }, { "groundtruth": "Hello,", "prediction": "World." }, { "groundtruth": "Promptflow is a super easy-to-use tool, right?", "prediction": "Yes!" } ]
promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/samples.json/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/samples.json", "repo_id": "promptflow", "token_count": 111 }
60
from promptflow import tool import asyncio @tool async def passthrough_str_and_wait(input1: str, wait_seconds=3) -> str: assert isinstance(input1, str), f"input1 should be a string, got {input1}" print(f"Wait for {wait_seconds} seconds in async function") for i in range(wait_seconds): print(i) await asyncio.sleep(1) return input1
promptflow/src/promptflow/tests/test_configs/flows/async_tools/async_passthrough.py/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/async_tools/async_passthrough.py", "repo_id": "promptflow", "token_count": 137 }
61
{ "incident_id_extractor.completed": 3, "job_info_extractor.completed": 1, "job_info_extractor.bypassed": 2, "incident_info_extractor.completed": 2, "incident_info_extractor.bypassed": 1, "icm_retriever.completed": 1, "icm_retriever.bypassed": 2, "tsg_retriever.completed": 1, "tsg_retriever.bypassed": 2, "kql_tsg_retriever.completed": 1, "kql_tsg_retriever.bypassed": 2, "investigation_steps.completed": 2, "investigation_steps.bypassed": 1, "retriever_summary.completed": 2, "retriever_summary.bypassed": 1, "investigation_method.completed": 3 }
promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/expected_status_summary.json/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/expected_status_summary.json", "repo_id": "promptflow", "token_count": 282 }
62
{ "square.bypassed": 2, "double.completed": 2, "collect_node.completed": 4, "double.bypassed": 2, "square.completed": 2, "aggregation_double.completed": 1, "aggregation_square.completed": 1 }
promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/expected_status_summary.json/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/expected_status_summary.json", "repo_id": "promptflow", "token_count": 96 }
63
# syntax=docker/dockerfile:1 FROM docker.io/continuumio/miniconda3:latest WORKDIR / COPY ./flow /flow # create conda environment RUN conda create -n promptflow-serve python=3.9.16 pip=23.0.1 -q -y && \ conda run -n promptflow-serve \ pip install -r /flow/requirements_txt && \ conda run -n promptflow-serve pip install keyrings.alt && \ conda run -n promptflow-serve pip install gunicorn==20.1.0 && \ conda run -n promptflow-serve pip cache purge && \ conda clean -a -y RUN apt-get update && apt-get install -y runit EXPOSE 8080 COPY ./connections/* /connections/ # reset runsvdir RUN rm -rf /var/runit COPY ./runit /var/runit # grant permission RUN chmod -R +x /var/runit COPY ./start.sh / CMD ["bash", "./start.sh"]
promptflow/src/promptflow/tests/test_configs/flows/export/linux/Dockerfile/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/export/linux/Dockerfile", "repo_id": "promptflow", "token_count": 297 }
64
inputs: question: type: string outputs: output: type: string reference: ${test_langchain_traces.output} nodes: - name: test_langchain_traces type: python source: type: code path: test_langchain_traces.py inputs: question: ${inputs.question} conn: azure_open_ai_connection
promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/flow.dag.yaml", "repo_id": "promptflow", "token_count": 123 }
65
from promptflow import tool @tool def echo(text): """yield the input string.""" echo_text = "Echo - " + text for word in echo_text.split(): yield word
promptflow/src/promptflow/tests/test_configs/flows/generator_nodes/echo.py/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/generator_nodes/echo.py", "repo_id": "promptflow", "token_count": 65 }
66
echo Hello Promptflow!
promptflow/src/promptflow/tests/test_configs/flows/intent-copilot/setup.sh/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/intent-copilot/setup.sh", "repo_id": "promptflow", "token_count": 6 }
67
$schema: https://azuremlschemas.azureedge.net/latest/flow.schema.json name: classification_accuracy_eval type: evaluate path: azureml://datastores/workspaceworkingdirectory/paths/Users/wanhan/my_flow_snapshot/flow.dag.yaml
promptflow/src/promptflow/tests/test_configs/flows/meta_files/remote_fs.meta.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/meta_files/remote_fs.meta.yaml", "repo_id": "promptflow", "token_count": 80 }
68
import openai from openai.version import VERSION as OPENAI_VERSION from typing import List from promptflow import tool from promptflow.connections import AzureOpenAIConnection IS_LEGACY_OPENAI = OPENAI_VERSION.startswith("0.") def get_client(connection: AzureOpenAIConnection): api_key = connection.api_key conn = dict( api_key=connection.api_key, ) if api_key.startswith("sk-"): from openai import OpenAI as Client else: from openai import AzureOpenAI as Client conn.update( azure_endpoint=connection.api_base, api_version=connection.api_version, ) return Client(**conn) def create_messages(question, chat_history): yield {"role": "system", "content": "You are a helpful assistant."} for chat in chat_history: yield {"role": "user", "content": chat["inputs"]["question"]} yield {"role": "assistant", "content": chat["outputs"]["answer"]} yield {"role": "user", "content": question} @tool def chat(connection: AzureOpenAIConnection, question: str, chat_history: List, stream: bool) -> str: if IS_LEGACY_OPENAI: completion = openai.ChatCompletion.create( engine="gpt-35-turbo", messages=list(create_messages(question, chat_history)), temperature=1.0, top_p=1.0, n=1, stream=stream, stop=None, max_tokens=16, **dict(connection), ) else: completion = get_client(connection).chat.completions.create( model="gpt-35-turbo", messages=list(create_messages(question, chat_history)), temperature=1.0, top_p=1.0, n=1, stream=stream, stop=None, max_tokens=16 ) if stream: def generator(): for chunk in completion: if chunk.choices: if IS_LEGACY_OPENAI: yield getattr(chunk.choices[0]["delta"], "content", "") else: yield chunk.choices[0].delta.content or "" # We must return the generator object, not using yield directly here. # Otherwise, the function itself will become a generator, despite whether stream is True or False. # return generator() return "".join(generator()) else: # chat api may return message with no content. 
if IS_LEGACY_OPENAI: return getattr(completion.choices[0].message, "content", "") else: return completion.choices[0].message.content or ""
promptflow/src/promptflow/tests/test_configs/flows/openai_chat_api_flow/chat.py/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/openai_chat_api_flow/chat.py", "repo_id": "promptflow", "token_count": 1207 }
69
{"text": "text_0"} {"text": "text_1"} {"text": "text_2"} {"text": "text_3"} {"text": "text_4"} {"text": "text_5"} {"text": "text_6"} {"text": "text_7"} {"text": "text_8"} {"text": "text_9"}
promptflow/src/promptflow/tests/test_configs/flows/print_input_flow/inputs.jsonl/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/print_input_flow/inputs.jsonl", "repo_id": "promptflow", "token_count": 89 }
70
{"idx": 1, "line_number": 0} {"idx": 2, "line_number": 1} {"idx": 4, "line_number": 3} {"idx": 5, "line_number": 4} {"idx": 7, "line_number": 6} {"idx": 8, "line_number": 7} {"idx": 10, "line_number": 9}
promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/inputs/output.jsonl/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/inputs/output.jsonl", "repo_id": "promptflow", "token_count": 98 }
71
inputs: image: type: image default: logo.jpg outputs: output: type: image reference: ${python_node_2.output} nodes: - name: python_node type: python source: type: code path: pick_an_image.py inputs: image_1: ${inputs.image} image_2: logo_2.png - name: python_node_2 type: python source: type: code path: pick_an_image.py inputs: image_1: ${python_node.output} image_2: logo_2.png
promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/flow.dag.yaml", "repo_id": "promptflow", "token_count": 195 }
72
id: use_functions_with_chat_models name: Use Functions with Chat Models inputs: chat_history: type: list default: - inputs: question: What is the weather like in Boston? outputs: answer: '{"forecast":["sunny","windy"],"location":"Boston","temperature":"72","unit":"fahrenheit"}' llm_output: content: null function_call: name: get_current_weather arguments: |- { "location": "Boston" } role: assistant is_chat_input: false question: type: string default: How about London next week? is_chat_input: true outputs: answer: type: string reference: ${run_function.output} is_chat_output: true llm_output: type: object reference: ${use_functions_with_chat_models.output} nodes: - name: run_function type: python source: type: code path: run_function.py inputs: response_message: ${use_functions_with_chat_models.output} use_variants: false - name: use_functions_with_chat_models type: llm source: type: code path: use_functions_with_chat_models.jinja2 inputs: deployment_name: gpt-35-turbo temperature: 0.7 top_p: 1 stop: "" max_tokens: 256 presence_penalty: 0 frequency_penalty: 0 logit_bias: "" functions: - name: get_current_weather description: Get the current weather in a given location parameters: type: object properties: location: type: string description: The city and state, e.g. San Francisco, CA unit: type: string enum: - celsius - fahrenheit required: - location - name: get_n_day_weather_forecast description: Get an N-day weather forecast parameters: type: object properties: location: type: string description: The city and state, e.g. San Francisco, CA format: type: string enum: - celsius - fahrenheit description: The temperature unit to use. Infer this from the users location. 
num_days: type: integer description: The number of days to forecast required: - location - format - num_days function_call: name: get_current_weather chat_history: ${inputs.chat_history} question: ${inputs.question} provider: AzureOpenAI connection: azure_open_ai_connection api: chat module: promptflow.tools.aoai use_variants: false node_variants: {} environment: python_requirements_txt: requirements.txt
promptflow/src/promptflow/tests/test_configs/flows/sample_flow_with_functions/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/sample_flow_with_functions/flow.dag.yaml", "repo_id": "promptflow", "token_count": 1210 }
73
from aaa import bbb # noqa: F401
promptflow/src/promptflow/tests/test_configs/flows/script_with_import/fail.py/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/script_with_import/fail.py", "repo_id": "promptflow", "token_count": 13 }
74
[ { "url": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h" }, { "url": "https://www.microsoft.com/en-us/windows/" } ]
promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/samples.json/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification_no_variants/samples.json", "repo_id": "promptflow", "token_count": 85 }
75
interactions: - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.9.17 (Windows-10-10.0.22621-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore response: body: string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", "properties": {"description": null, "tags": null, "properties": null, "isDefault": true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, "systemData": {"createdAt": "2023-09-22T05:26:30.7527337+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": "2024-01-10T09:29:14.5412854+00:00", "lastModifiedBy": "Philip Gao", "lastModifiedByType": "User"}}' headers: cache-control: - no-cache content-length: - '1200' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.078' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: 
- keep-alive Content-Length: - '0' User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.9.17 (Windows-10-10.0.22621-SP0) method: POST uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets response: body: string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' headers: cache-control: - no-cache content-length: - '134' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.160' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.13.0 Python/3.9.17 (Windows-10-10.0.22621-SP0) x-ms-date: - Mon, 15 Jan 2024 06:06:51 GMT x-ms-version: - '2021-08-06' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_input_flow.jsonl response: body: string: '' headers: accept-ranges: - bytes content-length: - '56' content-md5: - I/k2vLbQ+WkABQncQUd5Rg== content-type: - application/octet-stream last-modified: - Tue, 26 Dec 2023 03:34:26 GMT server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 vary: - Origin x-ms-blob-type: - BlockBlob x-ms-creation-time: - Tue, 26 Dec 2023 03:34:26 GMT x-ms-meta-name: - 489008dc-84bd-4bee-82db-0bb80f7ad272 x-ms-meta-upload_status: - completed x-ms-meta-version: - b7185e71-1e33-43b7-b8be-43bdd40e6731 x-ms-version: - '2021-08-06' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - 
azsdk-python-storage-blob/12.13.0 Python/3.9.17 (Windows-10-10.0.22621-SP0) x-ms-date: - Mon, 15 Jan 2024 06:06:52 GMT x-ms-version: - '2021-08-06' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_input_flow.jsonl response: body: string: '' headers: server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 transfer-encoding: - chunked vary: - Origin x-ms-error-code: - BlobNotFound x-ms-version: - '2021-08-06' status: code: 404 message: The specified blob does not exist. - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.9.17 (Windows-10-10.0.22621-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore response: body: string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", "properties": {"description": null, "tags": null, "properties": null, "isDefault": true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, "systemData": {"createdAt": "2023-09-22T05:26:30.7527337+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": 
"2024-01-10T09:29:14.5412854+00:00", "lastModifiedBy": "Philip Gao", "lastModifiedByType": "User"}}' headers: cache-control: - no-cache content-length: - '1200' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.085' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '0' User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.9.17 (Windows-10-10.0.22621-SP0) method: POST uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets response: body: string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' headers: cache-control: - no-cache content-length: - '134' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.066' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.13.0 Python/3.9.17 (Windows-10-10.0.22621-SP0) x-ms-date: - Mon, 15 Jan 2024 06:06:53 GMT x-ms-version: - '2021-08-06' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_input_flow/flow.dag.yaml response: body: string: '' headers: accept-ranges: - bytes content-length: - '238' content-md5: - g6eojZlNZcUGTvvyK1WJEg== content-type: - 
application/octet-stream last-modified: - Tue, 26 Dec 2023 03:34:26 GMT server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 vary: - Origin x-ms-blob-type: - BlockBlob x-ms-creation-time: - Tue, 26 Dec 2023 03:34:25 GMT x-ms-meta-name: - ab2797da-2f87-47a2-9c7a-1cdd447fa102 x-ms-meta-upload_status: - completed x-ms-meta-version: - '1' x-ms-version: - '2021-08-06' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.13.0 Python/3.9.17 (Windows-10-10.0.22621-SP0) x-ms-date: - Mon, 15 Jan 2024 06:06:55 GMT x-ms-version: - '2021-08-06' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_input_flow/flow.dag.yaml response: body: string: '' headers: server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 transfer-encoding: - chunked vary: - Origin x-ms-error-code: - BlobNotFound x-ms-version: - '2021-08-06' status: code: 404 message: The specified blob does not exist. 
- request: body: '[{"iKey": "8b52b368-4c91-4226-b7f7-be52822f0509", "tags": {"ai.cloud.role": "pfazure", "ai.device.locale": "zh_CN", "ai.device.osVersion": "10.0.22621", "ai.device.type": "Other", "ai.internal.sdkVersion": "py3.9.17:oc0.11.2:ext1.1.9", "ai.operation.id": "00000000000000000000000000000000", "ai.operation.parentId": "|00000000000000000000000000000000.0000000000000000."}, "time": "2024-01-15T06:06:48.727100Z", "name": "Microsoft.ApplicationInsights.Event", "data": {"baseData": {"name": "pfazure.run.create.start", "properties": {"request_id": "ab23fd05-6e99-4553-bf58-eea847cd7cb2", "first_call": true, "activity_name": "pfazure.run.create", "activity_type": "PublicApi", "user_agent": "promptflow-cli/0.0.1 perf_monitor/1.0", "subscription_id": "96aede12-2f73-41cb-b983-6d11a904839b", "resource_group_name": "promptflow", "workspace_name": "promptflow-eastus2euap", "custom_message": "github run: https://github.com/microsoft/promptflow/actions/runs/None", "level": "INFO", "from_ci": false, "python_version": "3.9.17", "installation_id": "920feacd-3514-4287-a262-783d110775e1"}, "measurements": {}, "ver": 2}, "baseType": "EventData"}}]' headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '1140' Content-Type: - application/json; charset=utf-8 User-Agent: - python-requests/2.31.0 method: POST uri: https://dc.services.visualstudio.com/v2.1/track response: body: string: '{"itemsReceived": 1, "itemsAccepted": 0, "errors": [{"index": 0, "statusCode": 307, "message": "Ingestion is allowed only from stamp specific endpoint - Location: https://eastus-8.in.applicationinsights.azure.com/v2.1/track"}]}' headers: cache-control: - max-age=604800 content-length: - '217' content-type: - application/json; charset=utf-8 location: - https://eastus-8.in.applicationinsights.azure.com/v2.1/track server: - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000 x-content-type-options: - nosniff status: code: 307 
message: Temporary Redirect - request: body: '[{"iKey": "8b52b368-4c91-4226-b7f7-be52822f0509", "tags": {"ai.cloud.role": "pfazure", "ai.device.locale": "zh_CN", "ai.device.osVersion": "10.0.22621", "ai.device.type": "Other", "ai.internal.sdkVersion": "py3.9.17:oc0.11.2:ext1.1.9", "ai.operation.id": "00000000000000000000000000000000", "ai.operation.parentId": "|00000000000000000000000000000000.0000000000000000."}, "time": "2024-01-15T06:06:48.727100Z", "name": "Microsoft.ApplicationInsights.Event", "data": {"baseData": {"name": "pfazure.run.create.start", "properties": {"request_id": "ab23fd05-6e99-4553-bf58-eea847cd7cb2", "first_call": true, "activity_name": "pfazure.run.create", "activity_type": "PublicApi", "user_agent": "promptflow-cli/0.0.1 perf_monitor/1.0", "subscription_id": "96aede12-2f73-41cb-b983-6d11a904839b", "resource_group_name": "promptflow", "workspace_name": "promptflow-eastus2euap", "custom_message": "github run: https://github.com/microsoft/promptflow/actions/runs/None", "level": "INFO", "from_ci": false, "python_version": "3.9.17", "installation_id": "920feacd-3514-4287-a262-783d110775e1"}, "measurements": {}, "ver": 2}, "baseType": "EventData"}}]' headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '1140' Content-Type: - application/json; charset=utf-8 User-Agent: - python-requests/2.31.0 method: POST uri: https://eastus-8.in.applicationinsights.azure.com/v2.1/track response: body: string: '{"itemsReceived": 1, "itemsAccepted": 1, "errors": []}' headers: content-length: - '49' content-type: - application/json; charset=utf-8 server: - Microsoft-HTTPAPI/2.0 strict-transport-security: - max-age=31536000 x-content-type-options: - nosniff status: code: 200 message: OK - request: body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": "LocalUpload/000000000000000000000000000000000000/print_input_flow/flow.dag.yaml", "runId": "name", "runDisplayName": "name", 
"runExperimentName": "", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/print_input_flow.jsonl"}, "inputsMapping": {}, "connections": {}, "environmentVariables": {}, "runtimeName": "fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000", "sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000", "runDisplayNameGenerationType": "UserProvidedMacro"}' headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '787' Content-Type: - application/json User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.9.17 (Windows-10-10.0.22621-SP0) method: POST uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit response: body: string: '"name"' headers: connection: - keep-alive content-length: - '38' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload x-content-type-options: - nosniff x-request-time: - '8.634' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.9.17 (Windows-10-10.0.22621-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name response: body: string: '{"flowGraph": {"nodes": [{"name": "print_input", "type": "python", "source": {"type": "code", "path": "print_input.py"}, "inputs": {"text": "${inputs.text}"}, "tool": "print_input.py", "reduce": 
false}], "tools": [{"name": "Azure OpenAI GPT-4 Turbo with Vision", "type": "custom_llm", "inputs": {"connection": {"type": ["AzureOpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": 512, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure OpenAI GPT-4 Turbo with Vision to leverage AOAI vision ability.", "module": "promptflow.tools.aoai_gpt4v", "class_name": "AzureOpenAI", "function": "chat", "icon": {"dark": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA2ElEQVR4nJXSzW3CQBAF4DUSTjk+Al1AD0ikESslpBIEheRALhEpgAYSWV8OGUublf/yLuP3PPNmdndS+gdwXZrYDmh7fGE/W+wXbaYd8IYm4rxJPnZ0boI3wZcdJxs/n+AwV7DFK7aFyfQdYIMLPvES8YJNf5yp4jMeeEYdWh38gXOR35YGHe5xabvQdsHv6PLi8qV6gycc8YH3iMfQu6Lh4ASr+F5Hh3XwVWnQYzUkVlX1nccplAb1SN6Y/sfgmlK64VS8wimldIv/0yj2QLkHizG0iWP4AVAfQ34DVQONAAAAAElFTkSuQmCC", "light": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAx0lEQVR4nJWSwQ2CQBBFX0jAcjgqXUgPJNiIsQQrIVCIFy8GC6ABDcGDX7Mus9n1Xz7zZ+fPsLPwH4bUg0dD2wMPcbR48Uxq4AKU4iSTDwZ1LhWXipN/B3V0J6hjBTvgLHZNonewBXrgDpzEvXSIjN0BE3AACmmF4kl5F6tNzcCoLpW0SvGovFvsb4oZ2AANcAOu4ka6axCcINN3rg654sww+CYsPD0OwjcozFNh/Qcd78tqVbCIW+n+Fky472Bh/Q6SYb1EEy8tDzd+9IsVPAAAAABJRU5ErkJggg=="}, "is_builtin": true, "package": "promptflow-tools", "package_version": "1.0.3", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "preview"}, {"name": "Content Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "self_harm_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], 
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version": "1.0.3", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "capabilities": {"completion": false, "chat_completion": false, "embeddings": true}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}}, "description": "Use Open AI''s embedding model to create an embedding vector representing the input text.", "module": "promptflow.tools.embedding", "function": "embedding", "is_builtin": true, "package": "promptflow-tools", "package_version": "1.0.3", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Open Model LLM", "type": "custom_llm", "inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "default": "", "dynamic_list": {"func_path": 
"promptflow.tools.open_model_llm.list_deployment_names", "func_kwargs": [{"name": "endpoint", "optional": true, "reference": "${inputs.endpoint}", "type": ["string"]}]}, "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "endpoint_name": {"type": ["string"], "dynamic_list": {"func_path": "promptflow.tools.open_model_llm.list_endpoint_names"}, "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "max_new_tokens": {"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default": "{}", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}, "temperature": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}}, "description": "Use an open model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module": "promptflow.tools.open_model_llm", "class_name": "OpenModelLLM", "function": "call", "icon": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==", "is_builtin": true, "package": "promptflow-tools", "package_version": "1.0.3", "enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V", "type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": 512, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, 
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name": "OpenAI", "function": "chat", "icon": {"dark": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA2ElEQVR4nJXSzW3CQBAF4DUSTjk+Al1AD0ikESslpBIEheRALhEpgAYSWV8OGUublf/yLuP3PPNmdndS+gdwXZrYDmh7fGE/W+wXbaYd8IYm4rxJPnZ0boI3wZcdJxs/n+AwV7DFK7aFyfQdYIMLPvES8YJNf5yp4jMeeEYdWh38gXOR35YGHe5xabvQdsHv6PLi8qV6gycc8YH3iMfQu6Lh4ASr+F5Hh3XwVWnQYzUkVlX1nccplAb1SN6Y/sfgmlK64VS8wimldIv/0yj2QLkHizG0iWP4AVAfQ34DVQONAAAAAElFTkSuQmCC", "light": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAx0lEQVR4nJWSwQ2CQBBFX0jAcjgqXUgPJNiIsQQrIVCIFy8GC6ABDcGDX7Mus9n1Xz7zZ+fPsLPwH4bUg0dD2wMPcbR48Uxq4AKU4iSTDwZ1LhWXipN/B3V0J6hjBTvgLHZNonewBXrgDpzEvXSIjN0BE3AACmmF4kl5F6tNzcCoLpW0SvGovFvsb4oZ2AANcAOu4ka6axCcINN3rg654sww+CYsPD0OwjcozFNh/Qcd78tqVbCIW+n+Fky472Bh/Q6SYb1EEy8tDzd+9IsVPAAAAABJRU5ErkJggg=="}, "is_builtin": true, "package": "promptflow-tools", "package_version": "1.0.3", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "preview"}, {"name": "Serp API", "type": "python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "engine": {"type": ["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "location": {"type": ["string"], "default": "", "allow_manual_entry": false, 
"is_multi_select": false, "input_type": "default"}, "num": {"type": ["int"], "default": "10", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off", "enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Serp API to obtain search results from a specific search engine.", "module": "promptflow.tools.serpapi", "class_name": "SerpAPI", "function": "search", "is_builtin": true, "package": "promptflow-tools", "package_version": "1.0.3", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Index Lookup", "type": "python", "inputs": {"acs_content_field": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["Azure AI Search"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_acs_index_fields", "func_kwargs": [{"name": "acs_connection", "optional": false, "reference": "${inputs.acs_index_connection}", "type": ["CognitiveSearchConnection"]}, {"name": "acs_index_name", "optional": false, "reference": "${inputs.acs_index_name}", "type": ["string"]}, {"default": "Edm.String", "name": "field_data_type", "optional": false, "type": ["string"]}]}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "acs_embedding_field": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["Azure AI Search"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_acs_index_fields", "func_kwargs": [{"name": "acs_connection", "optional": false, "reference": "${inputs.acs_index_connection}", "type": ["CognitiveSearchConnection"]}, {"name": "acs_index_name", "optional": false, "reference": "${inputs.acs_index_name}", "type": ["string"]}, {"default": "Collection(Edm.Single)", "name": "field_data_type", 
"optional": false, "type": ["string"]}]}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "acs_index_connection": {"type": ["CognitiveSearchConnection"], "enabled_by": "index_type", "enabled_by_value": ["Azure AI Search"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "acs_index_name": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["Azure AI Search"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_acs_indices", "func_kwargs": [{"name": "acs_connection", "optional": false, "reference": "${inputs.acs_index_connection}", "type": ["CognitiveSearchConnection"]}]}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "acs_metadata_field": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["Azure AI Search"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_acs_index_fields", "func_kwargs": [{"name": "acs_connection", "optional": false, "reference": "${inputs.acs_index_connection}", "type": ["CognitiveSearchConnection"]}, {"name": "acs_index_name", "optional": false, "reference": "${inputs.acs_index_name}", "type": ["string"]}, {"default": "Edm.String", "name": "field_data_type", "optional": false, "type": ["string"]}]}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "aoai_embedding_connection": {"type": ["AzureOpenAIConnection"], "enabled_by": "embedding_type", "enabled_by_value": ["Azure OpenAI"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "embedding_deployment": {"type": ["string"], "enabled_by": "embedding_type", "enabled_by_value": ["Azure OpenAI"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_aoai_embedding_deployments", "func_kwargs": [{"name": "aoai_connection", "optional": false, "reference": 
"${inputs.aoai_embedding_connection}", "type": ["AzurOpenAIConnection"]}]}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "embedding_model": {"type": ["string"], "enabled_by": "embedding_type", "enabled_by_value": ["OpenAI", "Hugging Face"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_embedding_models", "func_kwargs": [{"name": "embedding_type", "optional": false, "reference": "${inputs.embedding_type}", "type": ["string"]}]}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "embedding_type": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["Azure AI Search", "FAISS", "Pinecone"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_available_embedding_types", "func_kwargs": [{"name": "index_type", "optional": false, "reference": "${inputs.index_type}", "type": ["string"]}]}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "faiss_index_path": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["FAISS"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "index_type": {"type": ["string"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_available_index_types"}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "mlindex_asset_id": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["Registered Index"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_registered_mlindices"}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "mlindex_content": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "generated_by": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.forward_mapping", "func_kwargs": 
[{"name": "index_type", "reference": "${inputs.index_type}", "type": ["string"]}, {"name": "mlindex_asset_id", "optional": true, "reference": "${inputs.mlindex_asset_id}", "type": ["string"]}, {"name": "mlindex_path", "optional": true, "reference": "${inputs.mlindex_path}", "type": ["string"]}, {"name": "acs_index_connection", "optional": true, "reference": "${inputs.acs_index_connection}", "type": ["CognitiveSearchConnection"]}, {"name": "acs_index_name", "optional": true, "reference": "${inputs.acs_index_name}", "type": ["string"]}, {"name": "acs_content_field", "optional": true, "reference": "${inputs.acs_content_field}", "type": ["string"]}, {"name": "acs_embedding_field", "optional": true, "reference": "${inputs.acs_embedding_field}", "type": ["string"]}, {"name": "acs_metadata_field", "optional": true, "reference": "${inputs.acs_metadata_field}", "type": ["string"]}, {"name": "semantic_configuration", "optional": true, "reference": "${inputs.semantic_configuration}", "type": ["string"]}, {"name": "faiss_index_path", "optional": true, "reference": "${inputs.faiss_index_path}", "type": ["string"]}, {"name": "pinecone_index_connection", "optional": true, "reference": "${inputs.pinecone_index_connection}", "type": ["string"]}, {"name": "pinecone_index_name", "optional": true, "reference": "${inputs.pinecone_index_name}", "type": ["string"]}, {"name": "pinecone_content_field", "optional": true, "reference": "${inputs.pinecone_content_field}", "type": ["string"]}, {"name": "pinecone_metadata_field", "optional": true, "reference": "${inputs.pinecone_metadata_field}", "type": ["string"]}, {"name": "embedding_type", "optional": true, "reference": "${inputs.embedding_type}", "type": ["string"]}, {"name": "aoai_embedding_connection", "optional": true, "reference": "${inputs.aoai_embedding_connection}", "type": ["AzureOpenAIConnection"]}, {"name": "oai_embedding_connection", "optional": true, "reference": "${inputs.oai_embedding_connection}", "type": ["string"]}, 
{"name": "embedding_model", "optional": true, "reference": "${inputs.embedding_model}", "type": ["string"]}, {"name": "embedding_deployment", "optional": true, "reference": "${inputs.embedding_deployment}", "type": ["string"]}], "reverse_func_path": "promptflow_vectordb.tool.common_index_lookup_utils.reverse_mapping"}, "input_type": "default"}, "mlindex_path": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["MLIndex file from path"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "oai_embedding_connection": {"type": ["OpenAIConnection"], "enabled_by": "embedding_type", "enabled_by_value": ["OpenAI"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "pinecone_content_field": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["Pinecone"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "pinecone_index_connection": {"type": ["PineconeConnection"], "enabled_by": "index_type", "enabled_by_value": ["Pinecone"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_pinecone_connections"}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "pinecone_index_name": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["Pinecone"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_pinecone_indices", "func_kwargs": [{"name": "pinecone_connection_name", "optional": false, "reference": "${inputs.pinecone_index_connection}", "type": ["string"]}]}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "pinecone_metadata_field": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["Pinecone"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "queries": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": 
false, "input_type": "default"}, "query_type": {"type": ["string"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_available_query_types", "func_kwargs": [{"name": "mlindex_content", "optional": false, "reference": "${inputs.mlindex_content}", "type": ["string"]}]}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "semantic_configuration": {"type": ["string"], "enabled_by": "index_type", "enabled_by_value": ["Azure AI Search"], "dynamic_list": {"func_path": "promptflow_vectordb.tool.common_index_lookup_utils.list_acs_index_semantic_configurations", "func_kwargs": [{"name": "acs_connection", "optional": false, "reference": "${inputs.acs_index_connection}", "type": ["CognitiveSearchConnection"]}, {"name": "acs_index_name", "optional": false, "reference": "${inputs.acs_index_name}", "type": ["string"]}]}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "uionly_hidden"}, "top_k": {"type": ["int"], "default": 3, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search an AzureML Vector Index for relevant results using one or more text queries.", "module": "promptflow_vectordb.tool.common_index_lookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "preview"}, {"name": "Faiss Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", "function": "search", 
"is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python", "inputs": {"class_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_params": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, 
"is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search text or vector based query from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": "VectorIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "print_input.py", "type": "python", "inputs": {"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "print_input.py", "function": "print_inputs", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs": {"text": {"type": "string", "is_chat_input": false}}, "outputs": {"output_text": {"type": "string", "reference": "${print_input.output}", "evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus2euap/workspaces/00000/flows/name/flowRuns/name", "flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/80d5ca5a93c8c6ea7c2fcdf1cd4b9d07/print_input_flow.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic", "inputsMapping": {}, 
"outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "541bf448-5153-4b0a-a34e-c4e39763e775", "studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '26138' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.292' status: code: 200 message: OK - request: body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true, "selectJobSpecification": true}' headers: Accept: - '*/*' Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '137' Content-Type: - application/json User-Agent: - python-requests/2.31.0 method: POST uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata response: body: string: '{"runMetadata": {"runNumber": 1705298823, "rootRunId": "name", "createdUtc": "2024-01-15T06:07:03.006454+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userPuId": "10032001A114F500", "userIdp": null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Chenyang Zhang", "upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 3, "statusRevision": 1, "runUuid": "63400a6e-7d8b-4302-a031-c08f99d265df", "parentRunUuid": null, "rootRunUuid": "63400a6e-7d8b-4302-a031-c08f99d265df", 
"lastStartTimeUtc": null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userPuId": "10032001A114F500", "userIdp": null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Chenyang Zhang", "upn": null}, "lastModifiedUtc": "2024-01-15T06:07:08.4229129+00:00", "duration": null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name", "parentRunId": null, "experimentId": "4d67b7d7-dc1c-4699-a53e-318ca24f1f72", "status": "Preparing", "startTimeUtc": null, "endTimeUtc": null, "scheduleId": null, "displayName": "name", "name": null, "dataContainerId": "dcid.name", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": null}, "properties": {"azureml.promptflow.runtime_name": "automatic", "azureml.promptflow.runtime_version": "20240111.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", "azureml.promptflow.session_id": "96c35660d759b526cb14cf69bbb56fcaa90c05a778b147b4", "azureml.promptflow.flow_lineage_id": "fcb01d4faf8f2137f9d142cc1f3f22ecdf3d485e1435c4c971ff0ee09077f1b5", "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", "azureml.promptflow.flow_definition_blob_path": "LocalUpload/52675845d616ad8b41db77e9ba9e9d4a/print_input_flow/flow.dag.yaml", "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/80d5ca5a93c8c6ea7c2fcdf1cd4b9d07/print_input_flow.jsonl", "_azureml.evaluation_run": "promptflow.BatchRun", "azureml.promptflow.snapshot_id": "541bf448-5153-4b0a-a34e-c4e39763e775"}, "parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": {}, "settings": {}, "services": {}, 
"inputDatasets": [], "outputDatasets": [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": null, "jobSpecification": null, "systemSettings": null}' headers: connection: - keep-alive content-length: - '3709' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.018' status: code: 200 message: OK version: 1
promptflow/src/promptflow/tests/test_configs/recordings/test_azure_cli_perf_TestAzureCliPerf_test_pfazure_run_create.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_azure_cli_perf_TestAzureCliPerf_test_pfazure_run_create.yaml", "repo_id": "promptflow", "token_count": 22751 }
76
interactions: - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 response: body: string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' headers: cache-control: - no-cache content-length: - '3630' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.026' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false response: body: string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", "name": "workspaceblobstore", "type": 
"Microsoft.MachineLearningServices/workspaces/datastores", "properties": {"description": null, "tags": null, "properties": null, "isDefault": true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "lastModifiedByType": "Application"}}]}' headers: cache-control: - no-cache content-length: - '1372' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.053' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore response: body: string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", "properties": 
{"description": null, "tags": null, "properties": null, "isDefault": true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "lastModifiedByType": "Application"}}' headers: cache-control: - no-cache content-length: - '1227' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.076' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '0' User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: POST uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets response: body: string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' headers: cache-control: - no-cache content-length: - '134' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding 
x-content-type-options: - nosniff x-request-time: - '0.150' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 08:46:41 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl response: body: string: '' headers: accept-ranges: - bytes content-length: - '127' content-md5: - i/8q1x5YKzHv3Fd/R8lYUQ== content-type: - application/octet-stream last-modified: - Fri, 28 Jul 2023 12:34:52 GMT server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 vary: - Origin x-ms-blob-type: - BlockBlob x-ms-creation-time: - Fri, 28 Jul 2023 12:34:52 GMT x-ms-meta-name: - 13fa99dd-c98e-4f2a-a704-4295d4ed6f68 x-ms-meta-upload_status: - completed x-ms-meta-version: - 0367c5c6-9f53-4a75-8623-7e53699f0d0b x-ms-version: - '2023-11-03' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 08:46:42 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification1.jsonl response: body: string: '' headers: server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 transfer-encoding: - chunked vary: - Origin x-ms-error-code: - BlobNotFound x-ms-version: - '2023-11-03' status: code: 404 message: The specified blob does not exist. 
- request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore response: body: string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", "properties": {"description": null, "tags": null, "properties": null, "isDefault": true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "lastModifiedByType": "Application"}}' headers: cache-control: - no-cache content-length: - '1227' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.075' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, 
deflate Connection: - keep-alive Content-Length: - '0' User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: POST uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets response: body: string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' headers: cache-control: - no-cache content-length: - '134' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.082' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 08:46:45 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 response: body: string: '' headers: accept-ranges: - bytes content-length: - '853' content-md5: - ylTeNqjvuOvtzEZJ/X5n3A== content-type: - application/octet-stream last-modified: - Fri, 12 Jan 2024 08:13:57 GMT server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 vary: - Origin x-ms-blob-type: - BlockBlob x-ms-creation-time: - Fri, 12 Jan 2024 08:13:56 GMT x-ms-meta-name: - 950201e8-c52c-4b15-ada1-5e58de9b2f4d x-ms-meta-upload_status: - completed x-ms-meta-version: - '1' x-ms-version: - '2023-11-03' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - 
azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 08:46:46 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 response: body: string: '' headers: server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 transfer-encoding: - chunked vary: - Origin x-ms-error-code: - BlobNotFound x-ms-version: - '2023-11-03' status: code: 404 message: The specified blob does not exist. - request: body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", "runId": "run_id", "runDisplayName": "run_id", "runExperimentName": "", "nodeVariant": "${summarize_text_content.variant_0}", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl"}, "inputsMapping": {"url": "${data.url}"}, "connections": {}, "environmentVariables": {}, "runtimeName": "fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000", "vmSize": "Standard_D2", "maxIdleTimeSeconds": 3600, "sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000", "runDisplayNameGenerationType": "UserProvidedMacro"}' headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '920' Content-Type: - application/json User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: POST uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit response: body: string: 
'"run_id"' headers: connection: - keep-alive content-length: - '38' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload x-content-type-options: - nosniff x-request-time: - '3.485' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run_id response: body: string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run_id/flowRuns/run_id", "flowRunId": "run_id", "flowRunDisplayName": "run_id", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/107bd3498e44deb2dccc53d2208d32b2/webClassification1.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic", "inputsMapping": {"url": "${data.url}"}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/run_id/flow_artifacts", "studioPortalEndpoint": "https://ml.azure.com/runs/run_id?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '945' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.273' status: code: 200 message: OK - request: body: '{"runId": "run_id", "selectRunMetadata": true, "selectRunDefinition": true, "selectJobSpecification": true}' headers: Accept: - '*/*' 
Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '137' Content-Type: - application/json User-Agent: - python-requests/2.31.0 method: POST uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata response: body: string: '{"runMetadata": {"runNumber": 1705049212, "rootRunId": "run_id", "createdUtc": "2024-01-12T08:46:52.5382691+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", "upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 1, "statusRevision": 0, "runUuid": "2fb0f4b1-9d4b-4dd7-9394-c35727026f45", "parentRunUuid": null, "rootRunUuid": "2fb0f4b1-9d4b-4dd7-9394-c35727026f45", "lastStartTimeUtc": null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", "upn": null}, "lastModifiedUtc": "2024-01-12T08:46:52.5382691+00:00", "duration": null, "cancelationReason": null, "currentAttemptId": 1, "runId": "run_id", "parentRunId": null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", "status": "NotStarted", "startTimeUtc": null, "endTimeUtc": null, "scheduleId": null, "displayName": 
"run_id", "name": null, "dataContainerId": "dcid.run_id", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": null}, "properties": {"azureml.promptflow.runtime_name": "automatic", "azureml.promptflow.runtime_version": "20231218.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", "azureml.promptflow.session_id": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", "azureml.promptflow.flow_definition_blob_path": "LocalUpload/a1fa6ef1ead7ff3ce76b36250f6f5461/web_classification/flow.dag.yaml", "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/107bd3498e44deb2dccc53d2208d32b2/webClassification1.jsonl", "azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "_azureml.evaluation_run": "promptflow.BatchRun"}, "parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": null, "jobSpecification": null, "systemSettings": null}' headers: connection: - keep-alive content-length: - '3921' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff 
x-request-time: - '0.081' status: code: 200 message: OK version: 1
promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_automatic_runtime_with_resources.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_automatic_runtime_with_resources.yaml", "repo_id": "promptflow", "token_count": 10468 }
77
interactions: - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 response: body: string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' headers: cache-control: - no-cache content-length: - '3630' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.030' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false response: body: string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", "name": "workspaceblobstore", "type": 
"Microsoft.MachineLearningServices/workspaces/datastores", "properties": {"description": null, "tags": null, "properties": null, "isDefault": true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "lastModifiedByType": "Application"}}]}' headers: cache-control: - no-cache content-length: - '1372' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.062' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore response: body: string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", "properties": 
{"description": null, "tags": null, "properties": null, "isDefault": true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "lastModifiedByType": "Application"}}' headers: cache-control: - no-cache content-length: - '1227' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.126' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '0' User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: POST uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets response: body: string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' headers: cache-control: - no-cache content-length: - '134' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding 
x-content-type-options: - nosniff x-request-time: - '0.118' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 07:56:14 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl response: body: string: '' headers: accept-ranges: - bytes content-length: - '379' content-md5: - lI/pz9jzTQ7Td3RHPL7y7w== content-type: - application/octet-stream last-modified: - Mon, 06 Nov 2023 08:30:18 GMT server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 vary: - Origin x-ms-blob-type: - BlockBlob x-ms-creation-time: - Mon, 06 Nov 2023 08:30:18 GMT x-ms-meta-name: - 94331215-cf7f-452a-9f1a-1d276bc9b0e4 x-ms-meta-upload_status: - completed x-ms-meta-version: - 3f163752-edb0-4afc-a6f5-b0a670bd7c24 x-ms-version: - '2023-11-03' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 07:56:15 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl response: body: string: '' headers: server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 transfer-encoding: - chunked vary: - Origin x-ms-error-code: - BlobNotFound x-ms-version: - '2023-11-03' status: code: 404 message: The specified blob does not exist. 
- request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore response: body: string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", "properties": {"description": null, "tags": null, "properties": null, "isDefault": true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "lastModifiedByType": "Application"}}' headers: cache-control: - no-cache content-length: - '1227' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.068' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, 
deflate Connection: - keep-alive Content-Length: - '0' User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: POST uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets response: body: string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' headers: cache-control: - no-cache content-length: - '134' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.108' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 07:56:18 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/hello-world/flow.dag.yaml response: body: string: '' headers: accept-ranges: - bytes content-length: - '266' content-md5: - UZm3TyOoKWjSR23+Up6qUA== content-type: - application/octet-stream last-modified: - Tue, 19 Dec 2023 06:05:25 GMT server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 vary: - Origin x-ms-blob-type: - BlockBlob x-ms-creation-time: - Tue, 19 Dec 2023 06:05:25 GMT x-ms-meta-name: - 7b68bf5e-6ef4-4eb3-9f49-28f9a5baad87 x-ms-meta-upload_status: - completed x-ms-meta-version: - '1' x-ms-version: - '2023-11-03' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - 
azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 07:56:19 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/hello-world/flow.dag.yaml response: body: string: '' headers: server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 transfer-encoding: - chunked vary: - Origin x-ms-error-code: - BlobNotFound x-ms-version: - '2023-11-03' status: code: 404 message: The specified blob does not exist. - request: body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": "LocalUpload/000000000000000000000000000000000000/hello-world/flow.dag.yaml", "runId": "batch_run_name", "runDisplayName": "sdk-cli-test-fixture-batch-run-without-llm", "runExperimentName": "", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"}, "inputsMapping": {"name": "${data.url}"}, "connections": {}, "environmentVariables": {}, "runtimeName": "fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000", "sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000", "runDisplayNameGenerationType": "UserProvidedMacro"}' headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '812' Content-Type: - application/json User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: POST uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit response: body: string: '"batch_run_name"' headers: connection: - keep-alive content-length: - '38' content-type: - 
application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload x-content-type-options: - nosniff x-request-time: - '5.712' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name response: body: string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python", "source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"}, "tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "self_harm_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "violence_category": {"type": ["string"], "default": 
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "capabilities": {"completion": false, "chat_completion": false, "embeddings": true}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Open AI''s embedding model to create an embedding vector representing the input text.", "module": "promptflow.tools.embedding", "function": "embedding", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm", "inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry": false, "is_multi_select": false, 
"input_type": "default"}, "connection": {"type": ["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "endpoint_name": {"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens": {"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default": "{}", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}, "temperature": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}}, "description": "Use an Open Source model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module": "promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function": "call", "icon": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V", "type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, 
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name": "OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type": "python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "engine": {"type": ["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "location": {"type": ["string"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "num": {"type": ["int"], "default": "10", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off", "enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Serp API to obtain search results from a specific search engine.", "module": "promptflow.tools.serpapi", "class_name": "SerpAPI", "function": "search", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python", 
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python", "inputs": {"class_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_params": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": 
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search text or vector based query from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": "VectorIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python", "inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "hello_world.py", "function": "hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input": 
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}", "evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name", "flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts", "flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "90c907a3-78cc-4691-acfc-dcfab76a270a", "studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '12912' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.439' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name response: body: string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python", "source": {"type": "code", "path": 
"hello_world.py"}, "inputs": {"name": "${inputs.name}"}, "tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "self_harm_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], 
"enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "capabilities": {"completion": false, "chat_completion": false, "embeddings": true}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Open AI''s embedding model to create an embedding vector representing the input text.", "module": "promptflow.tools.embedding", "function": "embedding", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm", "inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "endpoint_name": {"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens": {"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default": "{}", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}, "temperature": {"type": ["double"], "default": 1.0, 
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}}, "description": "Use an Open Source model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module": "promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function": "call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V", "type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false, 
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name": "OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type": "python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "engine": {"type": ["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "location": {"type": ["string"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "num": {"type": ["int"], "default": "10", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "safe": {"type": ["string"], 
"default": "off", "enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Serp API to obtain search results from a specific search engine.", "module": "promptflow.tools.serpapi", "class_name": "SerpAPI", "function": "search", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python", "inputs": {"class_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_filters": {"type": ["object"], 
"enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_params": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search text or vector based query from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": "VectorIndexLookup", "function": "search", "is_builtin": true, 
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python", "inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "hello_world.py", "function": "hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input": false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}", "evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name", "flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts", "flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "90c907a3-78cc-4691-acfc-dcfab76a270a", "studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '12912' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.225' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - 
promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name response: body: string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python", "source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"}, "tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "self_harm_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety", "function": "analyze_text", 
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "capabilities": {"completion": false, "chat_completion": false, "embeddings": true}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Open AI''s embedding model to create an embedding vector representing the input text.", "module": "promptflow.tools.embedding", "function": "embedding", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm", "inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "endpoint_name": {"type": ["string"], "default": "-- please 
enter an endpoint name --", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens": {"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default": "{}", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}, "temperature": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}}, "description": "Use an Open Source model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module": "promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function": "call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, 
"tool_state": "stable"}, {"name": "OpenAI GPT-4V", "type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name": "OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type": "python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "engine": {"type": ["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry": 
false, "is_multi_select": false, "input_type": "default"}, "location": {"type": ["string"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "num": {"type": ["int"], "default": "10", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off", "enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Serp API to obtain search results from a specific search engine.", "module": "promptflow.tools.serpapi", "class_name": "SerpAPI", "function": "search", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python", "inputs": {"class_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry": false, 
"is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_params": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": 
"default"}, "query": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search text or vector based query from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": "VectorIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python", "inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "hello_world.py", "function": "hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input": false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}", "evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name", "flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts", "flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "90c907a3-78cc-4691-acfc-dcfab76a270a", "studioPortalEndpoint": 
"https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '12912' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.606' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name response: body: string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python", "source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"}, "tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "self_harm_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", 
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "capabilities": {"completion": false, "chat_completion": false, "embeddings": true}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Open AI''s embedding model to create an embedding vector representing the input text.", "module": "promptflow.tools.embedding", "function": "embedding", 
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm", "inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "endpoint_name": {"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens": {"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default": "{}", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}, "temperature": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}}, "description": "Use an Open Source model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module": "promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function": "call", "icon": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V", "type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, 
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name": "OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type": "python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "engine": {"type": ["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "location": {"type": ["string"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "num": {"type": ["int"], "default": "10", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off", "enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Serp API to obtain search results from a specific search engine.", "module": "promptflow.tools.serpapi", "class_name": "SerpAPI", "function": "search", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python", 
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python", "inputs": {"class_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_params": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": 
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search text or vector based query from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": "VectorIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python", "inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "hello_world.py", "function": "hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input": 
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}", "evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name", "flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts", "flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "90c907a3-78cc-4691-acfc-dcfab76a270a", "studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '12912' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.516' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore response: body: string: '{"id": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", "properties": {"description": null, "tags": null, "properties": null, "isDefault": true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "lastModifiedByType": "Application"}}' headers: cache-control: - no-cache content-length: - '1227' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.111' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '0' User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: POST uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets response: body: string: '{"secretsType": "AccountKey", "key": 
"dGhpcyBpcyBmYWtlIGtleQ=="}' headers: cache-control: - no-cache content-length: - '134' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.077' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 07:57:05 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl response: body: string: '' headers: accept-ranges: - bytes content-length: - '379' content-md5: - lI/pz9jzTQ7Td3RHPL7y7w== content-type: - application/octet-stream last-modified: - Mon, 06 Nov 2023 08:30:18 GMT server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 vary: - Origin x-ms-blob-type: - BlockBlob x-ms-creation-time: - Mon, 06 Nov 2023 08:30:18 GMT x-ms-meta-name: - 94331215-cf7f-452a-9f1a-1d276bc9b0e4 x-ms-meta-upload_status: - completed x-ms-meta-version: - 3f163752-edb0-4afc-a6f5-b0a670bd7c24 x-ms-version: - '2023-11-03' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 07:57:06 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl response: body: string: '' headers: server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 transfer-encoding: - chunked vary: - Origin x-ms-error-code: - BlobNotFound 
x-ms-version: - '2023-11-03' status: code: 404 message: The specified blob does not exist. - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore response: body: string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", "properties": {"description": null, "tags": null, "properties": null, "isDefault": true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "lastModifiedByType": "Application"}}' headers: cache-control: - no-cache content-length: - '1227' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding,Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.103' status: code: 200 
message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '0' User-Agent: - promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) method: POST uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets response: body: string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' headers: cache-control: - no-cache content-length: - '134' content-type: - application/json; charset=utf-8 expires: - '-1' pragma: - no-cache strict-transport-security: - max-age=31536000; includeSubDomains transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.150' status: code: 200 message: OK - request: body: null headers: Accept: - application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 07:57:10 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/eval-classification-accuracy/calculate_accuracy.py response: body: string: '' headers: accept-ranges: - bytes content-length: - '409' content-md5: - OyENtlqGVUTrY5zKuzo8XA== content-type: - application/octet-stream last-modified: - Tue, 21 Nov 2023 08:03:40 GMT server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 vary: - Origin x-ms-blob-type: - BlockBlob x-ms-creation-time: - Tue, 21 Nov 2023 08:03:39 GMT x-ms-meta-name: - fd932777-4f3a-4c1d-9c3a-24d45835d7e1 x-ms-meta-upload_status: - completed x-ms-meta-version: - '1' x-ms-version: - '2023-11-03' status: code: 200 message: OK - request: body: null headers: Accept: - 
application/xml Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0) x-ms-date: - Fri, 12 Jan 2024 07:57:11 GMT x-ms-version: - '2023-11-03' method: HEAD uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/eval-classification-accuracy/calculate_accuracy.py response: body: string: '' headers: server: - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 transfer-encoding: - chunked vary: - Origin x-ms-error-code: - BlobNotFound x-ms-version: - '2023-11-03' status: code: 404 message: The specified blob does not exist. - request: body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": "LocalUpload/000000000000000000000000000000000000/eval-classification-accuracy/flow.dag.yaml", "runId": "eval_run_name", "runDisplayName": "sdk-cli-test-fixture-eval-run-without-llm", "runExperimentName": "", "variantRunId": "batch_run_name", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"}, "inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"}, "connections": {}, "environmentVariables": {}, "runtimeName": "fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000", "sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000", "runDisplayNameGenerationType": "UserProvidedMacro"}' headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '950' Content-Type: - application/json User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: POST uri: 
https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit response: body: string: '"eval_run_name"' headers: connection: - keep-alive content-length: - '38' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload x-content-type-options: - nosniff x-request-time: - '7.278' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name response: body: string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source": {"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}", "prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false}, {"name": "calculate_accuracy", "type": "python", "source": {"type": "code", "path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"}, "tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "self_harm_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", 
"medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "capabilities": {"completion": false, "chat_completion": false, "embeddings": true}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, 
"is_multi_select": false, "input_type": "default"}}, "description": "Use Open AI''s embedding model to create an embedding vector representing the input text.", "module": "promptflow.tools.embedding", "function": "embedding", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm", "inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "endpoint_name": {"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens": {"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default": "{}", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}, "temperature": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}}, "description": "Use an Open Source model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module": "promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function": "call", "icon": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V", "type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, 
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name": "OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type": "python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "engine": {"type": ["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "location": {"type": ["string"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "num": {"type": ["int"], "default": "10", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off", "enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Serp API to obtain search results from a specific search engine.", "module": "promptflow.tools.serpapi", "class_name": "SerpAPI", "function": "search", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python", 
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python", "inputs": {"class_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_params": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": 
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search text or vector based query from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": "VectorIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type": "python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py", "function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs": 
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py", "function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP", "description": "Please specify the groundtruth column, which contains the true label to the outputs that your flow produces.", "is_chat_input": false}, "prediction": {"type": "string", "default": "APP", "description": "Please specify the prediction column, which contains the predicted outputs that your flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string", "reference": "${grade.output}", "evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name", "flowRunId": "eval_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-eval-run-without-llm", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts", "flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "b37a1c40-7c6c-4246-a735-5a9a3881116b", "studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '13872' content-type: - application/json; charset=utf-8 strict-transport-security: - 
max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.438' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name response: body: string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source": {"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}", "prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false}, {"name": "calculate_accuracy", "type": "python", "source": {"type": "code", "path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"}, "tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "self_harm_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, 
"is_multi_select": false, "input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "capabilities": {"completion": false, "chat_completion": false, "embeddings": true}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Open AI''s embedding model to create an embedding vector representing the input text.", "module": "promptflow.tools.embedding", "function": "embedding", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", 
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm", "inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "endpoint_name": {"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens": {"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default": "{}", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}, "temperature": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}}, "description": "Use an Open Source model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module": "promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function": "call", "icon": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V", "type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, 
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name": "OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type": "python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "engine": {"type": ["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "location": {"type": ["string"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "num": {"type": ["int"], "default": "10", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off", "enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Serp API to obtain search results from a specific search engine.", "module": "promptflow.tools.serpapi", "class_name": "SerpAPI", "function": "search", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python", 
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python", "inputs": {"class_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_params": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": 
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search text or vector based query from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": "VectorIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type": "python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py", "function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs": 
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py", "function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP", "description": "Please specify the groundtruth column, which contains the true label to the outputs that your flow produces.", "is_chat_input": false}, "prediction": {"type": "string", "default": "APP", "description": "Please specify the prediction column, which contains the predicted outputs that your flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string", "reference": "${grade.output}", "evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name", "flowRunId": "eval_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-eval-run-without-llm", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts", "flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "b37a1c40-7c6c-4246-a735-5a9a3881116b", "studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '13872' content-type: - application/json; charset=utf-8 strict-transport-security: - 
max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.227' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name response: body: string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source": {"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}", "prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false}, {"name": "calculate_accuracy", "type": "python", "source": {"type": "code", "path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"}, "tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "self_harm_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, 
"is_multi_select": false, "input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "capabilities": {"completion": false, "chat_completion": false, "embeddings": true}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Open AI''s embedding model to create an embedding vector representing the input text.", "module": "promptflow.tools.embedding", "function": "embedding", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", 
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm", "inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "endpoint_name": {"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens": {"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default": "{}", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}, "temperature": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}}, "description": "Use an Open Source model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module": "promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function": "call", "icon": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V", "type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, 
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name": "OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type": "python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "engine": {"type": ["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "location": {"type": ["string"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "num": {"type": ["int"], "default": "10", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off", "enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Serp API to obtain search results from a specific search engine.", "module": "promptflow.tools.serpapi", "class_name": "SerpAPI", "function": "search", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python", 
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python", "inputs": {"class_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_params": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": 
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search text or vector based query from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": "VectorIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type": "python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py", "function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs": 
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py", "function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP", "description": "Please specify the groundtruth column, which contains the true label to the outputs that your flow produces.", "is_chat_input": false}, "prediction": {"type": "string", "default": "APP", "description": "Please specify the prediction column, which contains the predicted outputs that your flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string", "reference": "${grade.output}", "evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name", "flowRunId": "eval_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-eval-run-without-llm", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts", "flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "b37a1c40-7c6c-4246-a735-5a9a3881116b", "studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '13872' content-type: - application/json; charset=utf-8 strict-transport-security: - 
max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.497' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name response: body: string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source": {"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}", "prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false}, {"name": "calculate_accuracy", "type": "python", "source": {"type": "code", "path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"}, "tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "self_harm_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, 
"is_multi_select": false, "input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "capabilities": {"completion": false, "chat_completion": false, "embeddings": true}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Open AI''s embedding model to create an embedding vector representing the input text.", "module": "promptflow.tools.embedding", "function": "embedding", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", 
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm", "inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "endpoint_name": {"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens": {"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default": "{}", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}, "temperature": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}}, "description": "Use an Open Source model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module": "promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function": "call", "icon": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V", "type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, 
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name": "OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type": "python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "engine": {"type": ["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "location": {"type": ["string"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "num": {"type": ["int"], "default": "10", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off", "enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Serp API to obtain search results from a specific search engine.", "module": "promptflow.tools.serpapi", "class_name": "SerpAPI", "function": "search", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python", 
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python", "inputs": {"class_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_params": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": 
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search text or vector based query from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": "VectorIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type": "python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py", "function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs": 
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py", "function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP", "description": "Please specify the groundtruth column, which contains the true label to the outputs that your flow produces.", "is_chat_input": false}, "prediction": {"type": "string", "default": "APP", "description": "Please specify the prediction column, which contains the predicted outputs that your flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string", "reference": "${grade.output}", "evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name", "flowRunId": "eval_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-eval-run-without-llm", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts", "flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "b37a1c40-7c6c-4246-a735-5a9a3881116b", "studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '13872' content-type: - application/json; charset=utf-8 strict-transport-security: - 
max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.348' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name response: body: string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source": {"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}", "prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false}, {"name": "calculate_accuracy", "type": "python", "source": {"type": "code", "path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"}, "tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "self_harm_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, 
"is_multi_select": false, "input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "capabilities": {"completion": false, "chat_completion": false, "embeddings": true}, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Open AI''s embedding model to create an embedding vector representing the input text.", "module": "promptflow.tools.embedding", "function": "embedding", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", 
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm", "inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "endpoint_name": {"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens": {"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default": "{}", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}, "temperature": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default", "advanced": true}}, "description": "Use an Open Source model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module": "promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function": "call", "icon": 
"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V", "type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type": ["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"], "allow_manual_entry": true, "is_multi_select": false, "input_type": "default"}, "presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "stop": {"type": ["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "temperature": {"type": ["double"], "default": 1, 
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name": "OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant, your task involves interpreting images and responding to questions about the image.\nRemember to provide accurate answers based on the information present in the image.\n\n# user:\nCan you tell me what the image depicts?\n![image]({{image_input}})\n", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type": "python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "engine": {"type": ["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "location": {"type": ["string"], "default": "", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "num": {"type": ["int"], "default": "10", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off", "enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Use Serp API to obtain search results from a specific search engine.", "module": "promptflow.tools.serpapi", "class_name": "SerpAPI", "function": "search", "is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python", 
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python", "inputs": {"class_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "connection": {"type": ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "search_params": {"type": ["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "text_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": 
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "vector_field": {"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python", "inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "description": "Search text or vector based query from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": "VectorIndexLookup", "function": "search", "is_builtin": true, "package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs": false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type": "python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py", "function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs": 
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py", "function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP", "description": "Please specify the groundtruth column, which contains the true label to the outputs that your flow produces.", "is_chat_input": false}, "prediction": {"type": "string", "default": "APP", "description": "Please specify the prediction column, which contains the predicted outputs that your flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string", "reference": "${grade.output}", "evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name", "flowRunId": "eval_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-eval-run-without-llm", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"}, "flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts", "flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "b37a1c40-7c6c-4246-a735-5a9a3881116b", "studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}' headers: connection: - keep-alive content-length: - '13872' content-type: - application/json; charset=utf-8 strict-transport-security: - 
max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.475' status: code: 200 message: OK - request: body: '{}' headers: Accept: - '*/*' Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '2' Content-Type: - application/json User-Agent: - python-requests/2.31.0 method: POST uri: https://eastus.api.azureml.ms/metric/v2.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/runs/eval_run_name/lastvalues response: body: string: '{"value": [{"dataContainerId": "dcid.eval_run_name", "name": "__pf__.nodes.grade.completed", "columns": {"__pf__.nodes.grade.completed": "Double"}, "properties": {"uxMetricType": "azureml.v1.scalar", "dataLocation": null}, "namespace": null, "standardSchemaId": null, "value": [{"metricId": "9cbe0f5a-82c4-40ff-87e2-40c270dd7f68", "createdUtc": "2024-01-12T07:57:37.111+00:00", "step": 0, "data": {"__pf__.nodes.grade.completed": 3.0}}]}, {"dataContainerId": "dcid.eval_run_name", "name": "__pf__.nodes.calculate_accuracy.completed", "columns": {"__pf__.nodes.calculate_accuracy.completed": "Double"}, "properties": {"uxMetricType": "azureml.v1.scalar", "dataLocation": null}, "namespace": null, "standardSchemaId": null, "value": [{"metricId": "2fbf7813-bbc4-4dd4-82ed-220e2778ab5f", "createdUtc": "2024-01-12T07:57:37.434+00:00", "step": 0, "data": {"__pf__.nodes.calculate_accuracy.completed": 1.0}}]}, {"dataContainerId": "dcid.eval_run_name", "name": "__pf__.lines.completed", "columns": {"__pf__.lines.completed": "Double"}, "properties": {"uxMetricType": "azureml.v1.scalar", "dataLocation": null}, "namespace": null, "standardSchemaId": null, "value": [{"metricId": "0c34f54f-1a8a-4825-8a15-1a7c4e03b22a", "createdUtc": "2024-01-12T07:57:37.91+00:00", "step": 0, "data": {"__pf__.lines.completed": 3.0}}]}, {"dataContainerId": "dcid.eval_run_name", "name": 
"__pf__.lines.failed", "columns": {"__pf__.lines.failed": "Double"}, "properties": {"uxMetricType": "azureml.v1.scalar", "dataLocation": null}, "namespace": null, "standardSchemaId": null, "value": [{"metricId": "6a2c8b89-e811-4294-a89a-8cf5bdc2351b", "createdUtc": "2024-01-12T07:57:38.321+00:00", "step": 0, "data": {"__pf__.lines.failed": 0.0}}]}, {"dataContainerId": "dcid.eval_run_name", "name": "accuracy", "columns": {"accuracy": "Double"}, "properties": {"uxMetricType": "azureml.v1.scalar", "dataLocation": null}, "namespace": null, "standardSchemaId": null, "value": [{"metricId": "e120a64b-5fdf-4b60-8aea-a145404b773b", "createdUtc": "2024-01-12T07:57:38.689+00:00", "step": 0, "data": {"accuracy": 0.0}}]}]}' headers: connection: - keep-alive content-length: - '3117' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.053' status: code: 200 message: OK - request: body: '{"runId": "batch_run_name", "selectRunMetadata": true, "selectRunDefinition": true, "selectJobSpecification": true}' headers: Accept: - '*/*' Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '137' Content-Type: - application/json User-Agent: - python-requests/2.31.0 method: POST uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata response: body: string: '{"runMetadata": {"runNumber": 1705046185, "rootRunId": "batch_run_name", "createdUtc": "2024-01-12T07:56:25.2650296+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": 
"00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", "upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 6, "statusRevision": 3, "runUuid": "77d5c25b-bc78-4d80-b9f2-9a9f51ff5fcb", "parentRunUuid": null, "rootRunUuid": "77d5c25b-bc78-4d80-b9f2-9a9f51ff5fcb", "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:03.6314501", "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "18a66f5f-dbdf-4c17-9dd7-1634712a9cbe", "upn": null}, "lastModifiedUtc": "2024-01-12T07:56:44.0488439+00:00", "duration": "00:00:03.6314501", "cancelationReason": null, "currentAttemptId": 1, "runId": "batch_run_name", "parentRunId": null, "experimentId": "b1e733a1-2a5f-4c17-bc34-4d66d2858228", "status": "Completed", "startTimeUtc": "2024-01-12T07:56:41.1891573+00:00", "endTimeUtc": "2024-01-12T07:56:44.8206074+00:00", "scheduleId": null, "displayName": "sdk-cli-test-fixture-batch-run-without-llm", "name": null, "dataContainerId": "dcid.batch_run_name", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": "AmlcDsi"}, "properties": {"azureml.promptflow.runtime_name": "test-runtime-ci", "azureml.promptflow.runtime_version": "20231204.v4", "azureml.promptflow.definition_file_name": "flow.dag.yaml", "azureml.promptflow.session_id": "bee356189f7e7f18671a79369c78df4cfb1bbd0c99069074", "azureml.promptflow.flow_lineage_id": "f7ee724d91e4f4a7501bdc0b66995bc8b57f86b3a526fa2a81c34ebcccbbd912", 
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", "azureml.promptflow.flow_definition_blob_path": "LocalUpload/36774154bc3ecde4aa21054b3052221f/hello-world/flow.dag.yaml", "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl", "azureml.promptflow.inputs_mapping": "{\"name\":\"${data.url}\"}", "_azureml.evaluation_run": "promptflow.BatchRun", "azureml.promptflow.snapshot_id": "90c907a3-78cc-4691-acfc-dcfab76a270a", "azureml.promptflow.total_tokens": "0", "_azureml.evaluate_artifacts": "[{\"path\": \"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_debug_info/versions/1", "type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1", "type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings": null}' headers: connection: - keep-alive content-length: - '4649' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.077' status: code: 200 message: OK - request: body: '{"runId": "eval_run_name", "selectRunMetadata": true, "selectRunDefinition": true, 
"selectJobSpecification": true}' headers: Accept: - '*/*' Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Length: - '137' Content-Type: - application/json User-Agent: - python-requests/2.31.0 method: POST uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata response: body: string: '{"runMetadata": {"runNumber": 1705046237, "rootRunId": "eval_run_name", "createdUtc": "2024-01-12T07:57:17.8126536+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", "upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 6, "statusRevision": 3, "runUuid": "95db556c-4453-488b-bf1b-f98fffac1cb6", "parentRunUuid": null, "rootRunUuid": "95db556c-4453-488b-bf1b-f98fffac1cb6", "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:04.6913589", "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "18a66f5f-dbdf-4c17-9dd7-1634712a9cbe", "upn": null}, "lastModifiedUtc": "2024-01-12T07:57:39.1237305+00:00", "duration": "00:00:04.6913589", "cancelationReason": null, "currentAttemptId": 1, "runId": "eval_run_name", "parentRunId": null, "experimentId": "7bdec279-f99c-4ed3-b0b8-dd75698b8fd0", "status": 
"Completed", "startTimeUtc": "2024-01-12T07:57:35.4702007+00:00", "endTimeUtc": "2024-01-12T07:57:40.1615596+00:00", "scheduleId": null, "displayName": "sdk-cli-test-fixture-eval-run-without-llm", "name": null, "dataContainerId": "dcid.eval_run_name", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": "AmlcDsi"}, "properties": {"azureml.promptflow.runtime_name": "test-runtime-ci", "azureml.promptflow.runtime_version": "20231204.v4", "azureml.promptflow.definition_file_name": "flow.dag.yaml", "azureml.promptflow.session_id": "f8e4236a4e78e7f7125bbd811ec7976cb330412723a530f8", "azureml.promptflow.flow_lineage_id": "26c575d863a85371ef937096728441d8c68c3e737b5a1bfeae5ac8f3b9ccb048", "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", "azureml.promptflow.flow_definition_blob_path": "LocalUpload/1aa3064d06f6170abbc488cc35c713b9/eval-classification-accuracy/flow.dag.yaml", "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl", "azureml.promptflow.input_run_id": "batch_run_name", "azureml.promptflow.inputs_mapping": "{\"groundtruth\":\"${data.answer}\",\"prediction\":\"${run.outputs.result}\"}", "_azureml.evaluation_run": "promptflow.BatchRun", "azureml.promptflow.snapshot_id": "b37a1c40-7c6c-4246-a735-5a9a3881116b", "azureml.promptflow.total_tokens": "0", "_azureml.evaluate_artifacts": "[{\"path\": \"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": null, "computeRequest": null, "compute": 
null, "retainForLifetimeOfWorkspace": false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_debug_info/versions/1", "type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_flow_outputs/versions/1", "type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings": null}' headers: connection: - keep-alive content-length: - '4797' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.042' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Type: - application/json User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name/logContent response: body: string: '"2024-01-12 07:56:29 +0000 49 promptflow-runtime INFO [batch_run_name] Receiving v2 bulk run request 44f9aee0-3f52-414a-baa4-56170db340a5: {\"flow_id\": \"batch_run_name\", \"flow_run_id\": \"batch_run_name\", \"flow_source\": {\"flow_source_type\": 1, \"flow_source_info\": {\"snapshot_id\": \"90c907a3-78cc-4691-acfc-dcfab76a270a\"}, \"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": 
\"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.batch_run_name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A46%3A24Z&ske=2024-01-13T15%3A56%3A24Z&sks=b&skv=2019-07-07&st=2024-01-12T07%3A46%3A28Z&se=2024-01-12T15%3A56%3A28Z&sp=rcw\", \"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\", \"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl\"}, \"inputs_mapping\": {\"name\": \"${data.url}\"}, \"azure_storage_setting\": {\"azure_storage_mode\": 1, \"storage_account_name\": \"promptfloweast4063704120\", \"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\", \"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/batch_run_name\", \"blob_container_sas_token\": \"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A56%3A28Z&ske=2024-01-19T07%3A56%3A28Z&sks=b&skv=2019-07-07&se=2024-01-19T07%3A56%3A28Z&sp=racwl\", \"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 07:56:29 +0000 49 promptflow-runtime INFO Runtime version: 20231204.v4. 
PromptFlow version: 1.2.0rc1\n2024-01-12 07:56:29 +0000 49 promptflow-runtime INFO Updating batch_run_name to Status.Preparing...\n2024-01-12 07:56:29 +0000 49 promptflow-runtime INFO Downloading snapshot to /mnt/host/service/app/39649/requests/batch_run_name\n2024-01-12 07:56:29 +0000 49 promptflow-runtime INFO Get snapshot sas url for 90c907a3-78cc-4691-acfc-dcfab76a270a...\n2024-01-12 07:56:35 +0000 49 promptflow-runtime INFO Downloading snapshot 90c907a3-78cc-4691-acfc-dcfab76a270a from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/90c907a3-78cc-4691-acfc-dcfab76a270a.zip...\n2024-01-12 07:56:35 +0000 49 promptflow-runtime INFO Downloaded file /mnt/host/service/app/39649/requests/batch_run_name/90c907a3-78cc-4691-acfc-dcfab76a270a.zip with size 495 for snapshot 90c907a3-78cc-4691-acfc-dcfab76a270a.\n2024-01-12 07:56:35 +0000 49 promptflow-runtime INFO Download snapshot 90c907a3-78cc-4691-acfc-dcfab76a270a completed.\n2024-01-12 07:56:35 +0000 49 promptflow-runtime INFO Successfully download snapshot to /mnt/host/service/app/39649/requests/batch_run_name\n2024-01-12 07:56:35 +0000 49 promptflow-runtime INFO About to execute a python flow.\n2024-01-12 07:56:35 +0000 49 promptflow-runtime INFO Use spawn method to start child process.\n2024-01-12 07:56:36 +0000 49 promptflow-runtime INFO Starting to check process 3118 status for run batch_run_name\n2024-01-12 07:56:36 +0000 49 promptflow-runtime INFO Start checking run status for run batch_run_name\n2024-01-12 07:56:39 +0000 3118 promptflow-runtime INFO [49--3118] Start processing flowV2......\n2024-01-12 07:56:39 +0000 3118 promptflow-runtime INFO Runtime version: 20231204.v4. 
PromptFlow version: 1.2.0rc1\n2024-01-12 07:56:39 +0000 3118 promptflow-runtime INFO Setting mlflow tracking uri...\n2024-01-12 07:56:39 +0000 3118 promptflow-runtime INFO Validating ''AzureML Data Scientist'' user authentication...\n2024-01-12 07:56:40 +0000 3118 promptflow-runtime INFO Successfully validated ''AzureML Data Scientist'' user authentication.\n2024-01-12 07:56:40 +0000 3118 promptflow-runtime INFO Using AzureMLRunStorageV2\n2024-01-12 07:56:40 +0000 3118 promptflow-runtime INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12 07:56:40 +0000 3118 promptflow-runtime INFO Initialized blob service client for AzureMLRunTracker.\n2024-01-12 07:56:40 +0000 3118 promptflow-runtime INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12 07:56:40 +0000 3118 promptflow-runtime INFO Resolve data from url finished in 0.4822304155677557 seconds\n2024-01-12 07:56:40 +0000 3118 promptflow-runtime INFO Starting the aml run ''batch_run_name''...\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Using fork, process count: 3\n2024-01-12 07:56:41 +0000 3161 execution.bulk INFO Process 3161 started.\n2024-01-12 07:56:41 +0000 3170 execution.bulk INFO Process 3170 started.\n2024-01-12 07:56:41 +0000 3165 execution.bulk INFO Process 3165 started.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Process name: ForkProcess-38:2, Process id: 3161, Line number: 0 start execution.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Process name: ForkProcess-38:3, Process id: 3170, Line number: 1 start execution.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Process name: ForkProcess-38:4, Process id: 
3165, Line number: 2 start execution.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Process name: ForkProcess-38:2, Process id: 3161, Line number: 0 completed.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Finished 1 / 3 lines.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Process name: ForkProcess-38:4, Process id: 3165, Line number: 2 completed.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Process name: ForkProcess-38:3, Process id: 3170, Line number: 1 completed.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Average execution time for completed lines: 0.26 seconds. Estimated time for incomplete lines: 0.52 seconds.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Finished 3 / 3 lines.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Finished 3 / 3 lines.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Average execution time for completed lines: 0.1 seconds. Estimated time for incomplete lines: 0.0 seconds.\n2024-01-12 07:56:41 +0000 3118 execution.bulk INFO Average execution time for completed lines: 0.1 seconds. 
Estimated time for incomplete lines: 0.0 seconds.\n2024-01-12 07:56:43 +0000 3118 execution.bulk INFO Upload status summary metrics for run batch_run_name finished in 1.1319385208189487 seconds\n2024-01-12 07:56:44 +0000 3118 promptflow-runtime INFO Successfully write run properties {\"azureml.promptflow.total_tokens\": 0, \"_azureml.evaluate_artifacts\": \"[{\\\"path\\\": \\\"instance_results.jsonl\\\", \\\"type\\\": \\\"table\\\"}]\"} with run id ''batch_run_name''\n2024-01-12 07:56:44 +0000 3118 execution.bulk INFO Upload RH properties for run batch_run_name finished in 0.09029437880963087 seconds\n2024-01-12 07:56:44 +0000 3118 promptflow-runtime INFO Creating unregistered output Asset for Run batch_run_name...\n2024-01-12 07:56:44 +0000 3118 promptflow-runtime INFO Created debug_info Asset: azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_debug_info/versions/1\n2024-01-12 07:56:44 +0000 3118 promptflow-runtime INFO Creating unregistered output Asset for Run batch_run_name...\n2024-01-12 07:56:44 +0000 3118 promptflow-runtime INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1\n2024-01-12 07:56:44 +0000 3118 promptflow-runtime INFO Creating Artifact for Run batch_run_name...\n2024-01-12 07:56:44 +0000 3118 promptflow-runtime INFO Created instance_results.jsonl Artifact.\n2024-01-12 07:56:44 +0000 3118 promptflow-runtime INFO Patching batch_run_name...\n2024-01-12 07:56:44 +0000 3118 promptflow-runtime INFO Ending the aml run ''batch_run_name'' with status ''Completed''...\n2024-01-12 07:56:46 +0000 49 promptflow-runtime INFO Process 3118 finished\n2024-01-12 07:56:46 +0000 49 promptflow-runtime INFO [49] Child process finished!\n2024-01-12 07:56:46 +0000 49 promptflow-runtime INFO [batch_run_name] End processing bulk run\n2024-01-12 07:56:46 +0000 49 promptflow-runtime INFO Cleanup working dir 
/mnt/host/service/app/39649/requests/batch_run_name for bulk run\n"' headers: connection: - keep-alive content-length: - '9814' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.748' status: code: 200 message: OK - request: body: null headers: Accept: - application/json Accept-Encoding: - gzip, deflate Connection: - keep-alive Content-Type: - application/json User-Agent: - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 (Windows-10-10.0.22631-SP0) method: GET uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name/logContent response: body: string: '"2024-01-12 07:57:22 +0000 49 promptflow-runtime INFO [eval_run_name] Receiving v2 bulk run request c27f8266-9fb7-41e8-befd-56ba05167594: {\"flow_id\": \"eval_run_name\", \"flow_run_id\": \"eval_run_name\", \"flow_source\": {\"flow_source_type\": 1, \"flow_source_info\": {\"snapshot_id\": \"b37a1c40-7c6c-4246-a735-5a9a3881116b\"}, \"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.eval_run_name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A46%3A24Z&ske=2024-01-13T15%3A56%3A24Z&sks=b&skv=2019-07-07&st=2024-01-12T07%3A47%3A21Z&se=2024-01-12T15%3A57%3A21Z&sp=rcw\", \"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\", \"data_inputs\": {\"data\": 
\"azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl\", \"run.outputs\": \"azureml:/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1\"}, \"inputs_mapping\": {\"groundtruth\": \"${data.answer}\", \"prediction\": \"${run.outputs.result}\"}, \"azure_storage_setting\": {\"azure_storage_mode\": 1, \"storage_account_name\": \"promptfloweast4063704120\", \"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\", \"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/eval_run_name\", \"blob_container_sas_token\": \"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A57%3A22Z&ske=2024-01-19T07%3A57%3A21Z&sks=b&skv=2019-07-07&se=2024-01-19T07%3A57%3A21Z&sp=racwl\", \"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 07:57:22 +0000 49 promptflow-runtime INFO Runtime version: 20231204.v4. 
PromptFlow version: 1.2.0rc1\n2024-01-12 07:57:22 +0000 49 promptflow-runtime INFO Updating eval_run_name to Status.Preparing...\n2024-01-12 07:57:22 +0000 49 promptflow-runtime INFO Downloading snapshot to /mnt/host/service/app/39649/requests/eval_run_name\n2024-01-12 07:57:22 +0000 49 promptflow-runtime INFO Get snapshot sas url for b37a1c40-7c6c-4246-a735-5a9a3881116b...\n2024-01-12 07:57:29 +0000 49 promptflow-runtime INFO Downloading snapshot b37a1c40-7c6c-4246-a735-5a9a3881116b from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/b37a1c40-7c6c-4246-a735-5a9a3881116b.zip...\n2024-01-12 07:57:29 +0000 49 promptflow-runtime INFO Downloaded file /mnt/host/service/app/39649/requests/eval_run_name/b37a1c40-7c6c-4246-a735-5a9a3881116b.zip with size 1243 for snapshot b37a1c40-7c6c-4246-a735-5a9a3881116b.\n2024-01-12 07:57:29 +0000 49 promptflow-runtime INFO Download snapshot b37a1c40-7c6c-4246-a735-5a9a3881116b completed.\n2024-01-12 07:57:29 +0000 49 promptflow-runtime INFO Successfully download snapshot to /mnt/host/service/app/39649/requests/eval_run_name\n2024-01-12 07:57:29 +0000 49 promptflow-runtime INFO About to execute a python flow.\n2024-01-12 07:57:29 +0000 49 promptflow-runtime INFO Use spawn method to start child process.\n2024-01-12 07:57:29 +0000 49 promptflow-runtime INFO Starting to check process 3221 status for run eval_run_name\n2024-01-12 07:57:29 +0000 49 promptflow-runtime INFO Start checking run status for run eval_run_name\n2024-01-12 07:57:33 +0000 3221 promptflow-runtime INFO [49--3221] Start processing flowV2......\n2024-01-12 07:57:33 +0000 3221 promptflow-runtime INFO Runtime version: 20231204.v4. 
PromptFlow version: 1.2.0rc1\n2024-01-12 07:57:33 +0000 3221 promptflow-runtime INFO Setting mlflow tracking uri...\n2024-01-12 07:57:33 +0000 3221 promptflow-runtime INFO Validating ''AzureML Data Scientist'' user authentication...\n2024-01-12 07:57:33 +0000 3221 promptflow-runtime INFO Successfully validated ''AzureML Data Scientist'' user authentication.\n2024-01-12 07:57:33 +0000 3221 promptflow-runtime INFO Using AzureMLRunStorageV2\n2024-01-12 07:57:33 +0000 3221 promptflow-runtime INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12 07:57:33 +0000 3221 promptflow-runtime INFO Initialized blob service client for AzureMLRunTracker.\n2024-01-12 07:57:33 +0000 3221 promptflow-runtime INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12 07:57:34 +0000 3221 promptflow-runtime INFO Resolve data from url finished in 0.6532626627013087 seconds\n2024-01-12 07:57:35 +0000 3221 promptflow-runtime INFO Resolve data from url finished in 0.6382788131013513 seconds\n2024-01-12 07:57:35 +0000 3221 promptflow-runtime INFO Starting the aml run ''eval_run_name''...\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Using fork, process count: 3\n2024-01-12 07:57:35 +0000 3264 execution.bulk INFO Process 3264 started.\n2024-01-12 07:57:35 +0000 3268 execution.bulk INFO Process 3268 started.\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Process name: ForkProcess-40:2, Process id: 3264, Line number: 0 start execution.\n2024-01-12 07:57:35 +0000 3273 execution.bulk INFO Process 3273 started.\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Process name: ForkProcess-40:3, Process id: 3268, Line number: 1 
start execution.\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Process name: ForkProcess-40:4, Process id: 3273, Line number: 2 start execution.\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Process name: ForkProcess-40:2, Process id: 3264, Line number: 0 completed.\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Finished 1 / 3 lines.\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Average execution time for completed lines: 0.21 seconds. Estimated time for incomplete lines: 0.42 seconds.\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Process name: ForkProcess-40:3, Process id: 3268, Line number: 1 completed.\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Process name: ForkProcess-40:4, Process id: 3273, Line number: 2 completed.\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Finished 3 / 3 lines.\n2024-01-12 07:57:35 +0000 3221 execution.bulk INFO Finished 3 / 3 lines.\n2024-01-12 07:57:36 +0000 3221 execution.bulk INFO Average execution time for completed lines: 0.09 seconds. Estimated time for incomplete lines: 0.0 seconds.\n2024-01-12 07:57:36 +0000 3221 execution.bulk INFO Average execution time for completed lines: 0.09 seconds. 
Estimated time for incomplete lines: 0.0 seconds.\n2024-01-12 07:57:37 +0000 3221 execution.bulk INFO Executing aggregation nodes...\n2024-01-12 07:57:37 +0000 3221 execution.bulk INFO Finish executing aggregation nodes.\n2024-01-12 07:57:38 +0000 3221 execution.bulk INFO Upload status summary metrics for run eval_run_name finished in 1.5616551944985986 seconds\n2024-01-12 07:57:39 +0000 3221 execution.bulk INFO Upload metrics for run eval_run_name finished in 0.36921436339616776 seconds\n2024-01-12 07:57:39 +0000 3221 promptflow-runtime INFO Successfully write run properties {\"azureml.promptflow.total_tokens\": 0, \"_azureml.evaluate_artifacts\": \"[{\\\"path\\\": \\\"instance_results.jsonl\\\", \\\"type\\\": \\\"table\\\"}]\"} with run id ''eval_run_name''\n2024-01-12 07:57:39 +0000 3221 execution.bulk INFO Upload RH properties for run eval_run_name finished in 0.07229613605886698 seconds\n2024-01-12 07:57:39 +0000 3221 promptflow-runtime INFO Creating unregistered output Asset for Run eval_run_name...\n2024-01-12 07:57:39 +0000 3221 promptflow-runtime INFO Created debug_info Asset: azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_debug_info/versions/1\n2024-01-12 07:57:39 +0000 3221 promptflow-runtime INFO Creating unregistered output Asset for Run eval_run_name...\n2024-01-12 07:57:39 +0000 3221 promptflow-runtime INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_flow_outputs/versions/1\n2024-01-12 07:57:39 +0000 3221 promptflow-runtime INFO Creating Artifact for Run eval_run_name...\n2024-01-12 07:57:39 +0000 3221 promptflow-runtime INFO Created instance_results.jsonl Artifact.\n2024-01-12 07:57:39 +0000 3221 promptflow-runtime INFO Patching eval_run_name...\n2024-01-12 07:57:40 +0000 3221 promptflow-runtime INFO Ending the aml run ''eval_run_name'' with status ''Completed''...\n2024-01-12 07:57:41 +0000 49 promptflow-runtime INFO Process 3221 
finished\n2024-01-12 07:57:41 +0000 49 promptflow-runtime INFO [49] Child process finished!\n2024-01-12 07:57:41 +0000 49 promptflow-runtime INFO [eval_run_name] End processing bulk run\n2024-01-12 07:57:41 +0000 49 promptflow-runtime INFO Cleanup working dir /mnt/host/service/app/39649/requests/eval_run_name for bulk run\n"' headers: connection: - keep-alive content-length: - '10617' content-type: - application/json; charset=utf-8 strict-transport-security: - max-age=15724800; includeSubDomains; preload transfer-encoding: - chunked vary: - Accept-Encoding x-content-type-options: - nosniff x-request-time: - '0.452' status: code: 200 message: OK version: 1
promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_metrics.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_metrics.yaml", "repo_id": "promptflow", "token_count": 90735 }
78
flow: ../flows/classification_accuracy_evaluation data: not_exist column_mapping: groundtruth: "${data.answer}" prediction: "${run.outputs.category}" run: flow_run_20230629_101205 # ./sample_bulk_run.yaml # run config: env related environment_variables: .env # optional connections: node_1: connection: test_llm_connection deployment_name: gpt-35-turbo
promptflow/src/promptflow/tests/test_configs/runs/illegal/non_exist_data.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/runs/illegal/non_exist_data.yaml", "repo_id": "promptflow", "token_count": 139 }
79
inputs: num: type: int outputs: content: type: string reference: "" nodes: - name: divide_num type: python source: type: code path: divide_num.py inputs: num: ${inputs.num}
promptflow/src/promptflow/tests/test_configs/wrong_flows/flow_output_reference_invalid/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/flow_output_reference_invalid/flow.dag.yaml", "repo_id": "promptflow", "token_count": 92 }
80
inputs: num: type: int outputs: content: type: string reference: ${stringify_num.output} nodes: - name: stringify_num type: python source: type: code path: stringify_num.py inputs: num: ${inputs.num} - name: stringify_num type: python source: type: code path: another_stringify_num.py inputs: num: ${inputs.num}
promptflow/src/promptflow/tests/test_configs/wrong_flows/nodes_names_duplicated/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/nodes_names_duplicated/flow.dag.yaml", "repo_id": "promptflow", "token_count": 155 }
81
inputs: text: type: string outputs: output: type: string reference: ${search_by_text.output.search_metadata} nodes: - name: search_by_text type: python source: type: package tool: promptflow.tools.serpapi11.SerpAPI.search inputs: connection: serp_connection query: ${inputs.text} num: 1
promptflow/src/promptflow/tests/test_configs/wrong_flows/wrong_package_in_package_tools/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/wrong_package_in_package_tools/flow.dag.yaml", "repo_id": "promptflow", "token_count": 131 }
82
# Use flow in Azure ML pipeline job After you have developed and tested the flow in [init and test a flow](../../how-to-guides/init-and-test-a-flow.md), this guide will help you learn how to use a flow as a parallel component in a pipeline job on AzureML, so that you can integrate the created flow with existing pipelines and process a large amount of data. :::{admonition} Pre-requirements - Customers need to install the extension `ml>=2.21.0` to enable this feature in CLI and package `azure-ai-ml>=1.11.0` to enable this feature in SDK; - Customers need to put `$schema` in the target `flow.dag.yaml` to enable this feature; - `flow.dag.yaml`: `$schema`: `https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json` - `run.yaml`: `$schema`: `https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json` - Customers need to generate `flow.tools.json` for the target flow before the usage below. The generation can be done by `pf flow validate`. ::: For more information about AzureML and component: - [Install and set up the CLI(v2)](https://learn.microsoft.com/en-us/azure/machine-learning/how-to-configure-cli?view=azureml-api-2&tabs=public) - [Install and set up the SDK(v2)](https://learn.microsoft.com/en-us/python/api/overview/azure/ai-ml-readme?view=azure-python) - [What is a pipeline](https://learn.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines?view=azureml-api-2) - [What is a component](https://learn.microsoft.com/en-us/azure/machine-learning/concept-component?view=azureml-api-2) ## Register a flow as a component Customers can register a flow as a component with either CLI or SDK. 
::::{tab-set} :::{tab-item} CLI :sync: CLI ```bash # Register flow as a component # Default component name will be the name of flow folder, which is not a valid component name, so we override it here; default version will be "1" az ml component create --file standard/web-classification/flow.dag.yaml --set name=web_classification # Register flow as a component with parameters override az ml component create --file standard/web-classification/flow.dag.yaml --version 2 --set name=web_classification_updated ``` ::: :::{tab-item} SDK :sync: SDK ```python from azure.ai.ml import MLClient, load_component ml_client = MLClient() # Register flow as a component flow_component = load_component("standard/web-classification/flow.dag.yaml") # Default component name will be the name of flow folder, which is not a valid component name, so we override it here; default version will be "1" flow_component.name = "web_classification" ml_client.components.create_or_update(flow_component) # Register flow as a component with parameters override ml_client.components.create_or_update( "standard/web-classification/flow.dag.yaml", version="2", params_override=[ {"name": "web_classification_updated"} ] ) ``` ::: :::: After registered a flow as a component, they can be referred in a pipeline job like [regular registered components](https://github.com/Azure/azureml-examples/tree/main/cli/jobs/pipelines-with-components/basics/1b_e2e_registered_components). ## Directly use a flow in a pipeline job Besides explicitly registering a flow as a component, customer can also directly use flow in a pipeline job: All connections and flow inputs will be exposed as input parameters of the component. Default value can be provided in flow/run definition; they can also be set/overwrite on job submission: ::::{tab-set} :::{tab-item} CLI :sync: CLI ```yaml ... 
jobs: flow_node: type: parallel component: standard/web-classification/flow.dag.yaml inputs: data: ${{parent.inputs.web_classification_input}} url: "${data.url}" connections.summarize_text_content.connection: azure_open_ai_connection connections.summarize_text_content.deployment_name: text-davinci-003 ... ``` Above is part of the pipeline job yaml, see here for [full example](https://github.com/Azure/azureml-examples/tree/main/cli/jobs/pipelines-with-components/pipeline_job_with_flow_as_component). ::: :::{tab-item} SDK :sync: SDK ```python from azure.identity import DefaultAzureCredential from azure.ai.ml import MLClient, load_component, Input from azure.ai.ml.dsl import pipeline credential = DefaultAzureCredential() ml_client = MLClient.from_config(credential=credential) data_input = Input(path="standard/web-classification/data.jsonl", type='uri_file') # Load flow as a component flow_component = load_component("standard/web-classification/flow.dag.yaml") @pipeline def pipeline_func_with_flow(data): flow_node = flow_component( data=data, url="${data.url}", connections={ "summarize_text_content": { "connection": "azure_open_ai_connection", "deployment_name": "text-davinci-003", }, }, ) flow_node.compute = "cpu-cluster" pipeline_with_flow = pipeline_func_with_flow(data=data_input) pipeline_job = ml_client.jobs.create_or_update(pipeline_with_flow) ml_client.jobs.stream(pipeline_job.name) ``` Above is part of the pipeline job python code, see here for [full example](https://github.com/Azure/azureml-examples/tree/main/sdk/python/jobs/pipelines/1l_flow_in_pipeline). ::: :::: ## Difference across flow in prompt flow and pipeline job In prompt flow, flow runs on [runtime](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/concept-runtime), which is designed for prompt flow and easy to customize; while in pipeline job, flow runs on different types of compute, and usually compute cluster. 
Given the above, if your flow has logic relying on identity or environment variables, please be aware of this difference, as you might run into unexpected errors when the flow runs in a pipeline job, and you might need some extra configuration to make it work.
promptflow/docs/cloud/azureai/use-flow-in-azure-ml-pipeline.md/0
{ "file_path": "promptflow/docs/cloud/azureai/use-flow-in-azure-ml-pipeline.md", "repo_id": "promptflow", "token_count": 1931 }
0
# Deploy a flow A flow can be deployed to multiple platforms, such as a local development service, Docker container, Kubernetes cluster, etc. ```{gallery-grid} :grid-columns: 1 2 2 3 - image: ../../media/how-to-guides/local.png content: "<center><b>Development server</b></center>" website: deploy-using-dev-server.html - image: ../../media/how-to-guides/docker.png content: "<center><b>Docker</b></center>" website: deploy-using-docker.html - image: ../../media/how-to-guides/kubernetes.png content: "<center><b>Kubernetes</b></center>" website: deploy-using-kubernetes.html ``` We also provide guides to deploy to cloud, such as azure app service: ```{gallery-grid} :grid-columns: 1 2 2 3 - image: ../../media/how-to-guides/appservice.png content: "<center><b>Azure App Service</b></center>" website: ../../cloud/azureai/deploy-to-azure-appservice.html ``` We are working on more official deployment guides for other hosting providers, and welcome user submitted guides. ```{toctree} :maxdepth: 1 :hidden: deploy-using-dev-server deploy-using-docker deploy-using-kubernetes distribute-flow-as-executable-app ```
promptflow/docs/how-to-guides/deploy-a-flow/index.md/0
{ "file_path": "promptflow/docs/how-to-guides/deploy-a-flow/index.md", "repo_id": "promptflow", "token_count": 397 }
1
# Execute flow as a function :::{admonition} Experimental feature This is an experimental feature, and may change at any time. Learn [more](faq.md#stable-vs-experimental). ::: ## Overview Promptflow allows you to load a flow and use it as a function in your code. This feature is useful when building a service on top of a flow; reference [here](https://github.com/microsoft/promptflow/tree/main/examples/tutorials/flow-deploy/create-service-with-flow) for a simple example service with flow function consumption. ## Load and invoke the flow function To use the flow-as-function feature, you first need to load a flow using the `load_flow` function. Then you can consume the flow object like a function by providing key-value arguments for it. ```python f = load_flow("../../examples/flows/standard/web-classification/") f(url="sample_url") ``` ## Config the flow with context You can overwrite some flow configs before flow function execution by setting `flow.context`. ### Load flow as a function with in-memory connection override By providing a connection object to the flow context, the flow won't need to fetch the connection at execution time, which can save time for cases where the flow function needs to be called multiple times. ```python from promptflow.entities import AzureOpenAIConnection connection_obj = AzureOpenAIConnection( name=conn_name, api_key=api_key, api_base=api_base, api_type="azure", api_version=api_version, ) # no need to create the connection object. f.context = FlowContext( connections={"classify_with_llm": {"connection": connection_obj}} ) ``` ### Load flow as a function with flow inputs override By providing overrides, the original flow dag will be updated at execution time.
```python f.context = FlowContext( # node "fetch_text_content_from_url" will take inputs from the following command instead of from flow input overrides={"nodes.fetch_text_content_from_url.inputs.url": sample_url}, ) ``` **Note**, the `overrides` are only doing YAML content replacement on original `flow.dag.yaml`. If the `flow.dag.yaml` become invalid after `overrides`, validation error will be raised when executing. ### Load flow as a function with streaming output After set `streaming` in flow context, the flow function will return an iterator to stream the output. ```python f = load_flow(source="../../examples/flows/chat/basic-chat/") f.context.streaming = True result = f( chat_history=[ { "inputs": {"chat_input": "Hi"}, "outputs": {"chat_output": "Hello! How can I assist you today?"}, } ], question="How are you?", ) answer = "" # the result will be a generator, iterate it to get the result for r in result["answer"]: answer += r ``` Reference our [sample](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/get-started/flow-as-function.ipynb) for usage. ## Next steps Learn more about: - [Flow as a function sample](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/get-started/flow-as-function.ipynb) - [Deploy a flow](./deploy-a-flow/index.md)
promptflow/docs/how-to-guides/execute-flow-as-a-function.md/0
{ "file_path": "promptflow/docs/how-to-guides/execute-flow-as-a-function.md", "repo_id": "promptflow", "token_count": 969 }
2
# Custom Tools This section contains documentation for custom tools created by the community to extend Prompt flow's capabilities for specific use cases. These tools are developed following the guide on [Creating and Using Tool Packages](../../how-to-guides/develop-a-tool/create-and-use-tool-package.md). They are not officially maintained or endorsed by the Prompt flow team. For questions or issues when using a tool, please use the support contact link in the table below. ## Tool Package Index The table below provides an index of custom tool packages. The columns contain: - **Package Name:** The name of the tool package. Links to the package documentation. - **Description:** A short summary of what the tool package does. - **Owner:** The creator/maintainer of the tool package. - **Support Contact:** Link to contact for support and reporting new issues. | Package Name | Description | Owner | Support Contact | |-|-|-|-| | promptflow-azure-ai-language | Collection of Azure AI Language Prompt flow tools. | Sean Murray | [email protected] | ```{toctree} :maxdepth: 1 :hidden: azure-ai-language-tool ```
promptflow/docs/integrations/tools/index.md/0
{ "file_path": "promptflow/docs/integrations/tools/index.md", "repo_id": "promptflow", "token_count": 287 }
3
# Vector DB Lookup Vector DB Lookup is a vector search tool that allows users to search top k similar vectors from vector database. This tool is a wrapper for multiple third-party vector databases. The list of current supported databases is as follows. | Name | Description | | --- | --- | | Azure Cognitive Search | Microsoft's cloud search service with built-in AI capabilities that enrich all types of information to help identify and explore relevant content at scale. | | Qdrant | Qdrant is a vector similarity search engine that provides a production-ready service with a convenient API to store, search and manage points (i.e. vectors) with an additional payload. | | Weaviate | Weaviate is an open source vector database that stores both objects and vectors. This allows for combining vector search with structured filtering. | This tool will support more vector databases. ## Requirements - For AzureML users, the tool is installed in default image, you can use the tool without extra installation. - For local users, `pip install promptflow-vectordb` ## Prerequisites The tool searches data from a third-party vector database. To use it, you should create resources in advance and establish connection between the tool and the resource. - **Azure Cognitive Search:** - Create resource [Azure Cognitive Search](https://learn.microsoft.com/en-us/azure/search/search-create-service-portal). - Add "Cognitive search" connection. Fill "API key" field with "Primary admin key" from "Keys" section of created resource, and fill "API base" field with the URL, the URL format is `https://{your_serive_name}.search.windows.net`. - **Qdrant:** - Follow the [installation](https://qdrant.tech/documentation/quick-start/) to deploy Qdrant to a self-maintained cloud server. - Add "Qdrant" connection. Fill "API base" with your self-maintained cloud server address and fill "API key" field. 
- **Weaviate:** - Follow the [installation](https://weaviate.io/developers/weaviate/installation) to deploy Weaviate to a self-maintained instance. - Add "Weaviate" connection. Fill "API base" with your self-maintained instance address and fill "API key" field. ## Inputs The tool accepts the following inputs: - **Azure Cognitive Search:** | Name | Type | Description | Required | | ---- | ---- | ----------- | -------- | | connection | CognitiveSearchConnection | The created connection for accessing to Cognitive Search endpoint. | Yes | | index_name | string | The index name created in Cognitive Search resource. | Yes | | text_field | string | The text field name. The returned text field will populate the text of output. | No | | vector_field | string | The vector field name. The target vector is searched in this vector field. | Yes | | search_params | dict | The search parameters. It's key-value pairs. Except for parameters in the tool input list mentioned above, additional search parameters can be formed into a JSON object as search_params. For example, use `{"select": ""}` as search_params to select the returned fields, use `{"search": ""}` to perform a [hybrid search](https://learn.microsoft.com/en-us/azure/search/search-get-started-vector#hybrid-search). | No | | search_filters | dict | The search filters. It's key-value pairs, the input format is like `{"filter": ""}` | No | | vector | list | The target vector to be queried, which can be generated by Embedding tool. | Yes | | top_k | int | The count of top-scored entities to return. Default value is 3 | No | - **Qdrant:** | Name | Type | Description | Required | | ---- | ---- | ----------- | -------- | | connection | QdrantConnection | The created connection for accessing to Qdrant server. | Yes | | collection_name | string | The collection name created in self-maintained cloud server. | Yes | | text_field | string | The text field name. The returned text field will populate the text of output. 
| No | | search_params | dict | The search parameters can be formed into a JSON object as search_params. For example, use `{"params": {"hnsw_ef": 0, "exact": false, "quantization": null}}` to set search_params. | No | | search_filters | dict | The search filters. It's key-value pairs, the input format is like `{"filter": {"should": [{"key": "", "match": {"value": ""}}]}}` | No | | vector | list | The target vector to be queried, which can be generated by Embedding tool. | Yes | | top_k | int | The count of top-scored entities to return. Default value is 3 | No | - **Weaviate:** | Name | Type | Description | Required | | ---- | ---- | ----------- | -------- | | connection | WeaviateConnection | The created connection for accessing to Weaviate. | Yes | | class_name | string | The class name. | Yes | | text_field | string | The text field name. The returned text field will populate the text of output. | No | | vector | list | The target vector to be queried, which can be generated by Embedding tool. | Yes | | top_k | int | The count of top-scored entities to return. Default value is 3 | No | ## Outputs The following is an example JSON format response returned by the tool, which includes the top-k scored entities. The entity follows a generic schema of vector search result provided by promptflow-vectordb SDK. 
- **Azure Cognitive Search:** For Azure Cognitive Search, the following fields are populated: | Field Name | Type | Description | | ---- | ---- | ----------- | | original_entity | dict | the original response json from search REST API| | score | float | @search.score from the original entity, which evaluates the similarity between the entity and the query vector | | text | string | text of the entity| | vector | list | vector of the entity| <details> <summary>Output</summary> ```json [ { "metadata": null, "original_entity": { "@search.score": 0.5099789, "id": "", "your_text_filed_name": "sample text1", "your_vector_filed_name": [-0.40517663431890405, 0.5856996257406859, -0.1593078462266455, -0.9776269170785785, -0.6145604369828972], "your_additional_field_name": "" }, "score": 0.5099789, "text": "sample text1", "vector": [-0.40517663431890405, 0.5856996257406859, -0.1593078462266455, -0.9776269170785785, -0.6145604369828972] } ] ``` </details> - **Qdrant:** For Qdrant, the following fields are populated: | Field Name | Type | Description | | ---- | ---- | ----------- | | original_entity | dict | the original response json from search REST API| | metadata | dict | payload from the original entity| | score | float | score from the original entity, which evaluates the similarity between the entity and the query vector| | text | string | text of the payload| | vector | list | vector of the entity| <details> <summary>Output</summary> ```json [ { "metadata": { "text": "sample text1" }, "original_entity": { "id": 1, "payload": { "text": "sample text1" }, "score": 1, "vector": [0.18257418, 0.36514837, 0.5477226, 0.73029673], "version": 0 }, "score": 1, "text": "sample text1", "vector": [0.18257418, 0.36514837, 0.5477226, 0.73029673] } ] ``` </details> - **Weaviate:** For Weaviate, the following fields are populated: | Field Name | Type | Description | | ---- | ---- | ----------- | | original_entity | dict | the original response json from search REST API| | score | float | 
certainty from the original entity, which evaluates the similarity between the entity and the query vector| | text | string | text in the original entity| | vector | list | vector of the entity| <details> <summary>Output</summary> ```json [ { "metadata": null, "original_entity": { "_additional": { "certainty": 1, "distance": 0, "vector": [ 0.58, 0.59, 0.6, 0.61, 0.62 ] }, "text": "sample text1." }, "score": 1, "text": "sample text1.", "vector": [ 0.58, 0.59, 0.6, 0.61, 0.62 ] } ] ``` </details>
promptflow/docs/reference/tools-reference/vector_db_lookup_tool.md/0
{ "file_path": "promptflow/docs/reference/tools-reference/vector_db_lookup_tool.md", "repo_id": "promptflow", "token_count": 2697 }
4
# Basic Chat This example shows how to create a basic chat flow. It demonstrates how to create a chatbot that can remember previous interactions and use the conversation history to generate next message. Tools used in this flow: - `llm` tool ## Prerequisites Install promptflow sdk and other dependencies in this folder: ```bash pip install -r requirements.txt ``` ## What you will learn In this flow, you will learn - how to compose a chat flow. - prompt template format of LLM tool chat api. Message delimiter is a separate line containing role name and colon: "system:", "user:", "assistant:". See <a href="https://platform.openai.com/docs/api-reference/chat/create#chat/create-role" target="_blank">OpenAI Chat</a> for more about message role. ```jinja system: You are a chatbot having a conversation with a human. user: {{question}} ``` - how to consume chat history in prompt. ```jinja {% for item in chat_history %} user: {{item.inputs.question}} assistant: {{item.outputs.answer}} {% endfor %} ``` ## Getting started ### 1 Create connection for LLM tool to use Go to "Prompt flow" "Connections" tab. Click on "Create" button, select one of LLM tool supported connection types and fill in the configurations. Currently, there are two connection types supported by LLM tool: "AzureOpenAI" and "OpenAI". If you want to use "AzureOpenAI" connection type, you need to create an Azure OpenAI service first. Please refer to [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/cognitive-services/openai-service/) for more details. If you want to use "OpenAI" connection type, you need to create an OpenAI account first. Please refer to [OpenAI](https://platform.openai.com/) for more details. 
```bash # Override keys with --set to avoid yaml file changes pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection ``` Note in [flow.dag.yaml](flow.dag.yaml) we are using connection named `open_ai_connection`. ```bash # show registered connection pf connection show --name open_ai_connection ``` ### 2 Start chatting ```bash # run chat flow with default question in flow.dag.yaml pf flow test --flow . # run chat flow with new question pf flow test --flow . --inputs question="What's Azure Machine Learning?" # start a interactive chat session in CLI pf flow test --flow . --interactive # start a interactive chat session in CLI with verbose info pf flow test --flow . --interactive --verbose ```
promptflow/examples/flows/chat/basic-chat/README.md/0
{ "file_path": "promptflow/examples/flows/chat/basic-chat/README.md", "repo_id": "promptflow", "token_count": 769 }
5
import faiss from jinja2 import Environment, FileSystemLoader import os from utils.index import FAISSIndex from utils.oai import OAIEmbedding, render_with_token_limit from utils.logging import log def find_context(question: str, index_path: str): index = FAISSIndex(index=faiss.IndexFlatL2(1536), embedding=OAIEmbedding()) index.load(path=index_path) snippets = index.query(question, top_k=5) template = Environment( loader=FileSystemLoader(os.path.dirname(os.path.abspath(__file__))) ).get_template("qna_prompt.md") token_limit = int(os.environ.get("PROMPT_TOKEN_LIMIT")) # Try to render the template with token limit and reduce snippet count if it fails while True: try: prompt = render_with_token_limit( template, token_limit, question=question, context=enumerate(snippets) ) break except ValueError: snippets = snippets[:-1] log(f"Reducing snippet count to {len(snippets)} to fit token limit") return prompt, snippets
promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/find_context.py/0
{ "file_path": "promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/find_context.py", "repo_id": "promptflow", "token_count": 422 }
6
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: groundtruth: type: string default: "1" prediction: type: string default: "2" outputs: score: type: string reference: ${line_process.output} nodes: - name: line_process type: python source: type: code path: line_process.py inputs: groundtruth: ${inputs.groundtruth} prediction: ${inputs.prediction} - name: aggregate type: python source: type: code path: aggregate.py inputs: processed_results: ${line_process.output} aggregation: true
promptflow/examples/flows/evaluation/eval-accuracy-maths-to-code/flow.dag.yaml/0
{ "file_path": "promptflow/examples/flows/evaluation/eval-accuracy-maths-to-code/flow.dag.yaml", "repo_id": "promptflow", "token_count": 228 }
7
from typing import List

from promptflow import log_metric, tool


@tool
def calculate_accuracy(grades: List[str]):
    """Aggregate per-line grades into an accuracy metric.

    Args:
        grades: one grade string per evaluated line; a line counts as correct
            when its grade is exactly "Correct".

    Returns:
        The list of grades (one entry per line), unchanged, so downstream
        nodes can still see the per-line results.

    Side effects:
        Logs the metric "accuracy" (fraction of "Correct" grades, rounded to
        2 decimals) via promptflow's log_metric.
    """
    # Idiomatic copy instead of the index-based append loop.
    result = list(grades)

    # Guard against an empty batch: report 0.0 rather than raising
    # ZeroDivisionError (the previous behavior on empty input).
    accuracy = round(result.count("Correct") / len(result), 2) if result else 0.0
    log_metric("accuracy", accuracy)

    return result
promptflow/examples/flows/evaluation/eval-classification-accuracy/calculate_accuracy.py/0
{ "file_path": "promptflow/examples/flows/evaluation/eval-classification-accuracy/calculate_accuracy.py", "repo_id": "promptflow", "token_count": 135 }
8
from promptflow import tool
from collections import Counter


@tool
def compute_f1_score(ground_truth: str, answer: str) -> float:
    """Compute the token-level F1 score between *answer* and *ground_truth*.

    Both strings are normalized (lowercased, punctuation and the articles
    a/an/the removed, whitespace collapsed) and split on whitespace; F1 is the
    harmonic mean of token precision and recall over the multiset overlap,
    as in the standard SQuAD evaluation.

    Returns:
        F1 in [0.0, 1.0]; 0.0 when the texts share no tokens.
        (Fixed: the annotation previously claimed ``-> str`` although the
        function has always returned a float.)
    """
    import string
    import re

    class QASplitTokenizer:
        def __call__(self, line):
            """Tokenize an input line using split() on whitespace.

            :param line: a segment to tokenize
            :return: the tokenized line
            """
            return line.split()

    def normalize_text(text) -> str:
        """Lower text and remove punctuation, articles and extra whitespace."""

        def remove_articles(text):
            return re.sub(r"\b(a|an|the)\b", " ", text)

        def white_space_fix(text):
            return " ".join(text.split())

        def remove_punctuation(text):
            exclude = set(string.punctuation)
            return "".join(ch for ch in text if ch not in exclude)

        def lower(text):
            return text.lower()

        return white_space_fix(remove_articles(remove_punctuation(lower(text))))

    tokenizer = QASplitTokenizer()
    prediction_tokens = tokenizer(normalize_text(answer))
    reference_tokens = tokenizer(normalize_text(ground_truth))

    # Multiset intersection counts shared tokens with multiplicity.
    common_tokens = Counter(prediction_tokens) & Counter(reference_tokens)
    num_common_tokens = sum(common_tokens.values())

    if num_common_tokens == 0:
        # Also covers either side being empty, avoiding division by zero.
        return 0.0

    precision = 1.0 * num_common_tokens / len(prediction_tokens)
    recall = 1.0 * num_common_tokens / len(reference_tokens)
    return (2.0 * precision * recall) / (precision + recall)
promptflow/examples/flows/evaluation/eval-qna-non-rag/f1_score.py/0
{ "file_path": "promptflow/examples/flows/evaluation/eval-qna-non-rag/f1_score.py", "repo_id": "promptflow", "token_count": 692 }
9
from promptflow import tool
import re


@tool
def parse_grounding_output(rag_grounding_score: str) -> dict:
    """Parse an LLM grounding judgment into a score and its reasoning text.

    Expects *rag_grounding_score* to contain a line like "Quality score: 4/5",
    preceded by the model's reasoning.

    Returns:
        A dict with keys "quality_score" (float; 0 when no score pattern is
        found, NaN when parsing raises) and "quality_reasoning" (the text
        before "Quality score: ", or the full input if that marker is absent).
        (Fixed: the annotation previously claimed ``-> str`` although the
        function has always returned a dict.)
    """
    try:
        # Capture the numerator of "Quality score: N/M"; tolerate extra spaces.
        numbers_found = re.findall(r"Quality score:\s*(\d+)\/\d", rag_grounding_score)
        score = float(numbers_found[0]) if len(numbers_found) > 0 else 0
    except Exception:
        # Unparseable input: surface NaN rather than crashing the flow.
        score = float("nan")

    try:
        # Everything before the score marker is the model's reasoning. The
        # 2-tuple unpack raises (and falls back) if the marker is missing or
        # appears more than once.
        quality_reasoning, _ = rag_grounding_score.split("Quality score: ")
    except Exception:
        quality_reasoning = rag_grounding_score

    return {"quality_score": score, "quality_reasoning": quality_reasoning}
promptflow/examples/flows/evaluation/eval-qna-rag-metrics/parse_groundedness_score.py/0
{ "file_path": "promptflow/examples/flows/evaluation/eval-qna-rag-metrics/parse_groundedness_score.py", "repo_id": "promptflow", "token_count": 214 }
10
from typing import Union

from openai.version import VERSION as OPENAI_VERSION

from promptflow import tool
from promptflow.connections import CustomConnection, AzureOpenAIConnection


# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signature per need
def to_bool(value) -> bool:
    """Coerce any value to bool; only the string "true" (case-insensitive) maps to True."""
    return str(value).lower() == "true"


def get_client(connection: Union[CustomConnection, AzureOpenAIConnection]):
    """Build an OpenAI or AzureOpenAI client from a promptflow connection.

    The flavor is chosen by inspecting the API key: keys starting with "sk-"
    are treated as OpenAI keys, anything else as Azure OpenAI (which
    additionally needs the endpoint and an API version).

    Raises:
        Exception: if the installed openai package is still the legacy 0.x
            series, which this code does not support.
    """
    if OPENAI_VERSION.startswith("0."):
        raise Exception(
            "Please upgrade your OpenAI package to version >= 1.0.0 or using the command: pip install --upgrade openai."
        )
    # The connection can be extracted as a dict object containing the configs and secrets.
    connection_dict = dict(connection)
    api_key = connection_dict.get("api_key")
    conn = dict(
        api_key=api_key,
    )
    if api_key.startswith("sk-"):
        from openai import OpenAI as Client
    else:
        from openai import AzureOpenAI as Client

        # Azure OpenAI additionally requires the endpoint and an API version;
        # "2023-07-01-preview" is the fallback when the connection omits one.
        conn.update(
            azure_endpoint=connection_dict.get("api_base"),
            api_version=connection_dict.get("api_version", "2023-07-01-preview"),
        )
    return Client(**conn)


@tool
def my_python_tool(
    prompt: str,
    # for AOAI, deployment name is customized by user, not model name.
    deployment_name: str,
    suffix: str = None,
    max_tokens: int = 120,
    temperature: float = 1.0,
    top_p: float = 1.0,
    n: int = 1,
    logprobs: int = None,
    echo: bool = False,
    stop: list = None,
    presence_penalty: float = 0,
    frequency_penalty: float = 0,
    best_of: int = 1,
    # NOTE(review): mutable default {} is an anti-pattern, but it is never
    # mutated here and promptflow introspects this signature to render tool
    # inputs -- confirm before changing it to None.
    logit_bias: dict = {},
    user: str = "",
    connection: Union[CustomConnection, AzureOpenAIConnection] = None,
    **kwargs,
) -> str:
    """Run a completions request against the connection's OpenAI/Azure endpoint.

    Numeric parameters are explicitly re-coerced below because the caller may
    pass them as strings (see the TODO). Returns the text of the first choice.
    """
    # TODO: remove below type conversion after client can pass json rather than string.
    echo = to_bool(echo)

    response = get_client(connection).completions.create(
        prompt=prompt,
        model=deployment_name,
        # empty string suffix should be treated as None.
        suffix=suffix if suffix else None,
        max_tokens=int(max_tokens),
        temperature=float(temperature),
        top_p=float(top_p),
        n=int(n),
        logprobs=int(logprobs) if logprobs else None,
        echo=echo,
        # fix bug "[] is not valid under any of the given schemas-'stop'"
        stop=stop if stop else None,
        presence_penalty=float(presence_penalty),
        frequency_penalty=float(frequency_penalty),
        best_of=int(best_of),
        # Logit bias must be a dict if we passed it to openai api.
        logit_bias=logit_bias if logit_bias else {},
        user=user,
    )

    # get first element because prompt is single.
    return response.choices[0].text
promptflow/examples/flows/standard/basic-with-connection/hello.py/0
{ "file_path": "promptflow/examples/flows/standard/basic-with-connection/hello.py", "repo_id": "promptflow", "token_count": 1109 }
11
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: question: type: string default: What is Prompt flow? outputs: answer: type: string reference: ${generate_result.output} nodes: - name: content_safety_check type: python source: type: code path: content_safety_check.py inputs: text: ${inputs.question} - name: llm_result type: python source: type: code path: llm_result.py inputs: question: ${inputs.question} activate: when: ${content_safety_check.output} is: true - name: default_result type: python source: type: code path: default_result.py inputs: question: ${inputs.question} activate: when: ${content_safety_check.output} is: false - name: generate_result type: python source: type: code path: generate_result.py inputs: llm_result: ${llm_result.output} default_result: ${default_result.output} environment: python_requirements_txt: requirements.txt
promptflow/examples/flows/standard/conditional-flow-for-if-else/flow.dag.yaml/0
{ "file_path": "promptflow/examples/flows/standard/conditional-flow-for-if-else/flow.dag.yaml", "repo_id": "promptflow", "token_count": 386 }
12
CHAT_DEPLOYMENT_NAME=gpt-35-turbo AZURE_OPENAI_API_KEY=<your_AOAI_key> AZURE_OPENAI_API_BASE=<your_AOAI_endpoint>
promptflow/examples/flows/standard/customer-intent-extraction/.env.example/0
{ "file_path": "promptflow/examples/flows/standard/customer-intent-extraction/.env.example", "repo_id": "promptflow", "token_count": 61 }
13
from promptflow import tool
from divider import Divider
from typing import List


@tool
def combine_code(divided: List[str]):
    """Merge the previously divided code segments back into one source string."""
    return Divider.combine(divided)
promptflow/examples/flows/standard/gen-docstring/combine_code_tool.py/0
{ "file_path": "promptflow/examples/flows/standard/gen-docstring/combine_code_tool.py", "repo_id": "promptflow", "token_count": 57 }
14
from promptflow import tool
import sys
from io import StringIO


@tool
def func_exe(code_snippet: str):
    """Execute a Python snippet and return whatever it printed to stdout.

    Upstream error markers are passed through unchanged; if the snippet raises,
    the exception message is returned instead of the captured output.
    """
    # Sentinel strings produced by the upstream node are returned as-is.
    if code_snippet == "JSONDecodeError" or code_snippet.startswith("Unknown Error:"):
        return code_snippet

    saved_stdout = sys.stdout
    capture = StringIO()
    sys.stdout = capture
    try:
        # exec runs arbitrary code -- acceptable here only because this is a
        # demo flow; never feed it untrusted input in production.
        exec(code_snippet.lstrip())
    except Exception as err:
        return str(err)
    finally:
        # Always restore stdout, on both the success and the error path.
        sys.stdout = saved_stdout

    return capture.getvalue().strip()


if __name__ == "__main__":
    print(func_exe("print(5+3)"))
    print(func_exe("count = 0\nfor i in range(100):\n    if i % 8 == 0:\n        count += 1\nprint(count)"))
    print(func_exe("sum = 0\ni = 0\nwhile 3**i < 100:\n    sum += 3**i\n    i += 1\nprint(sum)"))
    print(func_exe("speed_A = 80\nspeed_B = 120\ndistance = 2000\ntime = distance / (speed_A + speed_B)\nprint(time)"))
    print(func_exe("Unknown Error"))
    print(func_exe("JSONDecodeError"))
promptflow/examples/flows/standard/maths-to-code/code_execution.py/0
{ "file_path": "promptflow/examples/flows/standard/maths-to-code/code_execution.py", "repo_id": "promptflow", "token_count": 432 }
15
include my_tool_package/yamls/*.yaml
promptflow/examples/tools/tool-package-quickstart/MANIFEST.in/0
{ "file_path": "promptflow/examples/tools/tool-package-quickstart/MANIFEST.in", "repo_id": "promptflow", "token_count": 14 }
16
my_tool_package.tools.my_tool_2.MyTool.my_tool: class_name: MyTool function: my_tool inputs: connection: type: - CustomConnection input_text: type: - string module: my_tool_package.tools.my_tool_2 name: My Second Tool description: This is my second tool type: python
promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/my_tool_2.yaml/0
{ "file_path": "promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/my_tool_2.yaml", "repo_id": "promptflow", "token_count": 126 }
17
"""Unit tests for my_tool_package.tools.tool_with_generated_by_input.

The listing helpers are invoked with placeholder ("") credentials throughout,
so these tests exercise only the locally generated data paths -- no live
service is contacted.
"""
import json

import pytest
import unittest

from my_tool_package.tools.tool_with_generated_by_input import (
    generate_index_json,
    list_embedding_deployment,
    list_fields,
    list_indexes,
    list_index_types,
    list_semantic_configuration,
    my_tool,
    reverse_generate_index_json,
)


@pytest.mark.parametrize("index_type", ["Azure Cognitive Search", "Workspace MLIndex"])
def test_my_tool(index_type):
    # my_tool should echo back the generated index JSON in its greeting.
    index_json = generate_index_json(index_type=index_type)
    result = my_tool(index_json, "", "")
    assert result == f'Hello {index_json}'


def test_generate_index_json():
    # The generated payload must be valid JSON carrying the requested type.
    index_type = "Azure Cognitive Search"
    index_json = generate_index_json(index_type=index_type)
    indexes = json.loads(index_json)
    assert indexes["index_type"] == index_type


def test_reverse_generate_index_json():
    # Round-trip: every field fed into the JSON must be recovered verbatim.
    index_type = "Workspace MLIndex"
    index = list_indexes("", "", "")
    inputs = {
        "index_type": index_type,
        "index": index,
        "index_connection": "retrieved_index_connection",
        "index_name": "retrieved_index_name",
        "content_field": "retrieved_content_field",
        "embedding_field": "retrieved_embedding_field",
        "metadata_field": "retrieved_metadata_field",
        "semantic_configuration": "retrieved_semantic_configuration",
        "embedding_connection": "retrieved_embedding_connection",
        "embedding_deployment": "retrieved_embedding_deployment"
    }
    input_json = json.dumps(inputs)
    result = reverse_generate_index_json(input_json)
    for k, v in inputs.items():
        assert result[k] == v


def test_list_index_types():
    # Expected counts below mirror the tool's hard-coded sample data.
    result = list_index_types("", "", "")
    assert isinstance(result, list)
    assert len(result) == 5


def test_list_indexes():
    result = list_indexes("", "", "")
    assert isinstance(result, list)
    assert len(result) == 10
    for item in result:
        assert isinstance(item, dict)


def test_list_fields():
    result = list_fields("", "", "")
    assert isinstance(result, list)
    assert len(result) == 9
    for item in result:
        assert isinstance(item, dict)


def test_list_semantic_configuration():
    result = list_semantic_configuration("", "", "")
    assert len(result) == 1
    assert isinstance(result[0], dict)


def test_list_embedding_deployment():
    result = list_embedding_deployment("")
    assert len(result) == 2
    for item in result:
        assert isinstance(item, dict)


if __name__ == "__main__":
    # NOTE(review): these are pytest-style test functions, not TestCase
    # subclasses, so unittest.main() will not collect them -- run this module
    # with pytest instead.
    unittest.main()
{ "file_path": "promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_generated_by_input.py", "repo_id": "promptflow", "token_count": 967 }
18
# Flow with custom_llm tool This is a flow demonstrating how to use a `custom_llm` tool, which enables users to seamlessly connect to a large language model with prompt tuning experience using a `PromptTemplate`. Tools used in this flow: - `custom_llm` Tool Connections used in this flow: - custom connection ## Prerequisites Install promptflow sdk and other dependencies: ```bash pip install -r requirements.txt ``` ## Setup connection Create connection if you haven't done that. ```bash # Override keys with --set to avoid yaml file changes pf connection create -f custom_connection.yml --set secrets.api_key=<your_api_key> configs.api_base=<your_api_base> ``` Ensure you have created `basic_custom_connection` connection. ```bash pf connection show -n basic_custom_connection ``` ## Run flow - Test flow ```bash pf flow test --flow . ```
promptflow/examples/tools/use-cases/custom_llm_tool_showcase/README.md/0
{ "file_path": "promptflow/examples/tools/use-cases/custom_llm_tool_showcase/README.md", "repo_id": "promptflow", "token_count": 249 }
19
# Enforce the check of pipelines. # This script will get the diff of the current branch and main branch, calculate the pipelines that should be triggered. # Then it will check if the triggered pipelines are successful. This script will loop for 30*loop-times seconds at most. # How many checks are triggered: # 1. sdk checks: sdk_cli_tests, sdk_cli_azure_test, sdk_cli_global_config_tests are triggered. # 2. examples checks: this script calculate the path filters and decide what should be triggered. # Trigger checks and return the status of the checks: # 1. If examples are not correctly generated, fail. # 2. If required pipelines are not triggered within 6 rounds of loops, fail. # 2.1 (special_care global variable could help on some pipelines that need to bypass the check) # Check pipelines succeed or not: # 1. These pipelines should return status within loop-times rounds. # 2. If there is failed pipeline in the triggered pipelines, fail. # Import necessary libraries import os import fnmatch import subprocess import time import argparse import json import sys # Define variables github_repository = "microsoft/promptflow" snippet_debug = os.getenv("SNIPPET_DEBUG", 0) merge_commit = "" loop_times = 30 github_workspace = os.path.expanduser("~/promptflow/") # Special cases for pipelines that need to be triggered more or less than default value 1. # If 0, the pipeline will not be ignored in check enforcer. # Please notice that the key should be the Job Name in the pipeline. 
special_care = {
    "sdk_cli_tests": 4,
    "sdk_cli_azure_test": 4,
    # "samples_connections_connection": 0,
}

# Copy from original yaml pipelines
checks = {
    "sdk_cli_tests": [
        "src/promptflow/**",
        "scripts/building/**",
        ".github/workflows/promptflow-sdk-cli-test.yml",
    ],
    "sdk_cli_global_config_tests": [
        "src/promptflow/**",
        "scripts/building/**",
        ".github/workflows/promptflow-global-config-test.yml",
    ],
    "sdk_cli_azure_test": [
        "src/promptflow/**",
        "scripts/building/**",
        ".github/workflows/promptflow-sdk-cli-azure-test.yml",
    ],
}

reverse_checks = {}   # path glob -> list of pipeline job names watching that glob
pipelines = {}        # pipeline job name -> required trigger count
pipelines_count = {}  # pipeline job name -> observed trigger count
failed_reason = ""


# Define functions
def trigger_checks(valid_status_array):
    """Collect the check runs of `merge_commit` that match a required pipeline.

    Queries the GitHub API (via the `gh` CLI) for the commit's check-suites and
    their check-runs, appends matching runs to `valid_status_array`, increments
    the global `pipelines_count`, and sets the global `failed_reason` when some
    required pipeline has not been triggered often enough yet.
    """
    global failed_reason
    global github_repository
    global merge_commit
    global snippet_debug
    global pipelines
    global pipelines_count

    output = subprocess.check_output(
        f"gh api /repos/{github_repository}/commits/{merge_commit}/check-suites?per_page=100",
        shell=True,
    )
    check_suites = json.loads(output)["check_suites"]
    for suite in check_suites:
        if snippet_debug != 0:
            print(f"check-suites id {suite['id']}")
        suite_id = suite["id"]
        output = subprocess.check_output(
            f"gh api /repos/{github_repository}/check-suites/{suite_id}/check-runs?per_page=100",
            shell=True,
        )
        check_runs = json.loads(output)["check_runs"]
        for run in check_runs:
            if snippet_debug != 0:
                print(f"check runs name {run['name']}")
            for key in pipelines.keys():
                value = pipelines[key]
                if value == 0:
                    # Special-cased to 0: this pipeline is exempt from enforcement.
                    continue
                if key in run["name"]:
                    # Substring match: job names may carry a matrix suffix.
                    pipelines_count[key] += 1
                    valid_status_array.append(run)

    for key in pipelines.keys():
        if pipelines_count[key] < pipelines[key]:
            failed_reason = "Not all pipelines are triggered."


def status_checks(valid_status_array):
    """Inspect the collected check runs and set the global `failed_reason`.

    Priority: a failed run wins ("not successful"), otherwise any run without a
    conclusion marks the round as "not finished"; success leaves it empty.
    """
    global failed_reason
    global pipelines
    global pipelines_count
    # Basic fact of sdk cli checked pipelines.
    failed_reason = ""
    # Loop through each valid status array.
    for status in valid_status_array:
        # Check if the pipeline was successful.
        if status["conclusion"] and status["conclusion"].lower() == "success":
            # Add 1 to the count of successful pipelines.
            pass
        # Check if the pipeline failed.
        elif status["conclusion"] and status["conclusion"].lower() == "failure":
            failed_reason = "Required pipelines are not successful."
        # Check if the pipeline is still running.
        else:
            if failed_reason == "":
                failed_reason = "Required pipelines are not finished."
            # Print the status of the pipeline to the console.
            print(status["name"] + " is checking.")


def trigger_prepare(input_paths):
    """Compute which pipelines must be enforced for the changed paths.

    When examples/samples changed, merges the sample pipelines generated from
    the READMEs into `checks` (failing if README generation is stale), builds
    the `reverse_checks` glob index, then fills the global `pipelines` /
    `pipelines_count` dictionaries, honoring `special_care` overrides.
    """
    global github_workspace
    global checks
    global reverse_checks
    global pipelines
    global pipelines_count
    global failed_reason
    global special_care

    for input_path in input_paths:
        # Once the sample pipelines were merged in, skip re-running the readme step.
        if "samples_connections_connection" in checks:
            continue
        # Check if the input path contains "examples" or "samples".
        if "examples" in input_path or "samples" in input_path:
            sys.path.append(os.path.expanduser(github_workspace + "/scripts/readme"))
            from readme import main as readme_main

            os.chdir(os.path.expanduser(github_workspace))

            # Get the list of pipelines from the readme file.
            pipelines_samples = readme_main(check=True)
            git_diff_files = [
                item
                for item in subprocess.check_output(
                    ["git", "diff", "--name-only", "HEAD"]
                )
                .decode("utf-8")
                .split("\n")
                if item != ""
            ]
            # Any dirty file after readme_main means generated workflows are stale.
            for _ in git_diff_files:
                failed_reason = "Run readme generation before check in"
                return

            # Merge the pipelines from the readme file with the original list of pipelines.
            for key in pipelines_samples.keys():
                value = pipelines_samples[key]
                checks[key] = value

    # Reverse checks.
    for key in checks.keys():
        value = checks[key]
        for path in value:
            if path in reverse_checks:
                reverse_checks[path].append(key)
            else:
                reverse_checks[path] = [key]

    # Render pipelines and pipelines_count using input_paths.
    for input_path in input_paths:
        # Input pattern /**: input_path should match in the middle.
        # Input pattern /*: input_path should match last but one.
        # Other input pattern: input_path should match last.
        keys = [
            key for key in reverse_checks.keys() if fnmatch.fnmatch(input_path, key)
        ]
        # Loop through each key in the list of keys.
        for key_item in keys:
            # Loop through each pipeline in the list of pipelines.
            for key in reverse_checks[key_item]:
                # Check if the pipeline is in the list of pipelines.
                if key in special_care:
                    pipelines[key] = special_care[key]
                else:
                    pipelines[key] = 1
                # Set the pipeline count to 0.
                pipelines_count[key] = 0


def run_checks():
    """Main polling loop.

    Resolves the merge commit (from `git log` when not passed on the command
    line), computes the required pipelines from the diff against origin/main,
    then polls GitHub every 30 seconds for up to `loop_times` rounds until all
    required pipelines succeed, raising on failure or timeout.
    """
    global github_repository
    global snippet_debug
    global merge_commit
    global loop_times
    global github_workspace
    global failed_reason

    if merge_commit == "":
        merge_commit = (
            subprocess.check_output(["git", "log", "-1"]).decode("utf-8").split("\n")
        )
        if snippet_debug != 0:
            print(merge_commit)
        # Extract the commit sha from the "Merge <sha> into <sha>" subject line.
        for line in merge_commit:
            if "Merge" in line and "into" in line:
                merge_commit = line.split(" ")[-3]
                break
        if snippet_debug != 0:
            print("MergeCommit " + merge_commit)

    not_started_counter = 5
    os.chdir(github_workspace)

    # Get diff of current branch and main branch.
    try:
        git_merge_base = (
            subprocess.check_output(["git", "merge-base", "origin/main", "HEAD"])
            .decode("utf-8")
            .rstrip()
        )
        git_diff = (
            subprocess.check_output(
                ["git", "diff", "--name-only", "--diff-filter=d", f"{git_merge_base}"],
                stderr=subprocess.STDOUT,
            )
            .decode("utf-8")
            .rstrip()
            .split("\n")
        )
    except subprocess.CalledProcessError as e:
        print("Exception on process, rc=", e.returncode, "output=", e.output)
        raise e

    # Prepare how many pipelines should be triggered.
    trigger_prepare(git_diff)
    if failed_reason != "":
        raise Exception(failed_reason)

    # Loop for 15 minutes at most.
    for i in range(loop_times):
        # Wait for 30 seconds.
        time.sleep(30)
        # Reset the failed reason.
        failed_reason = ""
        # Reset the valid status array.
        valid_status_array = []

        # Get all triggered pipelines.
        # If not all pipelines are triggered, continue.
        trigger_checks(valid_status_array)
        if failed_reason != "":
            if not_started_counter == 0:
                raise Exception(failed_reason + " for 6 times.")
            print(failed_reason)
            not_started_counter -= 1
            continue

        # Get pipeline conclusion priority:
        # 1. Not successful, Fail.
        # 2. Not finished, Continue.
        # 3. Successful, Break.
        status_checks(valid_status_array)

        # Check if the failed reason contains "not successful".
        if "not successful" in failed_reason.lower():
            raise Exception(failed_reason)
        # Check if the failed reason contains "not finished".
        elif "not finished" in failed_reason.lower():
            print(failed_reason)
            continue
        # Otherwise, print that all required pipelines are successful.
        else:
            print("All required pipelines are successful.")
            break

    # Check if the failed reason is not empty.
    if failed_reason != "":
        raise Exception(failed_reason)


if __name__ == "__main__":
    # Run the checks.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--merge-commit",
        help="merge commit sha",
    )
    parser.add_argument(
        "-n",
        "--loop-times",
        type=int,
        help="Loop times",
    )
    parser.add_argument(
        "-t",
        "--github-workspace",
        help="base path of github workspace",
    )
    args = parser.parse_args()
    if args.merge_commit:
        merge_commit = args.merge_commit
    if args.loop_times:
        loop_times = args.loop_times
    if args.github_workspace:
        github_workspace = args.github_workspace
    run_checks()
promptflow/scripts/check_enforcer/check_enforcer.py/0
{ "file_path": "promptflow/scripts/check_enforcer/check_enforcer.py", "repo_id": "promptflow", "token_count": 4517 }
20
#!/usr/bin/env python # -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- # # This script will install the promptflow into a directory and create an executable # at a specified file path that is the entry point into the promptflow. # # The latest versions of all promptflow command packages will be installed. # import os import sys import platform import stat import tempfile import shutil import subprocess import hashlib PF_DISPATCH_TEMPLATE = """#!/usr/bin/env bash export PF_INSTALLER=Script {install_dir}/bin/python -m promptflow._cli._pf.entry "$@" """ PFAZURE_DISPATCH_TEMPLATE = """#!/usr/bin/env bash {install_dir}/bin/python -m promptflow._cli._pf_azure.entry "$@" """ PFS_DISPATCH_TEMPLATE = """#!/usr/bin/env bash {install_dir}/bin/python -m promptflow._sdk._service.entry "$@" """ DEFAULT_INSTALL_DIR = os.path.expanduser(os.path.join('~', 'lib', 'promptflow')) DEFAULT_EXEC_DIR = os.path.expanduser(os.path.join('~', 'bin')) PF_EXECUTABLE_NAME = 'pf' PFAZURE_EXECUTABLE_NAME = 'pfazure' PFS_EXECUTABLE_NAME = 'pfs' USER_BASH_RC = os.path.expanduser(os.path.join('~', '.bashrc')) USER_BASH_PROFILE = os.path.expanduser(os.path.join('~', '.bash_profile')) class CLIInstallError(Exception): pass def print_status(msg=''): print('-- '+msg) def prompt_input(msg): return input('\n===> '+msg) def prompt_input_with_default(msg, default): if default: return prompt_input("{} (leave blank to use '{}'): ".format(msg, default)) or default else: return prompt_input('{}: '.format(msg)) def prompt_y_n(msg, default=None): if default not in [None, 'y', 'n']: raise ValueError("Valid values for default are 'y', 'n' or None") y = 'Y' if default == 'y' else 'y' n = 'N' if default == 'n' else 'n' while 
True: ans = prompt_input('{} ({}/{}): '.format(msg, y, n)) if ans.lower() == n.lower(): return False if ans.lower() == y.lower(): return True if default and not ans: return default == y.lower() def exec_command(command_list, cwd=None, env=None): print_status('Executing: '+str(command_list)) subprocess.check_call(command_list, cwd=cwd, env=env) def create_tmp_dir(): tmp_dir = tempfile.mkdtemp() return tmp_dir def create_dir(dir): if not os.path.isdir(dir): print_status("Creating directory '{}'.".format(dir)) os.makedirs(dir) def is_valid_sha256sum(a_file, expected_sum): sha256 = hashlib.sha256() with open(a_file, 'rb') as f: sha256.update(f.read()) computed_hash = sha256.hexdigest() return expected_sum == computed_hash def create_virtualenv(install_dir): cmd = [sys.executable, '-m', 'venv', install_dir] exec_command(cmd) def install_cli(install_dir, tmp_dir): path_to_pip = os.path.join(install_dir, 'bin', 'pip') cmd = [path_to_pip, 'install', '--cache-dir', tmp_dir, 'promptflow[azure,executable,pfs,azureml-serving]', '--upgrade'] exec_command(cmd) cmd = [path_to_pip, 'install', '--cache-dir', tmp_dir, 'promptflow-tools', '--upgrade'] exec_command(cmd) cmd = [path_to_pip, 'install', '--cache-dir', tmp_dir, 'keyrings.alt', '--upgrade'] exec_command(cmd) def create_executable(exec_dir, install_dir): create_dir(exec_dir) exec_filepaths = [] for filename, template in [(PF_EXECUTABLE_NAME, PF_DISPATCH_TEMPLATE), (PFAZURE_EXECUTABLE_NAME, PFAZURE_DISPATCH_TEMPLATE), (PFS_EXECUTABLE_NAME, PFS_DISPATCH_TEMPLATE)]: exec_filepath = os.path.join(exec_dir, filename) with open(exec_filepath, 'w') as exec_file: exec_file.write(template.format(install_dir=install_dir)) cur_stat = os.stat(exec_filepath) os.chmod(exec_filepath, cur_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) print_status("The executable is available at '{}'.".format(exec_filepath)) exec_filepaths.append(exec_filepath) return exec_filepaths def get_install_dir(): install_dir = None while not 
install_dir: prompt_message = 'In what directory would you like to place the install?' install_dir = prompt_input_with_default(prompt_message, DEFAULT_INSTALL_DIR) install_dir = os.path.realpath(os.path.expanduser(install_dir)) if ' ' in install_dir: print_status("The install directory '{}' cannot contain spaces.".format(install_dir)) install_dir = None else: create_dir(install_dir) if os.listdir(install_dir): print_status("'{}' is not empty and may contain a previous installation.".format(install_dir)) ans_yes = prompt_y_n('Remove this directory?', 'n') if ans_yes: shutil.rmtree(install_dir) print_status("Deleted '{}'.".format(install_dir)) create_dir(install_dir) else: # User opted to not delete the directory so ask for install directory again install_dir = None print_status("We will install at '{}'.".format(install_dir)) return install_dir def get_exec_dir(): exec_dir = None while not exec_dir: prompt_message = (f"In what directory would you like to place the " f"'{PFS_EXECUTABLE_NAME}/{PFS_EXECUTABLE_NAME}/{PFAZURE_EXECUTABLE_NAME}' executable?") exec_dir = prompt_input_with_default(prompt_message, DEFAULT_EXEC_DIR) exec_dir = os.path.realpath(os.path.expanduser(exec_dir)) if ' ' in exec_dir: print_status("The executable directory '{}' cannot contain spaces.".format(exec_dir)) exec_dir = None create_dir(exec_dir) print_status("The executable will be in '{}'.".format(exec_dir)) return exec_dir def _backup_rc(rc_file): try: shutil.copyfile(rc_file, rc_file+'.backup') print_status("Backed up '{}' to '{}'".format(rc_file, rc_file+'.backup')) except (OSError, IOError): pass def _get_default_rc_file(): bashrc_exists = os.path.isfile(USER_BASH_RC) bash_profile_exists = os.path.isfile(USER_BASH_PROFILE) if not bashrc_exists and bash_profile_exists: return USER_BASH_PROFILE if bashrc_exists and bash_profile_exists and platform.system().lower() == 'darwin': return USER_BASH_PROFILE return USER_BASH_RC if bashrc_exists else None def _default_rc_file_creation_step(): 
rcfile = USER_BASH_PROFILE if platform.system().lower() == 'darwin' else USER_BASH_RC ans_yes = prompt_y_n('Could not automatically find a suitable file to use. Create {} now?'.format(rcfile), default='y') if ans_yes: open(rcfile, 'a').close() return rcfile return None def _find_line_in_file(file_path, search_pattern): try: with open(file_path, 'r', encoding="utf-8") as search_file: for line in search_file: if search_pattern in line: return True except (OSError, IOError): pass return False def _modify_rc(rc_file_path, line_to_add): if not _find_line_in_file(rc_file_path, line_to_add): with open(rc_file_path, 'a', encoding="utf-8") as rc_file: rc_file.write('\n'+line_to_add+'\n') def get_rc_file_path(): rc_file = None default_rc_file = _get_default_rc_file() if not default_rc_file: rc_file = _default_rc_file_creation_step() rc_file = rc_file or prompt_input_with_default('Enter a path to an rc file to update', default_rc_file) if rc_file: rc_file_path = os.path.realpath(os.path.expanduser(rc_file)) if os.path.isfile(rc_file_path): return rc_file_path print_status("The file '{}' could not be found.".format(rc_file_path)) return None def warn_other_azs_on_path(exec_dir, exec_filepath): env_path = os.environ.get('PATH') conflicting_paths = [] if env_path: for p in env_path.split(':'): for file in [PF_EXECUTABLE_NAME, PFAZURE_EXECUTABLE_NAME, PFS_EXECUTABLE_NAME]: p_to_pf = os.path.join(p, file) if p != exec_dir and os.path.isfile(p_to_pf): conflicting_paths.append(p_to_pf) if conflicting_paths: print_status() print_status(f"** WARNING: Other '{PFS_EXECUTABLE_NAME}/{PFS_EXECUTABLE_NAME}/{PFAZURE_EXECUTABLE_NAME}' " f"executables are on your $PATH. 
**") print_status("Conflicting paths: {}".format(', '.join(conflicting_paths))) print_status("You can run this installation of the promptflow with '{}'.".format(exec_filepath)) def handle_path_and_tab_completion(exec_filepath, exec_dir): ans_yes = prompt_y_n('Modify profile to update your $PATH now?', 'y') if ans_yes: rc_file_path = get_rc_file_path() if not rc_file_path: raise CLIInstallError('No suitable profile file found.') _backup_rc(rc_file_path) line_to_add = "export PATH=$PATH:{}".format(exec_dir) _modify_rc(rc_file_path, line_to_add) warn_other_azs_on_path(exec_dir, exec_filepath) print_status() print_status('** Run `exec -l $SHELL` to restart your shell. **') print_status() else: print_status("You can run the promptflow with '{}'.".format(exec_filepath)) def verify_python_version(): print_status('Verifying Python version.') v = sys.version_info if v < (3, 8): raise CLIInstallError('The promptflow does not support Python versions less than 3.8.') if 'conda' in sys.version: raise CLIInstallError("This script does not support the Python Anaconda environment. " "Create an Anaconda virtual environment and install with 'pip'") print_status('Python version {}.{}.{} okay.'.format(v.major, v.minor, v.micro)) def _native_dependencies_for_dist(verify_cmd_args, install_cmd_args, dep_list): try: print_status("Executing: '{} {}'".format(' '.join(verify_cmd_args), ' '.join(dep_list))) subprocess.check_output(verify_cmd_args + dep_list, stderr=subprocess.STDOUT) print_status('Native dependencies okay.') except subprocess.CalledProcessError: err_msg = 'One or more of the following native dependencies are not currently installed and may be required.\n' err_msg += '"{}"'.format(' '.join(install_cmd_args + dep_list)) print_status(err_msg) ans_yes = prompt_y_n('Missing native dependencies. 
Attempt to continue anyway?', 'n') if not ans_yes: raise CLIInstallError('Please install the native dependencies and try again.') def _get_linux_distro(): if platform.system() != 'Linux': return None, None try: with open('/etc/os-release') as lines: tokens = [line.strip() for line in lines] except Exception: return None, None release_info = {} for token in tokens: if '=' in token: k, v = token.split('=', 1) release_info[k.lower()] = v.strip('"') return release_info.get('name', None), release_info.get('version_id', None) def verify_install_dir_exec_path_conflict(install_dir, exec_dir): for exec_name in [PF_EXECUTABLE_NAME, PFAZURE_EXECUTABLE_NAME, PFS_EXECUTABLE_NAME]: exec_path = os.path.join(exec_dir, exec_name) if install_dir == exec_path: raise CLIInstallError("The executable file '{}' would clash with the install directory of '{}'. Choose " "either a different install directory or directory to place the " "executable.".format(exec_path, install_dir)) def main(): verify_python_version() tmp_dir = create_tmp_dir() install_dir = get_install_dir() exec_dir = get_exec_dir() verify_install_dir_exec_path_conflict(install_dir, exec_dir) create_virtualenv(install_dir) install_cli(install_dir, tmp_dir) exec_filepath = create_executable(exec_dir, install_dir) try: handle_path_and_tab_completion(exec_filepath, exec_dir) except Exception as e: print_status("Unable to set up PATH. 
ERROR: {}".format(str(e))) shutil.rmtree(tmp_dir) print_status("Installation successful.") print_status("Run the CLI with {} --help".format(exec_filepath)) if __name__ == '__main__': try: main() except CLIInstallError as cie: print('ERROR: '+str(cie), file=sys.stderr) sys.exit(1) except KeyboardInterrupt: print('\n\nExiting...') sys.exit(1) # SIG # Begin signature block # Z1F07ShfIJ7kejST2NXwW1QcFPEya4xaO2xZz6vLT847zaMzbc/PaEa1RKFlD881 # 4J+i6Au2wtbHzOXDisyH6WeLQ3gh0X2gxFRa4EzW7Nzjcvwm4+WogiTcnPVVxlk3 # qafM/oyVqs3695K7W5XttOiq2guv/yedsf/TW2BKSEKruFQh9IwDfIiBoi9Zv3wa # iuzQulRR8KyrCtjEPDV0t4WnZVB/edQea6xJZeTlMG+uLR/miBTbPhUb/VZkVjBf # qHBv623oLXICzoTNuaPTln9OWvL2NZpisGYvNzebKO7/Ho6AOWZNs5XOVnjs0Ax2 # aeXvlwBzIQyfyxd25487/Q== # SIG # End signature block
promptflow/scripts/installer/curl_install_pypi/install.py/0
{ "file_path": "promptflow/scripts/installer/curl_install_pypi/install.py", "repo_id": "promptflow", "token_count": 5824 }
21
{ "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { "EagerFlowSchema": { "properties": { "additional_includes": { "title": "additional_includes", "type": "array", "items": { "title": "additional_includes", "type": "string" } }, "description": { "title": "description", "type": "string" }, "display_name": { "title": "display_name", "type": "string" }, "entry": { "title": "entry", "type": "string" }, "environment": { "title": "environment", "type": "object", "additionalProperties": {} }, "language": { "title": "language", "type": "string" }, "path": { "title": "path", "type": "string" }, "$schema": { "title": "$schema", "type": "string", "readOnly": true }, "tags": { "title": "tags", "type": "object", "additionalProperties": { "title": "tags", "type": "string" } }, "type": { "title": "type", "type": "string", "enum": [ "standard", "evaluation", "chat" ], "enumNames": [] } }, "type": "object", "required": [ "entry", "path" ], "additionalProperties": false } }, "$ref": "#/definitions/EagerFlowSchema" }
promptflow/scripts/json_schema/EagerFlow.schema.json/0
{ "file_path": "promptflow/scripts/json_schema/EagerFlow.schema.json", "repo_id": "promptflow", "token_count": 1541 }
22
# Jinja step template: copies the shared azure_openai.yml connection file and
# substitutes the GPT-4V key and endpoint from repository secrets via sed.
# The endpoint's forward slashes are escaped first so they survive the s/// delimiter.
- name: {{ step_name }}
  working-directory: {{ working_dir }}
  run: |
    AOAI_API_KEY=${{ '{{' }} secrets.AOAI_GPT_4V_KEY }}
    AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_GPT_4V_ENDPOINT }}
    AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
    cp ../../../connections/azure_openai.yml ./azure_openai.yml
    sed -i -e "s/<user-input>/$AOAI_API_KEY/g" -e "s/aoai-api-endpoint/$AOAI_API_ENDPOINT/g" azure_openai.yml
promptflow/scripts/readme/ghactions_driver/workflow_steps/step_create_env_gpt4.yml.jinja2/0
{ "file_path": "promptflow/scripts/readme/ghactions_driver/workflow_steps/step_create_env_gpt4.yml.jinja2", "repo_id": "promptflow", "token_count": 218 }
23
import argparse
from pathlib import Path
from functools import reduce
from typing import List

from ghactions_driver.readme_workflow_generate import write_readme_workflow
from ghactions_driver.readme_step import ReadmeStepsManage, ReadmeSteps
from ghactions_driver.readme_parse import readme_parser
from ghactions_driver.telemetry_obj import Telemetry


def local_filter(callback, array: List[Path]) -> List[Path]:
    """Return the items of `array` for which callback(item, index, array) is truthy.

    (The annotation was previously the malformed literal `[Path]`.)
    """
    results = []
    for index, item in enumerate(array):
        result = callback(item, index, array)
        # if returned true, append item to results
        if result:
            results.append(item)
    return results


def no_readme_generation_filter(item: Path, index, array) -> bool:
    """
    If there is no steps in the readme, then no generation
    """
    try:
        if 'build' in str(item):
            # skip build folder
            return False
        full_text = readme_parser(item.relative_to(ReadmeStepsManage.git_base_dir()))
        return full_text != ""
    except Exception as error:
        print(error)
        return False


# generate readme
def main(input_glob, exclude_glob=None, output_files=None):
    """Generate GitHub workflow files for every README matched by `input_glob`
    (minus `exclude_glob`); Telemetry objects are appended to `output_files`.

    Fix: the defaults were mutable lists (`[]`), which Python shares between
    calls — `output_files` would accumulate across invocations. None sentinels
    preserve the interface while avoiding the shared-state pitfall.
    """
    if exclude_glob is None:
        exclude_glob = []
    if output_files is None:
        output_files = []

    def set_add(p, q):
        return p | q

    def set_difference(p, q):
        return p - q

    globs = reduce(set_add, [set(Path(ReadmeStepsManage.git_base_dir()).glob(p)) for p in input_glob], set())
    globs_exclude = reduce(set_difference,
                           [set(Path(ReadmeStepsManage.git_base_dir()).glob(p)) for p in exclude_glob],
                           globs)
    readme_items = sorted(globs_exclude)

    readme_items = local_filter(no_readme_generation_filter, readme_items)
    for readme in readme_items:
        readme_telemetry = Telemetry()
        workflow_name = readme.relative_to(ReadmeStepsManage.git_base_dir())
        # Deal with readme
        write_readme_workflow(workflow_name.resolve(), readme_telemetry)
        ReadmeSteps.cleanup()
        output_files.append(readme_telemetry)


if __name__ == "__main__":
    # setup argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-g",
        "--input-glob",
        nargs="+",
        help="Input Readme.md glob example 'examples/flows/**/Readme.md'",
    )
    args = parser.parse_args()

    # call main
    main(args.input_glob)
promptflow/scripts/readme/readme_generator.py/0
{ "file_path": "promptflow/scripts/readme/readme_generator.py", "repo_id": "promptflow", "token_count": 983 }
24
from setuptools import find_packages, setup

# Jinja2 template rendered by the tool-package generator; the placeholders are
# replaced with the generated package and tool names.
PACKAGE_NAME = "{{ package_name }}"

setup(
    name=PACKAGE_NAME,
    version="0.0.1",
    description="This is my tools package",
    packages=find_packages(),
    entry_points={
        # The "package_tools" entry point is how promptflow discovers the tools
        # exposed by this package at runtime.
        "package_tools": ["{{ tool_name }} = {{ package_name }}.tools.utils:list_package_tools"],
    },
    include_package_data=True,   # This line tells setuptools to include files from MANIFEST.in
)
promptflow/scripts/tool/templates/setup.py.j2/0
{ "file_path": "promptflow/scripts/tool/templates/setup.py.j2", "repo_id": "promptflow", "token_count": 157 }
25
import functools
import json
import os
import re
import requests
import sys
import time
import tempfile
from abc import abstractmethod
from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Tuple, Optional, Union

from promptflow._core.tool import ToolProvider, tool
from promptflow._sdk._constants import ConnectionType
from promptflow.connections import CustomConnection
from promptflow.contracts.types import PromptTemplate
from promptflow.tools.common import render_jinja_template, validate_role
from promptflow.tools.exception import (
    OpenModelLLMOnlineEndpointError,
    OpenModelLLMUserError,
    OpenModelLLMKeyValidationError,
    ChatAPIInvalidRole
)

DEPLOYMENT_DEFAULT = "default"
CONNECTION_CACHE_FILE = "pf_connection_names"
VALID_LLAMA_ROLES = {"system", "user", "assistant"}
AUTH_REQUIRED_CONNECTION_TYPES = {"serverlessendpoint", "onlineendpoint", "connection"}
REQUIRED_CONFIG_KEYS = ["endpoint_url", "model_family"]
REQUIRED_SECRET_KEYS = ["endpoint_api_key"]
ENDPOINT_REQUIRED_ENV_VARS = ["AZUREML_ARM_SUBSCRIPTION", "AZUREML_ARM_RESOURCEGROUP", "AZUREML_ARM_WORKSPACE_NAME"]


def handle_online_endpoint_error(max_retries: int = 5,
                                 initial_delay: float = 2,
                                 exponential_base: float = 3):
    """Decorator factory: retry the wrapped call on OpenModelLLMOnlineEndpointError
    with exponential backoff, re-raising a wrapped error after `max_retries` attempts."""
    def deco_retry(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            delay = initial_delay
            for i in range(max_retries):
                try:
                    return func(*args, **kwargs)
                except OpenModelLLMOnlineEndpointError as e:
                    if i == max_retries - 1:
                        error_message = f"Exception hit calling Online Endpoint: {type(e).__name__}: {str(e)}"
                        print(error_message, file=sys.stderr)
                        raise OpenModelLLMOnlineEndpointError(message=error_message)
                    delay *= exponential_base
                    time.sleep(delay)
        return wrapper
    return deco_retry


class ConnectionCache:
    """Cached list of connection names for a workspace, valid until `use_until`."""

    def __init__(self,
                 use_until: datetime,
                 subscription_id: str,
                 resource_group: str,
                 workspace_name: str,
                 connection_names: List[str]):
        self.use_until = use_until
        self.subscription_id = subscription_id
        self.resource_group = resource_group
        self.workspace_name = workspace_name
        self.connection_names = connection_names

    @classmethod
    def from_filename(self, file):
        # Rebuild the cache object from an open JSON file handle.
        cache = json.load(file)
        return self(cache['use_until'],
                    cache['subscription_id'],
                    cache['resource_group'],
                    cache['workspace_name'],
                    cache['connection_names'])

    def can_use(self,
                subscription_id: str,
                resource_group: str,
                workspace_name: str):
        # Usable only when not expired and only for the same workspace triple.
        use_until_time = datetime.fromisoformat(self.use_until)
        return (use_until_time > datetime.now()
                and self.subscription_id == subscription_id
                and self.resource_group == resource_group
                and self.workspace_name == workspace_name)


class Endpoint:
    """An online endpoint plus the deployments discovered under it."""

    def __init__(self,
                 endpoint_name: str,
                 endpoint_url: str,
                 endpoint_api_key: str):
        self.deployments: List[Deployment] = []
        self.default_deployment: Deployment = None
        self.endpoint_url = endpoint_url
        self.endpoint_api_key = endpoint_api_key
        self.endpoint_name = endpoint_name


class Deployment:
    """A single deployment under an endpoint, tagged with its model family."""

    def __init__(self,
                 deployment_name: str,
                 model_family: str):
        self.model_family = model_family
        self.deployment_name = deployment_name


class ServerlessEndpointsContainer:
    """Lists serverless endpoints and fetches their keys via the Azure ML ARM REST API."""
    API_VERSION = "2023-08-01-preview"

    def _get_headers(self, token: str) -> Dict[str, str]:
        headers = {
            "Authorization": f"Bearer {token}",
            "Content-Type": "application/json",
        }
        return headers

    def get_serverless_arm_url(self, subscription_id, resource_group, workspace_name, suffix=None):
        # Build the ARM URL for the workspace's serverlessEndpoints resource;
        # `suffix` appends a sub-path such as "<name>/listKeys".
        suffix = "" if suffix is None else f"/{suffix}"
        return f"https://management.azure.com/subscriptions/{subscription_id}" \
            + f"/resourceGroups/{resource_group}/providers/Microsoft.MachineLearningServices" \
            + f"/workspaces/{workspace_name}/serverlessEndpoints{suffix}?api-version={self.API_VERSION}"

    def _list(self, token: str, subscription_id: str, resource_group: str, workspace_name: str):
        # Returns the raw endpoint resources, or [] on any failure (best effort).
        headers = self._get_headers(token)
        url = self.get_serverless_arm_url(subscription_id, resource_group, workspace_name)
        try:
            response = requests.get(url, headers=headers, timeout=50)
            return json.loads(response.content)['value']
        except Exception as e:
            print(f"Error encountered when listing serverless endpoints. Exception: {e}", file=sys.stderr)
            return []

    def _validate_model_family(self, serverless_endpoint):
        """Return the supported ModelFamily for a serverless endpoint, or None.

        Only successfully provisioned llama-family offers (legacy 'offer' shape
        or newer 'marketplaceInfo' shape) are accepted.
        """
        try:
            if serverless_endpoint.get('properties', {}).get('provisioningState') != "Succeeded":
                return None

            if (try_get_from_dict(serverless_endpoint, ['properties', 'offer', 'publisher']) == 'Meta'
                    and "llama" in try_get_from_dict(serverless_endpoint, ['properties', 'offer', 'offerName'])):
                return ModelFamily.LLAMA
            if (try_get_from_dict(serverless_endpoint,
                                  ['properties', 'marketplaceInfo', 'publisherId']) == 'metagenai'
                    and "llama" in try_get_from_dict(serverless_endpoint,
                                                     ['properties', 'marketplaceInfo', 'offerId'])):
                return ModelFamily.LLAMA
        except Exception as ex:
            print(f"Ignoring endpoint {serverless_endpoint['id']} due to error: {ex}", file=sys.stderr)
        return None

    def list_serverless_endpoints(self,
                                  token,
                                  subscription_id,
                                  resource_group,
                                  workspace_name,
                                  return_endpoint_url: bool = False):
        """Return dropdown-style entries for the workspace's supported serverless endpoints."""
        serverlessEndpoints = self._list(token, subscription_id, resource_group, workspace_name)

        result = []
        for e in serverlessEndpoints:
            if (self._validate_model_family(e)):
                result.append({
                    "value": f"serverlessEndpoint/{e['name']}",
                    "display_value": f"[Serverless] {e['name']}",
                    # "hyperlink": self.get_endpoint_url(e.endpoint_name)
                    "description": f"Serverless Endpoint: {e['name']}",
                })
                if return_endpoint_url:
                    result[-1]['url'] = try_get_from_dict(e, ['properties', 'inferenceEndpoint', 'uri'])
        return result

    def _list_endpoint_key(self,
                           token: str,
                           subscription_id: str,
                           resource_group: str,
                           workspace_name: str,
                           serverless_endpoint_name: str):
        # POST to the ARM listKeys action; returns the parsed key payload,
        # or None (implicitly) on failure.
        headers = self._get_headers(token)
        url = self.get_serverless_arm_url(subscription_id,
                                          resource_group,
                                          workspace_name,
                                          f"{serverless_endpoint_name}/listKeys")
        try:
            response = requests.post(url, headers=headers, timeout=50)
            return json.loads(response.content)
        except Exception as e:
            print(f"Unable to get key from selected serverless endpoint. Exception: {e}", file=sys.stderr)

    def get_serverless_endpoint(self,
                                token: str,
                                subscription_id: str,
                                resource_group: str,
                                workspace_name: str,
                                serverless_endpoint_name: str):
        headers = self._get_headers(token)
        url = self.get_serverless_arm_url(subscription_id, resource_group, workspace_name, serverless_endpoint_name)
        try:
            response = requests.get(url, headers=headers, timeout=50)
            return json.loads(response.content)
        except Exception as e:
            print(f"Unable to get selected serverless endpoint. Exception: {e}", file=sys.stderr)

    def get_serverless_endpoint_key(self,
                                    token: str,
                                    subscription_id: str,
                                    resource_group: str,
                                    workspace_name: str,
                                    serverless_endpoint_name: str) -> Tuple[str, str, str]:
        """Return (endpoint_url, endpoint_api_key, model_family) for the endpoint."""
        endpoint = self.get_serverless_endpoint(token,
                                                subscription_id,
                                                resource_group,
                                                workspace_name,
                                                serverless_endpoint_name)
        endpoint_url = try_get_from_dict(endpoint, ['properties', 'inferenceEndpoint', 'uri'])
        model_family = self._validate_model_family(endpoint)
        endpoint_api_key = self._list_endpoint_key(token,
                                                   subscription_id,
                                                   resource_group,
                                                   workspace_name,
                                                   serverless_endpoint_name)['primaryKey']
        return (endpoint_url, endpoint_api_key, model_family)


class CustomConnectionsContainer:
    """Discovers custom connections (Azure workspace and local) that describe endpoints."""

    def get_azure_custom_connection_names(self,
                                          credential,
                                          subscription_id: str,
                                          resource_group_name: str,
                                          workspace_name: str,
                                          return_endpoint_url: bool = False
                                          ) -> List[Dict[str, Union[str, int, float, list, Dict]]]:
        result = []
        try:
            from promptflow.azure import PFClient as AzurePFClient
            azure_pf_client = AzurePFClient(
                credential=credential,
                subscription_id=subscription_id,
                resource_group_name=resource_group_name,
                workspace_name=workspace_name)
        except Exception:
            message = "Skipping Azure PFClient. To connect, please ensure the following environment variables are set: "
            message += ",".join(ENDPOINT_REQUIRED_ENV_VARS)
            print(message, file=sys.stderr)
            return result

        connections = azure_pf_client._connections.list()
        for c in connections:
            if c.type == ConnectionType.CUSTOM and "model_family" in c.configs:
                try:
                    validate_model_family(c.configs["model_family"])
                    result.append({
                        "value": f"connection/{c.name}",
                        "display_value": f"[Connection] {c.name}",
                        # "hyperlink": "",
                        "description": f"Custom Connection: {c.name}",
                    })
                    if return_endpoint_url:
                        result[-1]['url'] = c.configs['endpoint_url']
                except Exception:
                    # silently ignore unsupported model family
                    continue
        return result

    def get_local_custom_connection_names(self,
                                          return_endpoint_url: bool = False
                                          ) -> List[Dict[str, Union[str, int, float, list, Dict]]]:
        result = []
        try:
            from promptflow import PFClient as LocalPFClient
        except Exception as e:
            print(f"Skipping Local PFClient. Exception: {e}", file=sys.stderr)
            return result

        pf = LocalPFClient()
        connections = pf.connections.list()
        for c in connections:
            if c.type == ConnectionType.CUSTOM and "model_family" in c.configs:
                try:
                    validate_model_family(c.configs["model_family"])
                    result.append({
                        "value": f"localConnection/{c.name}",
                        "display_value": f"[Local Connection] {c.name}",
                        # "hyperlink": "",
                        "description": f"Local Custom Connection: {c.name}",
                    })
                    if return_endpoint_url:
                        result[-1]['url'] = c.configs['endpoint_url']
                except Exception:
                    # silently ignore unsupported model family
                    continue
        return result

    def get_endpoint_from_local_custom_connection(self, connection_name) -> Tuple[str, str, str]:
        from promptflow import PFClient as LocalPFClient
        pf = LocalPFClient()
        connection = pf.connections.get(connection_name, with_secrets=True)
        return self.get_endpoint_from_custom_connection(connection)

    def get_endpoint_from_azure_custom_connection(self,
                                                  credential,
                                                  subscription_id,
                                                  resource_group_name,
                                                  workspace_name,
                                                  connection_name) -> Tuple[str, str, str]:
        from promptflow.azure import
PFClient as AzurePFClient azure_pf_client = AzurePFClient( credential=credential, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name) connection = azure_pf_client._arm_connections.get(connection_name) return self.get_endpoint_from_custom_connection(connection) def get_endpoint_from_custom_connection(self, connection: CustomConnection) -> Tuple[str, str, str]: conn_dict = dict(connection) for key in REQUIRED_CONFIG_KEYS: if key not in conn_dict: accepted_keys = ",".join([key for key in REQUIRED_CONFIG_KEYS]) raise OpenModelLLMKeyValidationError( message=f"""Required key `{key}` not found in given custom connection. Required keys are: {accepted_keys}.""" ) for key in REQUIRED_SECRET_KEYS: if key not in conn_dict: accepted_keys = ",".join([key for key in REQUIRED_SECRET_KEYS]) raise OpenModelLLMKeyValidationError( message=f"""Required secret key `{key}` not found in given custom connection. Required keys are: {accepted_keys}.""" ) model_family = validate_model_family(connection.configs['model_family']) return (connection.configs['endpoint_url'], connection.secrets['endpoint_api_key'], model_family) def list_custom_connection_names(self, credential, subscription_id: str, resource_group_name: str, workspace_name: str, return_endpoint_url: bool = False ) -> List[Dict[str, Union[str, int, float, list, Dict]]]: azure_custom_connections = self.get_azure_custom_connection_names(credential, subscription_id, resource_group_name, workspace_name, return_endpoint_url) local_custom_connections = self.get_local_custom_connection_names(return_endpoint_url) return azure_custom_connections + local_custom_connections class EndpointsContainer: def get_ml_client(self, credential, subscription_id: str, resource_group_name: str, workspace_name: str): try: from azure.ai.ml import MLClient return MLClient( credential=credential, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name) except 
Exception as e: message = "Unable to connect to AzureML. Please ensure the following environment variables are set: " message += ",".join(ENDPOINT_REQUIRED_ENV_VARS) message += "\nException: " + str(e) raise OpenModelLLMOnlineEndpointError(message=message) def get_endpoints_and_deployments(self, credential, subscription_id: str, resource_group_name: str, workspace_name: str) -> List[Endpoint]: ml_client = self.get_ml_client(credential, subscription_id, resource_group_name, workspace_name) list_of_endpoints: List[Endpoint] = [] for ep in ml_client.online_endpoints.list(): endpoint = Endpoint( endpoint_name=ep.name, endpoint_url=ep.scoring_uri, endpoint_api_key=ml_client.online_endpoints.get_keys(ep.name).primary_key) ordered_deployment_names = sorted(ep.traffic, key=lambda item: item[1]) deployments = ml_client.online_deployments.list(ep.name) for deployment_name in ordered_deployment_names: for d in deployments: if d.name == deployment_name: model_family = get_model_type(d.model) if model_family is None: continue deployment = Deployment(deployment_name=d.name, model_family=model_family) endpoint.deployments.append(deployment) # Deployment are ordered by traffic level, first in is default if endpoint.default_deployment is None: endpoint.default_deployment = deployment if len(endpoint.deployments) > 0: list_of_endpoints.append(endpoint) self.__endpoints_and_deployments = list_of_endpoints return self.__endpoints_and_deployments def get_endpoint_url(self, endpoint_name, subscription_id, resource_group_name, workspace_name): return f"https://ml.azure.com/endpoints/realtime/{endpoint_name}" \ + f"/detail?wsid=/subscriptions/{subscription_id}" \ + f"/resourceGroups/{resource_group_name}" \ + f"/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" def list_endpoint_names(self, credential, subscription_id, resource_group_name, workspace_name, return_endpoint_url: bool = False ) -> List[Dict[str, Union[str, int, float, list, Dict]]]: '''Function for 
listing endpoints in the UX''' endpoints_and_deployments = self.get_endpoints_and_deployments( credential, subscription_id, resource_group_name, workspace_name) result = [] for e in endpoints_and_deployments: result.append({ "value": f"onlineEndpoint/{e.endpoint_name}", "display_value": f"[Online] {e.endpoint_name}", "hyperlink": self.get_endpoint_url(e.endpoint_name, subscription_id, resource_group_name, workspace_name), "description": f"Online Endpoint: {e.endpoint_name}", }) if return_endpoint_url: result[-1]['url'] = e.endpoint_url return result def list_deployment_names(self, credential, subscription_id, resource_group_name, workspace_name, endpoint_name: str ) -> List[Dict[str, Union[str, int, float, list, Dict]]]: '''Function for listing deployments in the UX''' if endpoint_name is None: return [] endpoints_and_deployments = self.get_endpoints_and_deployments( credential, subscription_id, resource_group_name, workspace_name) for endpoint in endpoints_and_deployments: if endpoint.endpoint_name == endpoint_name: result = [] for d in endpoint.deployments: result.append({ "value": d.deployment_name, "display_value": d.deployment_name, # "hyperlink": '', "description": f"this is {d.deployment_name} item", }) return result return [] ENDPOINT_CONTAINER = EndpointsContainer() CUSTOM_CONNECTION_CONTAINER = CustomConnectionsContainer() SERVERLESS_ENDPOINT_CONTAINER = ServerlessEndpointsContainer() def is_serverless_endpoint(endpoint_url: str) -> bool: return "serverless.ml.azure.com" in endpoint_url or "inference.ai.azure.com" in endpoint_url def try_get_from_dict(some_dict: Dict, key_list: List): for key in key_list: if some_dict is None: return some_dict elif key in some_dict: some_dict = some_dict[key] else: return None return some_dict def parse_endpoint_connection_type(endpoint_connection_name: str) -> Tuple[str, str]: endpoint_connection_details = endpoint_connection_name.split("/") return (endpoint_connection_details[0].lower(), endpoint_connection_details[1]) 
def list_endpoint_names(subscription_id: str, resource_group_name: str, workspace_name: str, return_endpoint_url: bool = False, force_refresh: bool = False) -> List[Dict[str, Union[str, int, float, list, Dict]]]: cache_file_path = None try: with tempfile.NamedTemporaryFile(delete=False) as temp_file: cache_file_path = os.path.join(os.path.dirname(temp_file.name), CONNECTION_CACHE_FILE) print(f"Attempting to read connection cache. File path: {cache_file_path}", file=sys.stdout) if force_refresh: print("....skipping. force_refresh is True", file=sys.stdout) else: with open(cache_file_path, 'r') as file: cache = ConnectionCache.from_filename(file) if cache.can_use(subscription_id, resource_group_name, workspace_name): if len(cache.connection_names) > 0: print("....using Connection Cache File", file=sys.stdout) return cache.connection_names else: print("....skipping. No connections in file", file=sys.stdout) else: print("....skipping. File not relevant", file=sys.stdout) except Exception as e: print(f"....failed to find\\read connection cache file. Regenerating. Error:{e}", file=sys.stdout) try: from azure.identity import DefaultAzureCredential credential = DefaultAzureCredential(exclude_interactive_browser_credential=False) token = credential.get_token("https://management.azure.com/.default").token except Exception as e: print(f"Skipping list_endpoint_names. 
Exception: {e}", file=sys.stderr) msg = "Exception getting token: Please retry" return [{"value": msg, "display_value": msg, "description": msg}] serverless_endpoints = SERVERLESS_ENDPOINT_CONTAINER.list_serverless_endpoints(token, subscription_id, resource_group_name, workspace_name, return_endpoint_url) online_endpoints = ENDPOINT_CONTAINER.list_endpoint_names(credential, subscription_id, resource_group_name, workspace_name, return_endpoint_url) custom_connections = CUSTOM_CONNECTION_CONTAINER.list_custom_connection_names(credential, subscription_id, resource_group_name, workspace_name, return_endpoint_url) list_of_endpoints = custom_connections + serverless_endpoints + online_endpoints cache = ConnectionCache(use_until=(datetime.now() + timedelta(minutes=5)).isoformat(), subscription_id=subscription_id, resource_group=resource_group_name, workspace_name=workspace_name, connection_names=list_of_endpoints) if len(list_of_endpoints) == 0: msg = "No endpoints found. Please add a connection." return [{"value": msg, "display_value": msg, "description": msg}] if cache_file_path is not None: try: print(f"Attempting to write connection cache. File path: {cache_file_path}", file=sys.stdout) with open(cache_file_path, 'w') as file: json.dump(cache, file, default=lambda obj: obj.__dict__) print("....written", file=sys.stdout) except Exception as e: print(f"""....failed to write connection cache file. Will need to reload next time. Error:{e}""", file=sys.stdout) return list_of_endpoints def list_deployment_names(subscription_id: str, resource_group_name: str, workspace_name: str, endpoint: str = None) -> List[Dict[str, Union[str, int, float, list, Dict]]]: deployment_default_list = [{ "value": DEPLOYMENT_DEFAULT, "display_value": DEPLOYMENT_DEFAULT, "description": "This will use the default deployment for the selected online endpoint." + "You can also manually enter a deployment name here." 
}] if endpoint is None or endpoint.strip() == "" or "/" not in endpoint: return deployment_default_list (endpoint_connection_type, endpoint_connection_name) = parse_endpoint_connection_type(endpoint) if endpoint_connection_type != "onlineendpoint": return deployment_default_list try: from azure.identity import DefaultAzureCredential credential = DefaultAzureCredential(exclude_interactive_browser_credential=False) except Exception as e: print(f"Skipping list_deployment_names. Exception: {e}", file=sys.stderr) return deployment_default_list return deployment_default_list + ENDPOINT_CONTAINER.list_deployment_names( credential, subscription_id, resource_group_name, workspace_name, endpoint_connection_name ) def get_model_type(deployment_model: str) -> str: m = re.match(r'azureml://registries/[^/]+/models/([^/]+)/versions/', deployment_model) if m is None: print(f"Unexpected model format: {deployment_model}. Skipping", file=sys.stdout) return None model = m[1].lower() if model.startswith("llama-2"): return ModelFamily.LLAMA elif model.startswith("tiiuae-falcon"): return ModelFamily.FALCON elif model.startswith("databricks-dolly-v2"): return ModelFamily.DOLLY elif model.startswith("gpt2"): return ModelFamily.GPT2 else: # Not found and\or handled. Ignore this endpoint\deployment print(f"Unexpected model type: {model} derived from deployed model: {deployment_model}") return None def validate_model_family(model_family: str): try: return ModelFamily[model_family] except KeyError: accepted_models = ",".join([model.name for model in ModelFamily]) raise OpenModelLLMKeyValidationError( message=f"""Given model_family '{model_family}' not recognized. 
Supported models are: {accepted_models}.""" ) class ModelFamily(str, Enum): LLAMA = "LLaMa" DOLLY = "Dolly" GPT2 = "GPT-2" FALCON = "Falcon" @classmethod def _missing_(cls, value): value = value.lower() for member in cls: if member.lower() == value: return member return None STANDARD_CONTRACT_MODELS = [ModelFamily.DOLLY, ModelFamily.GPT2, ModelFamily.FALCON] class API(str, Enum): CHAT = "chat" COMPLETION = "completion" class ContentFormatterBase: """Transform request and response of AzureML endpoint to match with required schema. """ content_type: Optional[str] = "application/json" """The MIME type of the input data passed to the endpoint""" accepts: Optional[str] = "application/json" """The MIME type of the response data returned from the endpoint""" @staticmethod def escape_special_characters(prompt: str) -> str: """Escapes any special characters in `prompt`""" return re.sub( r'\\([\\\"a-zA-Z])', r'\\\1', prompt) @staticmethod def parse_chat(chat_str: str) -> List[Dict[str, str]]: # LLaMa only supports below roles. separator = r"(?i)\n*(system|user|assistant)\s*:\s*\n" chunks = re.split(separator, chat_str) # remove any empty chunks chunks = [c.strip() for c in chunks if c.strip()] chat_list = [] for index in range(0, len(chunks), 2): role = chunks[index].lower() # Check if prompt follows chat api message format and has valid role. try: validate_role(role, VALID_LLAMA_ROLES) except ChatAPIInvalidRole as e: raise OpenModelLLMUserError(message=e.message) if len(chunks) <= index + 1: message = "Unexpected chat format. Please ensure the query matches the chat format of the model used." raise OpenModelLLMUserError(message=message) chat_list.append({ "role": role, "content": chunks[index+1] }) return chat_list @abstractmethod def format_request_payload(self, prompt: str, model_kwargs: Dict) -> str: """Formats the request body according to the input schema of the model. 
Returns bytes or seekable file like object in the format specified in the content_type request header. """ @abstractmethod def format_response_payload(self, output: bytes) -> str: """Formats the response body according to the output schema of the model. Returns the data type that is received from the response. """ class MIRCompleteFormatter(ContentFormatterBase): """Content handler for LLMs from the HuggingFace catalog.""" def format_request_payload(self, prompt: str, model_kwargs: Dict) -> str: input_str = json.dumps( { "input_data": {"input_string": [ContentFormatterBase.escape_special_characters(prompt)]}, "parameters": model_kwargs, } ) return input_str def format_response_payload(self, output: bytes) -> str: """These models only support generation - expect a single output style""" response_json = json.loads(output) if len(response_json) > 0 and "0" in response_json[0]: if "0" in response_json[0]: return response_json[0]["0"] elif "output" in response_json: return response_json["output"] error_message = f"Unexpected response format. 
Response: {response_json}" print(error_message, file=sys.stderr) raise OpenSourceLLMOnlineEndpointError(message=error_message) class LlamaContentFormatter(ContentFormatterBase): """Content formatter for LLaMa""" def __init__(self, api: API, chat_history: Optional[str] = ""): super().__init__() self.api = api self.chat_history = chat_history def format_request_payload(self, prompt: str, model_kwargs: Dict) -> str: """Formats the request according the the chosen api""" if "do_sample" not in model_kwargs: model_kwargs["do_sample"] = True if self.api == API.CHAT: prompt_value = ContentFormatterBase.parse_chat(self.chat_history) else: prompt_value = [ContentFormatterBase.escape_special_characters(prompt)] return json.dumps( { "input_data": { "input_string": prompt_value, "parameters": model_kwargs } } ) def format_response_payload(self, output: bytes) -> str: """Formats response""" response_json = json.loads(output) if self.api == API.CHAT and "output" in response_json: return response_json["output"] elif self.api == API.COMPLETION and len(response_json) > 0 and "0" in response_json[0]: return response_json[0]["0"] else: error_message = f"Unexpected response format. 
Response: {response_json}" print(error_message, file=sys.stderr) raise OpenModelLLMOnlineEndpointError(message=error_message) class ServerlessLlamaContentFormatter(ContentFormatterBase): """Content formatter for LLaMa""" def __init__(self, api: API, chat_history: Optional[str] = ""): super().__init__() self.api = api self.chat_history = chat_history self.model_id = "llama-2-7b-hf" def format_request_payload(self, prompt: str, model_kwargs: Dict) -> str: """Formats the request according the the chosen api""" # Modify max_tokens key for serverless model_kwargs["max_tokens"] = model_kwargs["max_new_tokens"] if self.api == API.CHAT: messages = ContentFormatterBase.parse_chat(self.chat_history) base_body = { "model": self.model_id, "messages": messages, "n": 1, } base_body.update(model_kwargs) else: prompt_value = ContentFormatterBase.escape_special_characters(prompt) base_body = { "prompt": prompt_value, "n": 1, } base_body.update(model_kwargs) return json.dumps(base_body) def format_response_payload(self, output: bytes) -> str: """Formats response""" response_json = json.loads(output) if self.api == API.CHAT and "choices" in response_json: return response_json["choices"][0]["message"]["content"] elif self.api == API.COMPLETION and "choices" in response_json: return response_json["choices"][0]["text"] else: error_message = f"Unexpected response format. 
Response: {response_json}" print(error_message, file=sys.stderr) raise OpenModelLLMOnlineEndpointError(message=error_message) class ContentFormatterFactory: """Factory class for supported models""" def get_content_formatter( model_family: ModelFamily, api: API, chat_history: Optional[List[Dict]] = [], endpoint_url: Optional[str] = "" ) -> ContentFormatterBase: if model_family == ModelFamily.LLAMA: if is_serverless_endpoint(endpoint_url): return ServerlessLlamaContentFormatter(chat_history=chat_history, api=api) else: return LlamaContentFormatter(chat_history=chat_history, api=api) elif model_family in STANDARD_CONTRACT_MODELS: return MIRCompleteFormatter() class AzureMLOnlineEndpoint: """Azure ML Online Endpoint models.""" endpoint_url: str = "" """URL of pre-existing Endpoint. Should be passed to constructor or specified as env var `AZUREML_ENDPOINT_URL`.""" endpoint_api_key: str = "" """Authentication Key for Endpoint. Should be passed to constructor or specified as env var `AZUREML_ENDPOINT_API_KEY`.""" content_formatter: Any = None """The content formatter that provides an input and output transform function to handle formats between the LLM and the endpoint""" model_kwargs: Optional[Dict] = {} """Key word arguments to pass to the model.""" def __init__( self, endpoint_url: str, endpoint_api_key: str, content_formatter: ContentFormatterBase, model_family: ModelFamily, deployment_name: Optional[str] = None, model_kwargs: Optional[Dict] = {}, ): self.endpoint_url = endpoint_url self.endpoint_api_key = endpoint_api_key self.deployment_name = deployment_name self.content_formatter = content_formatter self.model_kwargs = model_kwargs self.model_family = model_family def _call_endpoint(self, request_body: str) -> str: """call.""" headers = { "Content-Type": "application/json", "Authorization": ("Bearer " + self.endpoint_api_key), "x-ms-user-agent": "PromptFlow/OpenModelLLM/" + self.model_family } # If this is not set it'll use the default deployment on the endpoint. 
if self.deployment_name is not None: headers["azureml-model-deployment"] = self.deployment_name result = requests.post(self.endpoint_url, data=request_body, headers=headers) if result.status_code != 200: error_message = f"""Request failure while calling Online Endpoint Status:{result.status_code} Error:{result.text}""" print(error_message, file=sys.stderr) raise OpenModelLLMOnlineEndpointError(message=error_message) return result.text def __call__( self, prompt: str ) -> str: """Call out to an AzureML Managed Online endpoint. Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. Example: .. code-block:: python response = azureml_model("Tell me a joke.") """ request_body = self.content_formatter.format_request_payload(prompt, self.model_kwargs) endpoint_response = self._call_endpoint(request_body) response = self.content_formatter.format_response_payload(endpoint_response) return response class OpenModelLLM(ToolProvider): def __init__(self): super().__init__() def get_deployment_from_endpoint(self, credential, subscription_id: str, resource_group_name: str, workspace_name: str, endpoint_name: str, deployment_name: str = None) -> Tuple[str, str, str]: endpoints_and_deployments = ENDPOINT_CONTAINER.get_endpoints_and_deployments( credential, subscription_id, resource_group_name, workspace_name) for ep in endpoints_and_deployments: if ep.endpoint_name == endpoint_name: if deployment_name is None: return (ep.endpoint_url, ep.endpoint_api_key, ep.default_deployment.model_family) for d in ep.deployments: if d.deployment_name == deployment_name: return (ep.endpoint_url, ep.endpoint_api_key, d.model_family) message = f"""Invalid endpoint and deployment values. Please ensure endpoint name and deployment names are correct, and the deployment was successfull. 
Could not find endpoint: {endpoint_name} and deployment: {deployment_name}""" raise OpenModelLLMUserError(message=message) def sanitize_endpoint_url(self, endpoint_url: str, api_type: API): if is_serverless_endpoint(endpoint_url): if api_type == API.CHAT: if not endpoint_url.endswith("/v1/chat/completions"): return endpoint_url + "/v1/chat/completions" else: if not endpoint_url.endswith("/v1/completions"): return endpoint_url + "/v1/completions" return endpoint_url def get_endpoint_details(self, subscription_id: str, resource_group_name: str, workspace_name: str, endpoint: str, api_type: API, deployment_name: str = None, **kwargs) -> Tuple[str, str, str]: if self.endpoint_values_in_kwargs(**kwargs): endpoint_url = kwargs["endpoint_url"] endpoint_api_key = kwargs["endpoint_api_key"] model_family = kwargs["model_family"] # clean these up, aka don't send them to MIR del kwargs["endpoint_url"] del kwargs["endpoint_api_key"] del kwargs["model_family"] return (endpoint_url, endpoint_api_key, model_family) (endpoint_connection_type, endpoint_connection_name) = parse_endpoint_connection_type(endpoint) print(f"endpoint_connection_type: {endpoint_connection_type} name: {endpoint_connection_name}", file=sys.stdout) con_type = endpoint_connection_type.lower() if con_type in AUTH_REQUIRED_CONNECTION_TYPES: try: from azure.identity import DefaultAzureCredential credential = DefaultAzureCredential(exclude_interactive_browser_credential=False) token = credential.get_token("https://management.azure.com/.default").token except Exception as e: message = f"""Error encountered while attempting to Authorize access to {endpoint}. 
Exception: {e}""" print(message, file=sys.stderr) raise OpenModelLLMUserError(message=message) if con_type == "serverlessendpoint": (endpoint_url, endpoint_api_key, model_family) = SERVERLESS_ENDPOINT_CONTAINER.get_serverless_endpoint_key( token, subscription_id, resource_group_name, workspace_name, endpoint_connection_name) elif con_type == "onlineendpoint": (endpoint_url, endpoint_api_key, model_family) = self.get_deployment_from_endpoint( credential, subscription_id, resource_group_name, workspace_name, endpoint_connection_name, deployment_name) elif con_type == "connection": (endpoint_url, endpoint_api_key, model_family) = CUSTOM_CONNECTION_CONTAINER.get_endpoint_from_azure_custom_connection( credential, subscription_id, resource_group_name, workspace_name, endpoint_connection_name) elif con_type == "localconnection": (endpoint_url, endpoint_api_key, model_family) = CUSTOM_CONNECTION_CONTAINER.get_endpoint_from_local_custom_connection( endpoint_connection_name) else: raise OpenModelLLMUserError(message=f"Invalid endpoint connection type: {endpoint_connection_type}") return (self.sanitize_endpoint_url(endpoint_url, api_type), endpoint_api_key, model_family) def endpoint_values_in_kwargs(self, **kwargs): # This is mostly for testing, suggest not using this since security\privacy concerns for the endpoint key if 'endpoint_url' not in kwargs and 'endpoint_api_key' not in kwargs and 'model_family' not in kwargs: return False if 'endpoint_url' not in kwargs or 'endpoint_api_key' not in kwargs or 'model_family' not in kwargs: message = """Endpoint connection via kwargs not fully set. 
If using kwargs, the following values must be set: endpoint_url, endpoint_api_key, and model_family""" raise OpenModelLLMKeyValidationError(message=message) return True @tool @handle_online_endpoint_error() def call( self, prompt: PromptTemplate, api: API, endpoint_name: str, deployment_name: Optional[str] = None, temperature: Optional[float] = 1.0, max_new_tokens: Optional[int] = 500, top_p: Optional[float] = 1.0, model_kwargs: Optional[Dict] = {}, **kwargs ) -> str: # Sanitize deployment name. Empty deployment name is the same as None. if deployment_name is not None: deployment_name = deployment_name.strip() if not deployment_name or deployment_name == DEPLOYMENT_DEFAULT: deployment_name = None print(f"Executing Open Model LLM Tool for endpoint: '{endpoint_name}', deployment: '{deployment_name}'", file=sys.stdout) (endpoint_url, endpoint_api_key, model_family) = self.get_endpoint_details( subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION", None), resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP", None), workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME", None), endpoint=endpoint_name, api_type=api, deployment_name=deployment_name, **kwargs) prompt = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs) model_kwargs["top_p"] = top_p model_kwargs["temperature"] = temperature model_kwargs["max_new_tokens"] = max_new_tokens content_formatter = ContentFormatterFactory.get_content_formatter( model_family=model_family, api=api, chat_history=prompt, endpoint_url=endpoint_url ) llm = AzureMLOnlineEndpoint( endpoint_url=endpoint_url, endpoint_api_key=endpoint_api_key, model_family=model_family, content_formatter=content_formatter, deployment_name=deployment_name, model_kwargs=model_kwargs ) return llm(prompt)
promptflow/src/promptflow-tools/promptflow/tools/open_model_llm.py/0
{ "file_path": "promptflow/src/promptflow-tools/promptflow/tools/open_model_llm.py", "repo_id": "promptflow", "token_count": 23948 }
26
[pytest] markers = skip_if_no_api_key: skip the test if actual api key is not provided.
promptflow/src/promptflow-tools/tests/pytest.ini/0
{ "file_path": "promptflow/src/promptflow-tools/tests/pytest.ini", "repo_id": "promptflow", "token_count": 33 }
27
import pytest from promptflow.exceptions import UserErrorException from promptflow.tools.serpapi import Engine, SafeMode, search import tests.utils as utils @pytest.mark.usefixtures("use_secrets_config_file") @pytest.mark.skip_if_no_api_key("serp_connection") class TestSerpAPI: def test_engine(self, serp_connection): query = "cute cats" num = 2 result_dict = search( connection=serp_connection, query=query, num=num, safe=SafeMode.ACTIVE, engine=Engine.GOOGLE.value) utils.is_json_serializable(result_dict, "serp api search()") assert result_dict["search_metadata"]["google_url"] is not None assert int(result_dict["search_parameters"]["num"]) == num assert result_dict["search_parameters"]["safe"].lower() == "active" result_dict = search( connection=serp_connection, query=query, num=num, safe=SafeMode.ACTIVE, engine=Engine.BING.value) utils.is_json_serializable(result_dict, "serp api search()") assert int(result_dict["search_parameters"]["count"]) == num assert result_dict["search_parameters"]["safe_search"].lower() == "strict" def test_invalid_api_key(self, serp_connection): serp_connection.api_key = "hello" query = "cute cats" num = 2 engine = Engine.GOOGLE.value error_msg = "Invalid API key. Your API key should be here: https://serpapi.com/manage-api-key" with pytest.raises(UserErrorException) as exc_info: search(connection=serp_connection, query=query, num=num, engine=engine) assert error_msg == exc_info.value.args[0] @pytest.mark.parametrize("engine", [Engine.GOOGLE.value, Engine.BING.value]) def test_invalid_query(self, serp_connection, engine): query = "" num = 2 error_msg = "Missing query `q` parameter." with pytest.raises(UserErrorException) as exc_info: search(connection=serp_connection, query=query, num=num, engine=engine) assert error_msg == exc_info.value.args[0]
promptflow/src/promptflow-tools/tests/test_serpapi.py/0
{ "file_path": "promptflow/src/promptflow-tools/tests/test_serpapi.py", "repo_id": "promptflow", "token_count": 832 }
28
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import argparse import json from functools import partial from promptflow._cli._params import ( add_param_all_results, add_param_max_results, add_param_set, add_param_yes, base_params, ) from promptflow._cli._utils import activate_action, confirm, exception_handler, get_secret_input, print_yellow_warning from promptflow._sdk._constants import MAX_LIST_CLI_RESULTS from promptflow._sdk._load_functions import load_connection from promptflow._sdk._pf_client import PFClient from promptflow._sdk.entities._connection import _Connection from promptflow._utils.logger_utils import get_cli_sdk_logger from promptflow._utils.yaml_utils import load_yaml logger = get_cli_sdk_logger() _client = None def _get_pf_client(): global _client if _client is None: _client = PFClient() return _client def add_param_file(parser): parser.add_argument("--file", "-f", type=str, help="File path of the connection yaml.", required=True) def add_param_name(parser, required=False): parser.add_argument("--name", "-n", type=str, help="Name of the connection.", required=required) def add_connection_parser(subparsers): connection_parser = subparsers.add_parser( "connection", description="""A CLI tool to manage connections for promptflow. 
Your secrets will be encrypted using AES(Advanced Encryption Standard) technology.""",  # noqa: E501
        help="pf connection",
    )
    # Register every connection sub-command on the shared sub-parser object.
    subparsers = connection_parser.add_subparsers()
    add_connection_create(subparsers)
    add_connection_update(subparsers)
    add_connection_show(subparsers)
    add_connection_list(subparsers)
    add_connection_delete(subparsers)
    connection_parser.set_defaults(action="connection")


def add_connection_create(subparsers):
    """Register the `pf connection create` sub-command."""
    # Do not change the indent of epilog
    epilog = """
Examples:

# Creating a connection with yaml file:
pf connection create -f connection.yaml
# Creating a connection with yaml file and overrides:
pf connection create -f connection.yaml --set api_key="my_api_key"
# Creating a custom connection with .env file, note that overrides specified by --set will be ignored:
pf connection create -f .env --name custom
"""
    activate_action(
        name="create",
        description="Create a connection.",
        epilog=epilog,
        add_params=[add_param_set, add_param_file, add_param_name] + base_params,
        subparsers=subparsers,
        help_message="Create a connection.",
        action_param_name="sub_action",
    )


def add_connection_update(subparsers):
    """Register the `pf connection update` sub-command (name is required)."""
    epilog = """
Examples:

# Updating a connection:
pf connection update -n my_connection --set api_key="my_api_key"
"""
    activate_action(
        name="update",
        description="Update a connection.",
        epilog=epilog,
        add_params=[add_param_set, partial(add_param_name, required=True)] + base_params,
        subparsers=subparsers,
        help_message="Update a connection.",
        action_param_name="sub_action",
    )


def add_connection_show(subparsers):
    """Register the `pf connection show` sub-command (name is required)."""
    epilog = """
Examples:

# Get and show a connection:
pf connection show -n my_connection_name
"""
    activate_action(
        name="show",
        description="Show a connection for promptflow.",
        epilog=epilog,
        add_params=[partial(add_param_name, required=True)] + base_params,
        subparsers=subparsers,
        help_message="Show a connection for promptflow.",
        action_param_name="sub_action",
    )


def add_connection_delete(subparsers):
    """Register the `pf connection delete` sub-command (name required, -y to skip confirm)."""
    epilog = """
Examples:

# Delete a connection:
pf connection delete -n my_connection_name
"""
    activate_action(
        name="delete",
        description="Delete a connection with specific name.",
        epilog=epilog,
        add_params=[partial(add_param_name, required=True), add_param_yes] + base_params,
        subparsers=subparsers,
        help_message="Delete a connection with specific name.",
        action_param_name="sub_action",
    )


def add_connection_list(subparsers):
    """Register the `pf connection list` sub-command."""
    epilog = """
Examples:

# List all connections:
pf connection list
"""
    activate_action(
        name="list",
        description="List all connections.",
        epilog=epilog,
        add_params=[add_param_max_results, add_param_all_results] + base_params,
        subparsers=subparsers,
        help_message="List all connections.",
        action_param_name="sub_action",
    )


def validate_and_interactive_get_secrets(connection, is_update=False):
    """Validate the connection and interactive get secrets if no secrets is provided.

    Iterates the connection's secret entries; any value that is scrubbed
    (e.g. all "*") or explicitly marked as user-input is collected from the
    terminal via get_secret_input. On update, scrubbed values are kept as-is
    (existing stored secrets will be reused). Mutates and returns `connection`.
    """
    prompt = "=================== Please input required secrets ==================="
    missing_secrets_prompt = False
    for name, val in connection.secrets.items():
        if not _Connection._is_scrubbed_value(val) and not _Connection._is_user_input_value(val):
            # Not scrubbed value, not require user input.
            continue
        if is_update and _Connection._is_scrubbed_value(val):
            # Scrubbed value, will use existing, not require user input.
            continue
        if not missing_secrets_prompt:
            # Print the banner only once, before the first prompted secret.
            print(prompt)
            missing_secrets_prompt = True
        while True:
            secret = get_secret_input(prompt=f"{name}: ")
            if secret:
                break
            print_yellow_warning("Secret can't be empty.")
        connection.secrets[name] = secret
    if missing_secrets_prompt:
        print("=================== Required secrets collected ===================")
    return connection


# Note the connection secrets value behaviors:
# --------------------------------------------------------------------------------
# | secret value     | CLI create   | CLI update          | SDK create_or_update |
# --------------------------------------------------------------------------------
# | empty or all "*" | prompt input | use existing values | use existing values  |
# | <no-change>      | prompt input | use existing values | use existing values  |
# | <user-input>     | prompt input | prompt input        | raise error          |
# --------------------------------------------------------------------------------
@exception_handler("Connection create")
def create_connection(file_path, params_override=None, name=None):
    """Create (or overwrite) a connection from a yaml/.env file; prints the result as JSON."""
    params_override = params_override or []
    if name:
        params_override.append({"name": name})
    connection = load_connection(source=file_path, params_override=params_override)
    existing_connection = _get_pf_client().connections.get(connection.name, raise_error=False)
    if existing_connection:
        logger.warning(f"Connection with name {connection.name} already exists. Updating it.")
        # Note: We don't set the existing secret back here, let user input the secrets.
    validate_and_interactive_get_secrets(connection)
    connection = _get_pf_client().connections.create_or_update(connection)
    print(json.dumps(connection._to_dict(), indent=4))


@exception_handler("Connection show")
def show_connection(name):
    """Print the named connection (secrets scrubbed by _to_dict) as JSON."""
    connection = _get_pf_client().connections.get(name)
    print(json.dumps(connection._to_dict(), indent=4))


@exception_handler("Connection list")
def list_connection(max_results=MAX_LIST_CLI_RESULTS, all_results=False):
    """Print up to `max_results` connections (or all of them) as a JSON array."""
    connections = _get_pf_client().connections.list(max_results, all_results)
    print(json.dumps([connection._to_dict() for connection in connections], indent=4))


def _upsert_connection_from_file(file, params_override=None):
    # Note: This function is used for pfutil, do not edit it.
    params_override = params_override or []
    params_override.append(load_yaml(file))
    connection = load_connection(source=file, params_override=params_override)
    existing_connection = _get_pf_client().connections.get(connection.name, raise_error=False)
    if existing_connection:
        connection = _Connection._load(data=existing_connection._to_dict(), params_override=params_override)
        validate_and_interactive_get_secrets(connection, is_update=True)
        # Set the secrets not scrubbed, as _to_dict() dump scrubbed connections.
        connection._secrets = existing_connection._secrets
    else:
        validate_and_interactive_get_secrets(connection)
    connection = _get_pf_client().connections.create_or_update(connection)
    return connection


@exception_handler("Connection update")
def update_connection(name, params_override=None):
    """Apply --set overrides on top of the stored connection and persist it; prints JSON."""
    params_override = params_override or []
    existing_connection = _get_pf_client().connections.get(name)
    connection = _Connection._load(data=existing_connection._to_dict(), params_override=params_override)
    validate_and_interactive_get_secrets(connection, is_update=True)
    # Set the secrets not scrubbed, as _to_dict() dump scrubbed connections.
    connection._secrets = existing_connection._secrets
    connection = _get_pf_client().connections.create_or_update(connection)
    print(json.dumps(connection._to_dict(), indent=4))


@exception_handler("Connection delete")
def delete_connection(name, skip_confirm: bool = False):
    """Delete the named connection after an interactive confirmation (skippable with -y)."""
    if confirm("Are you sure you want to perform this operation?", skip_confirm):
        _get_pf_client().connections.delete(name)
    else:
        print("The delete operation was canceled.")


def dispatch_connection_commands(args: argparse.Namespace):
    """Route a parsed `pf connection <sub_action>` invocation to its handler."""
    if args.sub_action == "create":
        create_connection(args.file, args.params_override, args.name)
    elif args.sub_action == "show":
        show_connection(args.name)
    elif args.sub_action == "list":
        list_connection(args.max_results, args.all_results)
    elif args.sub_action == "update":
        update_connection(args.name, args.params_override)
    elif args.sub_action == "delete":
        delete_connection(args.name, args.yes)
promptflow/src/promptflow/promptflow/_cli/_pf/_connection.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/_cli/_pf/_connection.py", "repo_id": "promptflow", "token_count": 3450 }
29
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

import argparse
import contextlib
import json
import os
import shutil
import sys
import traceback
from collections import namedtuple
from configparser import ConfigParser
from functools import wraps
from pathlib import Path
from typing import Dict, List, Optional, Tuple

import pydash
from dotenv import load_dotenv
from tabulate import tabulate

from promptflow._sdk._constants import CLIListOutputFormat
from promptflow._sdk._utils import print_red_error, print_yellow_warning
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.utils import is_in_ci_pipeline
from promptflow.exceptions import ErrorTarget, PromptflowException, UserErrorException

# Lightweight value object for (subscription, resource group, workspace).
AzureMLWorkspaceTriad = namedtuple("AzureMLWorkspace", ["subscription_id", "resource_group_name", "workspace_name"])

logger = get_cli_sdk_logger()


def _set_workspace_argument_for_subparsers(subparser, required=False):
    """Add workspace arguments to subparsers."""
    # Make these arguments optional so that user can use local azure cli context
    subparser.add_argument(
        "--subscription", required=required, type=str, help="Subscription id, required when pass run id."
    )
    subparser.add_argument(
        "--resource-group", "-g", required=required, type=str, help="Resource group name, required when pass run id."
    )
    subparser.add_argument(
        "--workspace-name", "-w", required=required, type=str, help="Workspace name, required when pass run id."
    )


def dump_connection_file(dot_env_file: str):
    """Dump a test `connection.json` built from AOAI environment variables.

    No-ops unless all three required variables are present; also points the
    PROMPTFLOW_CONNECTIONS env var at the dumped file.
    """
    for key in ["AZURE_OPENAI_API_KEY", "AZURE_OPENAI_API_BASE", "CHAT_DEPLOYMENT_NAME"]:
        if key not in os.environ:
            # skip dump connection file if not all required environment variables are set
            return

    connection_file_path = "./connection.json"
    os.environ["PROMPTFLOW_CONNECTIONS"] = connection_file_path

    load_dotenv(dot_env_file)
    connection_dict = {
        "custom_connection": {
            "type": "CustomConnection",
            "value": {
                "AZURE_OPENAI_API_KEY": os.environ["AZURE_OPENAI_API_KEY"],
                "AZURE_OPENAI_API_BASE": os.environ["AZURE_OPENAI_API_BASE"],
                "CHAT_DEPLOYMENT_NAME": os.environ["CHAT_DEPLOYMENT_NAME"],
            },
            "module": "promptflow.connections",
        }
    }
    with open(connection_file_path, "w") as f:
        json.dump(connection_dict, f)


def get_workspace_triad_from_local() -> AzureMLWorkspaceTriad:
    """Best-effort read of the Azure CLI's local config (~/.azure) for the workspace triad.

    Any missing/unreadable file simply leaves the corresponding field as None.
    """
    subscription_id = None
    resource_group_name = None
    workspace_name = None
    azure_config_path = Path.home() / ".azure"
    config_parser = ConfigParser()
    # subscription id
    try:
        config_parser.read_file(open(azure_config_path / "clouds.config"))
        subscription_id = config_parser["AzureCloud"]["subscription"]
    except Exception:  # pylint: disable=broad-except
        pass
    # resource group name & workspace name
    try:
        config_parser.read_file(open(azure_config_path / "config"))
        resource_group_name = config_parser["defaults"]["group"]
        workspace_name = config_parser["defaults"]["workspace"]
    except Exception:  # pylint: disable=broad-except
        pass
    return AzureMLWorkspaceTriad(subscription_id, resource_group_name, workspace_name)


def get_credentials_for_cli():
    """
    This function is part of mldesigner.dsl._dynamic_executor.DynamicExecutor._get_ml_client with
    some local imports.
    """
    from azure.ai.ml.identity import AzureMLOnBehalfOfCredential
    from azure.identity import AzureCliCredential, DefaultAzureCredential, ManagedIdentityCredential

    # May return a different one if executing in local
    # credential priority: OBO > managed identity > default
    # check OBO via environment variable, the referenced code can be found from below search:
    # https://msdata.visualstudio.com/Vienna/_search?text=AZUREML_OBO_ENABLED&type=code&pageSize=25&filters=ProjectFilters%7BVienna%7D&action=contents
    if os.getenv(IdentityEnvironmentVariable.OBO_ENABLED_FLAG):
        logger.debug("User identity is configured, use OBO credential.")
        credential = AzureMLOnBehalfOfCredential()
    else:
        client_id_from_env = os.getenv(IdentityEnvironmentVariable.DEFAULT_IDENTITY_CLIENT_ID)
        if client_id_from_env:
            # use managed identity when client id is available from environment variable.
            # reference code:
            # https://learn.microsoft.com/en-us/azure/machine-learning/how-to-identity-based-service-authentication?tabs=cli#compute-cluster
            logger.debug("Use managed identity credential.")
            credential = ManagedIdentityCredential(client_id=client_id_from_env)
        elif is_in_ci_pipeline():
            # use managed identity when executing in CI pipeline.
            logger.debug("Use azure cli credential.")
            credential = AzureCliCredential()
        else:
            # use default Azure credential to handle other cases.
            logger.debug("Use default credential.")
            credential = DefaultAzureCredential()

    return credential


def get_client_info_for_cli(subscription_id: str = None, resource_group_name: str = None, workspace_name: str = None):
    """Resolve the workspace triad from args, then local azure config, then AZUREML_ARM_* env vars."""
    if not (subscription_id and resource_group_name and workspace_name):
        workspace_triad = get_workspace_triad_from_local()
        subscription_id = subscription_id or workspace_triad.subscription_id
        resource_group_name = resource_group_name or workspace_triad.resource_group_name
        workspace_name = workspace_name or workspace_triad.workspace_name

    if not (subscription_id and resource_group_name and workspace_name):
        workspace_name = workspace_name or os.getenv("AZUREML_ARM_WORKSPACE_NAME")
        subscription_id = subscription_id or os.getenv("AZUREML_ARM_SUBSCRIPTION")
        resource_group_name = resource_group_name or os.getenv("AZUREML_ARM_RESOURCEGROUP")

    return subscription_id, resource_group_name, workspace_name


def get_client_for_cli(*, subscription_id: str = None, resource_group_name: str = None, workspace_name: str = None):
    """Build an azure.ai.ml MLClient for the resolved workspace; raises UserErrorException on missing fields."""
    from azure.ai.ml import MLClient

    subscription_id, resource_group_name, workspace_name = get_client_info_for_cli(
        subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name
    )
    missing_fields = []
    # locals() lookup lets us report the still-unset names after resolution.
    for key in ["workspace_name", "subscription_id", "resource_group_name"]:
        if not locals()[key]:
            missing_fields.append(key)
    if missing_fields:
        raise UserErrorException(
            "Please provide all required fields to work on specific workspace: {}".format(", ".join(missing_fields)),
            target=ErrorTarget.CONTROL_PLANE_SDK,
        )

    return MLClient(
        credential=get_credentials_for_cli(),
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
    )


def confirm(question, skip_confirm) -> bool:
    """Interactive y/n prompt; returns True immediately when skip_confirm is set."""
    if skip_confirm:
        return True
    answer = input(f"{question} [y/n]")
    while answer.lower() not in ["y", "n"]:
        answer = input("Please input 'y' or 'n':")
    return answer.lower() == "y"


@contextlib.contextmanager
def inject_sys_path(path):
    """Temporarily prepend `path` to sys.path; restores the original list on exit."""
    original_sys_path = sys.path.copy()
    sys.path.insert(0, str(path))
    try:
        yield
    finally:
        sys.path = original_sys_path


def activate_action(name, description, epilog, add_params, subparsers, help_message, action_param_name="action"):
    """Create a sub-parser, attach its params, and record the chosen action name as a default."""
    parser = subparsers.add_parser(
        name,
        description=description,
        epilog=epilog,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        help=help_message,
    )
    if add_params:
        for add_param_func in add_params:
            add_param_func(parser)
    parser.set_defaults(**{action_param_name: name})
    return parser


class IdentityEnvironmentVariable:
    """This class is copied from mldesigner._constants.IdentityEnvironmentVariable."""

    DEFAULT_IDENTITY_CLIENT_ID = "DEFAULT_IDENTITY_CLIENT_ID"
    OBO_ENABLED_FLAG = "AZUREML_OBO_ENABLED"


def _dump_entity_with_warnings(entity) -> Dict:
    """Serialize an SDK entity to a dict via _to_dict(); log (not raise) on failure."""
    if not entity:
        return
    if isinstance(entity, Dict):
        return entity
    try:
        return entity._to_dict()  # type: ignore
    except Exception as err:
        logger.warning("Failed to deserialize response: " + str(err))
        logger.warning(str(entity))
        logger.debug(traceback.format_exc())


def list_of_dict_to_dict(obj: list):
    """Flatten a list of dicts into one dict (later entries win); non-lists yield {}."""
    if not isinstance(obj, list):
        return {}
    result = {}
    for item in obj:
        result.update(item)
    return result


def list_of_dict_to_nested_dict(obj: list):
    """Expand dotted keys ("a.b.c") from a list of dicts into a nested dict."""
    result = {}
    for item in obj:
        for keys, value in item.items():
            keys = keys.split(".")
            pydash.set_(result, keys, value)
    return result


def _build_sorted_column_widths_tuple_list(
    columns: List[str],
    values1: Dict[str, int],
    values2: Dict[str, int],
    margins: Dict[str, int],
) -> List[Tuple[str, int]]:
    """Pair each column with max(values1, values2) + margin, sorted widest first."""
    res = []
    for column in columns:
        value = max(values1[column], values2[column]) + margins[column]
        res.append((column, value))
    res.sort(key=lambda x: x[1], reverse=True)
    return res


def _assign_available_width(
    column_expected_widths: List[Tuple[str, int]],
    available_width: int,
    column_assigned_widths: Dict[str, int],
    average_width: Optional[int] = None,
) -> Tuple[int, Dict[str, int]]:
    """Greedily grow assigned column widths toward their expected widths.

    Mutates and returns `column_assigned_widths` along with the remaining width.
    """
    for column, expected_width in column_expected_widths:
        if available_width <= 0:
            break
        # Grow from the average baseline when given, else from the current assignment.
        target = average_width if average_width is not None else column_assigned_widths[column]
        delta = expected_width - target
        if delta <= available_width:
            column_assigned_widths[column] = expected_width
            available_width -= delta
        else:
            column_assigned_widths[column] += available_width
            available_width = 0
    return available_width, column_assigned_widths


def _calculate_column_widths(df: "DataFrame", terminal_width: int) -> List[int]:
    """Compute per-column display widths (index column first) that fit the terminal.

    Three passes: meet the average width, then expand toward average/header
    width, then toward max content width, each limited by remaining space.
    """
    num_rows, num_columns = len(df), len(df.columns)
    index_column_width = max(len(str(num_rows)) + 2, 4)  # tabulate index column min width is 4
    terminal_width_buffer = 10
    available_width = terminal_width - terminal_width_buffer - index_column_width - (num_columns + 2)
    avg_available_width = available_width // num_columns

    header_widths, content_avg_widths, content_max_widths, column_margin = {}, {}, {}, {}
    for column in df.columns:
        header_widths[column] = len(column)
        contents = []
        for value in df[column]:
            contents.append(len(str(value)))
        content_avg_widths[column] = sum(contents) // len(contents)
        content_max_widths[column] = max(contents)
        # if header is longer than the longest content, the margin is 4; otherwise is 2
        # so we need to record this for every column
        if header_widths[column] >= content_max_widths[column]:
            column_margin[column] = 4
        else:
            column_margin[column] = 2

    column_widths = {}
    # first round: try to meet the average(or column header) width
    # record columns that need more width, we will deal with them in second round if we still have width
    round_one_left_columns = []
    for column in df.columns:
        expected_width = max(header_widths[column], content_avg_widths[column]) + column_margin[column]
        if avg_available_width <= expected_width:
            column_widths[column] = avg_available_width
            round_one_left_columns.append(column)
        else:
            column_widths[column] = expected_width

    current_available_width = available_width - sum(column_widths.values())
    if current_available_width > 0:
        # second round: assign left available width to those columns that need more
        # assign with greedy, sort recorded columns first from longest to shortest;
        # iterate and try to meet each column's expected width
        column_avg_tuples = _build_sorted_column_widths_tuple_list(
            round_one_left_columns, header_widths, content_avg_widths, column_margin
        )
        current_available_width, column_widths = _assign_available_width(
            column_avg_tuples, current_available_width, column_widths, avg_available_width
        )

    if current_available_width > 0:
        # third round: if there are still left available width, assign to try to meet the max width
        # still use greedy, sort first and iterate through all columns
        column_max_tuples = _build_sorted_column_widths_tuple_list(
            df.columns, header_widths, content_max_widths, column_margin
        )
        current_available_width, column_widths = _assign_available_width(
            column_max_tuples, current_available_width, column_widths
        )

    max_col_widths = [index_column_width]  # index column
    max_col_widths += [max(column_widths[column] - column_margin[column], 1) for column in df.columns]  # sub margin
    return max_col_widths


def pretty_print_dataframe_as_table(df: "DataFrame") -> None:
    """Render a DataFrame as a grid table sized to the current terminal width."""
    # try to get terminal window width
    try:
        terminal_width = shutil.get_terminal_size().columns
    except Exception:  # pylint: disable=broad-except
        terminal_width = 120  # default value for Windows Terminal launch size columns
    column_widths = _calculate_column_widths(df, terminal_width)
    print(tabulate(df, headers="keys", tablefmt="grid", maxcolwidths=column_widths, maxheadercolwidths=column_widths))


def is_format_exception():
    """True when PROMPTFLOW_STRUCTURE_EXCEPTION_OUTPUT=true requests JSON exception output."""
    if os.environ.get("PROMPTFLOW_STRUCTURE_EXCEPTION_OUTPUT", "false").lower() == "true":
        return True
    return False


def exception_handler(command: str):
    """Catch known cli exceptions."""

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                if is_format_exception():
                    # When the flag format_exception is set in command,
                    # it will write a json with exception info and command to stderr.
                    error_msg = ExceptionPresenter.create(e).to_dict(include_debug_info=True)
                    error_msg["command"] = " ".join(sys.argv)
                    sys.stderr.write(json.dumps(error_msg))
                # Known promptflow errors print red and exit(1); anything else re-raises.
                if isinstance(e, PromptflowException):
                    print_red_error(f"{command} failed with {e.__class__.__name__}: {str(e)}")
                    exit(1)
                else:
                    raise e

        return wrapper

    return decorator


def get_secret_input(prompt, mask="*"):
    """Get secret input with mask printed on screen in CLI.

    Provide better handling for control characters:
    - Handle Ctrl-C as KeyboardInterrupt
    - Ignore control characters and print warning message.
    """
    if not isinstance(prompt, str):
        raise TypeError(f"prompt must be a str, not ${type(prompt).__name__}")
    if not isinstance(mask, str):
        raise TypeError(f"mask argument must be a one-character str, not ${type(mask).__name__}")
    if len(mask) != 1:
        raise ValueError("mask argument must be a one-character str")

    if sys.platform == "win32":
        # For some reason, mypy reports that msvcrt doesn't have getch, ignore this warning:
        from msvcrt import getch  # type: ignore
    else:  # macOS and Linux
        import termios
        import tty

        def getch():
            # Raw-mode single-character read; terminal settings restored afterwards.
            fd = sys.stdin.fileno()
            old_settings = termios.tcgetattr(fd)
            try:
                tty.setraw(sys.stdin.fileno())
                ch = sys.stdin.read(1)
            finally:
                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
            return ch

    secret_input = []
    sys.stdout.write(prompt)
    sys.stdout.flush()

    while True:
        key = ord(getch())
        if key == 13:  # Enter key pressed.
            sys.stdout.write("\n")
            return "".join(secret_input)
        elif key == 3:  # Ctrl-C pressed.
            raise KeyboardInterrupt()
        elif key in (8, 127):  # Backspace/Del key erases previous output.
            if len(secret_input) > 0:
                # Erases previous character.
                sys.stdout.write("\b \b")  # \b doesn't erase the character, it just moves the cursor back.
                sys.stdout.flush()
                secret_input = secret_input[:-1]
        elif 0 <= key <= 31:
            msg = "\nThe last user input got ignored as it is control character."
            print_yellow_warning(msg)
            # Re-print the prompt plus the masks typed so far.
            sys.stdout.write(prompt + mask * len(secret_input))
            sys.stdout.flush()
        else:
            # display the mask character.
            char = chr(key)
            sys.stdout.write(mask)
            sys.stdout.flush()
            secret_input.append(char)


def _copy_to_flow(flow_path, source_file):
    """Copy a file or folder into the flow directory, announcing create/overwrite."""
    target = flow_path / source_file.name
    action = "Overwriting" if target.exists() else "Creating"
    if source_file.is_file():
        print(f"{action} {source_file.name}...")
        shutil.copy2(source_file, target)
    else:
        print(f"{action} {source_file.name} folder...")
        shutil.copytree(source_file, target, dirs_exist_ok=True)


def _output_result_list_with_format(result_list: List[Dict], output_format: CLIListOutputFormat) -> None:
    """Print a list of dicts either as a table or as JSON; unknown formats warn and fall back to JSON."""
    import pandas as pd

    if output_format == CLIListOutputFormat.TABLE:
        df = pd.DataFrame(result_list)
        df.fillna("", inplace=True)
        pretty_print_dataframe_as_table(df)
    elif output_format == CLIListOutputFormat.JSON:
        print(json.dumps(result_list, indent=4))
    else:
        warning_message = (
            f"Unknown output format {output_format!r}, accepted values are 'json' and 'table';"
            "will print using 'json'."
        )
        logger.warning(warning_message)
        print(json.dumps(result_list, indent=4))


def _get_cli_activity_name(cli, args):
    """Build a telemetry activity name like "pf.action.sub_action" from parsed args."""
    activity_name = cli
    if getattr(args, "action", None):
        activity_name += f".{args.action}"
    if getattr(args, "sub_action", None):
        activity_name += f".{args.sub_action}"
    return activity_name


def _try_delete_existing_run_record(run_name: str):
    """Delete the ORM record of a run if it exists; missing runs are ignored."""
    from promptflow._sdk._errors import RunNotFoundError
    from promptflow._sdk._orm import RunInfo as ORMRun

    try:
        ORMRun.delete(run_name)
    except RunNotFoundError:
        pass
promptflow/src/promptflow/promptflow/_cli/_utils.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/_cli/_utils.py", "repo_id": "promptflow", "token_count": 7526 }
30
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

from promptflow import tool


@tool
def line_process(groundtruth: str, prediction: str):
    """
    Grade a single line's prediction against its ground truth.

    The comparison is case-insensitive; a match yields "Correct" and any
    mismatch yields "Incorrect".

    :param groundtruth: the groundtruth of a single line.
    :param prediction: the prediction of a single line.
    """
    # Normalize both sides before comparing so case differences don't matter.
    is_match = groundtruth.lower() == prediction.lower()
    if is_match:
        return "Correct"
    return "Incorrect"
promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/line_process.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/line_process.py", "repo_id": "promptflow", "token_count": 156 }
31
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

import copy
import json
import os
from dataclasses import fields, is_dataclass
from pathlib import Path
from typing import Any, Dict, List

from promptflow._constants import CONNECTION_NAME_PROPERTY, CONNECTION_SECRET_KEYS, PROMPTFLOW_CONNECTIONS
from promptflow._sdk._constants import CustomStrongTypeConnectionConfigs
from promptflow._utils.utils import try_import
from promptflow.contracts.tool import ConnectionType
from promptflow.contracts.types import Secret


class ConnectionManager:
    """This class will be used for construction mode to run flow. Do not include it into tool code."""

    instance = None

    def __init__(self, _dict: Dict[str, dict] = None):
        """Build the manager from an explicit connection dict, or — for tests only —
        from the JSON file pointed to by the PROMPTFLOW_CONNECTIONS env var.

        :param _dict: mapping of connection name -> connection definition dict.
        :raises ValueError: when the env-var-referenced connection file does not exist.
        """
        if _dict is None and PROMPTFLOW_CONNECTIONS in os.environ:
            # !!! Important !!!: Do not leverage this environment variable in any production code, this is test only.
            # (A previously nested "env var not set" re-check was removed: this branch
            # only executes when PROMPTFLOW_CONNECTIONS is present, so it was unreachable.)
            connection_path = Path(os.environ[PROMPTFLOW_CONNECTIONS]).resolve().absolute()
            if not connection_path.exists():
                raise ValueError(f"Connection file not exists. Path {connection_path.as_posix()}.")
            # read_text() closes the file; the old open(...).read() leaked the handle.
            _dict = json.loads(connection_path.read_text())
        self._connections_dict = _dict or {}
        self._connections = self._build_connections(self._connections_dict)

    @classmethod
    def _build_connections(cls, _dict: Dict[str, dict]):
        """Build connection dict.

        Instantiates each entry's connection class, attaches secret-key metadata
        for log scrubbing and the connection name for serialization.
        :raises ValueError: on an unknown connection type.
        """
        from promptflow._core.tools_manager import connections as cls_mapping

        cls.import_requisites(_dict)
        connections = {}  # key to connection object
        for key, connection_dict in _dict.items():
            typ = connection_dict.get("type")
            if typ not in cls_mapping:
                supported = [key for key in cls_mapping.keys() if not key.startswith("_")]
                raise ValueError(f"Unknown connection {key!r} type {typ!r}, supported are {supported}.")
            value = connection_dict.get("value", {})
            connection_class = cls_mapping[typ]

            from promptflow.connections import CustomConnection

            if connection_class is CustomConnection:
                # Note: CustomConnection definition can not be got, secret keys will be provided in connection dict.
                secret_keys = connection_dict.get("secret_keys", [])
                secrets = {k: v for k, v in value.items() if k in secret_keys}
                configs = {k: v for k, v in value.items() if k not in secrets}
                connection_value = connection_class(configs=configs, secrets=secrets)
                if CustomStrongTypeConnectionConfigs.PROMPTFLOW_TYPE_KEY in configs:
                    connection_value.custom_type = configs[CustomStrongTypeConnectionConfigs.PROMPTFLOW_TYPE_KEY]
            else:
                """
                Note: Ignore non exists keys of connection class,
                because there are some keys just used by UX like resource id, while not used by backend.
                """
                if is_dataclass(connection_class):
                    # Do not delete this branch, as promptflow_vectordb.connections is dataclass type.
                    cls_fields = {f.name: f for f in fields(connection_class)}
                    connection_value = connection_class(**{k: v for k, v in value.items() if k in cls_fields})
                    secret_keys = [f.name for f in cls_fields.values() if f.type == Secret]
                else:
                    connection_value = connection_class(**{k: v for k, v in value.items()})
                    secrets = getattr(connection_value, "secrets", {})
                    secret_keys = list(secrets.keys()) if isinstance(secrets, dict) else []
            # Set secret keys for log scrubbing
            setattr(connection_value, CONNECTION_SECRET_KEYS, secret_keys)
            # Use this hack to make sure serialization works
            setattr(connection_value, CONNECTION_NAME_PROPERTY, key)
            connections[key] = connection_value
        return connections

    @classmethod
    def init_from_env(cls):
        """Construct a manager that reads the PROMPTFLOW_CONNECTIONS env var (test only)."""
        return ConnectionManager()

    def get(self, connection_info: Any) -> Any:
        """Get Connection by connection info.

        connection_info:
            connection name as string or connection object
        """
        if isinstance(connection_info, str):
            return self._connections.get(connection_info)
        elif ConnectionType.is_connection_value(connection_info):
            return connection_info
        return None

    def get_secret_list(self) -> List[str]:
        """Collect every secret value held by every managed connection (for scrubbing)."""

        def secrets():
            for connection in self._connections.values():
                secret_keys = getattr(connection, CONNECTION_SECRET_KEYS, [])
                for secret_key in secret_keys:
                    yield getattr(connection, secret_key)

        return list(secrets())

    @classmethod
    def import_requisites(cls, _dict: Dict[str, dict]):
        """Import connection required modules."""
        modules = set()
        for key, connection_dict in _dict.items():
            module = connection_dict.get("module")
            if module:
                modules.add(module)
        for module in modules:
            # Suppress import error, as we have legacy module promptflow.tools.connections.
            try_import(module, f"Import connection module {module!r} failed.", raise_error=False)

    @staticmethod
    def is_legacy_connections(_dict: Dict[str, dict]):
        """Detect if is legacy connections. Legacy connections dict doesn't have module and type.
        So import requisites can not be performed. Only request from MT will hit this.

        Legacy connection example: {"aoai_config": {"api_key": "..."}}
        """
        has_module = any(isinstance(v, dict) and "module" in v for k, v in _dict.items())
        return not has_module

    def to_connections_dict(self) -> dict:
        """Get all connections and reformat to key-values format."""
        # Value returned: {"aoai_config": {"api_key": "..."}}
        return copy.deepcopy(self._connections_dict)
promptflow/src/promptflow/promptflow/_core/connection_manager.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/_core/connection_manager.py", "repo_id": "promptflow", "token_count": 2536 }
32
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import logging
import os.path
import uuid
from itertools import product
from os import PathLike
from pathlib import Path
from typing import Optional, Union

import pydash

from promptflow._sdk._constants import (
    DEFAULT_ENCODING,
    FLOW_DIRECTORY_MACRO_IN_CONFIG,
    HOME_PROMPT_FLOW_DIR,
    SERVICE_CONFIG_FILE,
    ConnectionProvider,
)
from promptflow._sdk._utils import call_from_extension, read_write_by_user
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import dump_yaml, load_yaml
from promptflow.exceptions import ErrorTarget, ValidationException

logger = get_cli_sdk_logger()


class ConfigFileNotFound(ValidationException):
    """Raised when the workspace config.json cannot be located."""

    pass


class InvalidConfigFile(ValidationException):
    """Raised when a located config file is missing required workspace fields."""

    pass


class InvalidConfigValue(ValidationException):
    """Raised when a config value fails validation (see :meth:`Configuration._validate`)."""

    pass


class Configuration(object):
    """Promptflow SDK/CLI configuration, persisted as YAML under the user's
    prompt flow home directory.

    Keys are dotted paths (e.g. ``"telemetry.enabled"``) resolved with
    ``pydash.get``/``pydash.set_``. Use :meth:`get_instance` for the shared
    singleton; constructing directly creates an independent copy backed by
    the same file.
    """

    CONFIG_PATH = Path(HOME_PROMPT_FLOW_DIR) / SERVICE_CONFIG_FILE
    # Dotted config keys understood by this class.
    COLLECT_TELEMETRY = "telemetry.enabled"
    EXTENSION_COLLECT_TELEMETRY = "extension.telemetry_enabled"
    INSTALLATION_ID = "cli.installation_id"
    CONNECTION_PROVIDER = "connection.provider"
    RUN_OUTPUT_PATH = "run.output_path"
    USER_AGENT = "user_agent"
    ENABLE_INTERNAL_FEATURES = "enable_internal_features"
    # Singleton cache used by get_instance().
    _instance = None

    def __init__(self, overrides=None):
        """Load config from CONFIG_PATH, creating an empty config file on
        first use; ``overrides`` (dotted-key dict) are validated and applied
        in memory only (not written back to disk here).
        """
        if not os.path.exists(self.CONFIG_PATH.parent):
            os.makedirs(self.CONFIG_PATH.parent, exist_ok=True)
        if not os.path.exists(self.CONFIG_PATH):
            # Bootstrap an empty YAML config restricted to the current user.
            self.CONFIG_PATH.touch(mode=read_write_by_user(), exist_ok=True)
            with open(self.CONFIG_PATH, "w", encoding=DEFAULT_ENCODING) as f:
                dump_yaml({}, f)
        self._config = load_yaml(self.CONFIG_PATH)
        if not self._config:
            self._config = {}
        # Allow config override by kwargs
        overrides = overrides or {}
        for key, value in overrides.items():
            self._validate(key, value)
            pydash.set_(self._config, key, value)

    @property
    def config(self):
        """The in-memory config dict (as loaded plus any overrides)."""
        return self._config

    @classmethod
    def get_instance(cls):
        """Use this to get instance to avoid multiple copies of same global config."""
        if cls._instance is None:
            cls._instance = Configuration()
        return cls._instance

    def set_config(self, key, value):
        """Store config in file to avoid concurrent write."""
        self._validate(key, value)
        pydash.set_(self._config, key, value)
        with open(self.CONFIG_PATH, "w", encoding=DEFAULT_ENCODING) as f:
            dump_yaml(self._config, f)

    def get_config(self, key):
        """Return the value for dotted ``key``, or None if absent or lookup fails."""
        try:
            return pydash.get(self._config, key, None)
        except Exception:  # pylint: disable=broad-except
            return None

    def get_all(self):
        """Return the whole config dict."""
        return self._config

    @classmethod
    def _get_workspace_from_config(
        cls,
        *,
        path: Union[PathLike, str] = None,
    ) -> str:
        """Return a workspace arm id from an existing Azure Machine Learning Workspace.

        Reads workspace configuration from a file. Throws an exception if the
        config file can't be found.

        :param path: The path to the config file or starting directory to search.
            The parameter defaults to starting the search in the current directory.
        :type path: str
        :return: The workspace arm id for an existing Azure ML Workspace.
        :rtype: ~str
        """
        # Imported lazily: azure-ai-ml is an optional dependency only needed
        # for the azureml connection provider.
        from azure.ai.ml import MLClient
        from azure.ai.ml._file_utils.file_utils import traverse_up_path_and_find_file
        from azure.ai.ml.constants._common import AZUREML_RESOURCE_PROVIDER, RESOURCE_ID_FORMAT

        path = Path(".") if path is None else Path(path)
        if path.is_file():
            found_path = path
        else:
            # Based on priority
            # Look in config dirs like .azureml or plain directory
            # with None
            directories_to_look = [".azureml", None]
            files_to_look = ["config.json"]
            found_path = None
            for curr_dir, curr_file in product(directories_to_look, files_to_look):
                logging.debug(
                    "No config file directly found, starting search from %s "
                    "directory, for %s file name to be present in "
                    "%s subdirectory",
                    path,
                    curr_file,
                    curr_dir,
                )
                # Walk up at most 20 parent directories looking for config.json.
                found_path = traverse_up_path_and_find_file(
                    path=path,
                    file_name=curr_file,
                    directory_name=curr_dir,
                    num_levels=20,
                )
                if found_path:
                    break
            if not found_path:
                msg = (
                    "We could not find config.json in: {} or in its parent directories. "
                    "Please provide the full path to the config file or ensure that "
                    "config.json exists in the parent directories."
                )
                raise ConfigFileNotFound(
                    message=msg.format(path),
                    no_personal_data_message=msg.format("[path]"),
                    target=ErrorTarget.CONTROL_PLANE_SDK,
                )
        subscription_id, resource_group, workspace_name = MLClient._get_workspace_info(found_path)
        if not (subscription_id and resource_group and workspace_name):
            raise InvalidConfigFile(
                "The subscription_id, resource_group and workspace_name can not be empty. Got: "
                f"subscription_id: {subscription_id}, resource_group: {resource_group}, "
                f"workspace_name: {workspace_name} from file {found_path}."
            )
        return RESOURCE_ID_FORMAT.format(subscription_id, resource_group, AZUREML_RESOURCE_PROVIDER, workspace_name)

    def get_connection_provider(self, path=None) -> Optional[str]:
        """Get the current connection provider. Default to local if not configured."""
        provider = self.get_config(key=self.CONNECTION_PROVIDER)
        return self.resolve_connection_provider(provider, path=path)

    @classmethod
    def resolve_connection_provider(cls, provider, path=None) -> Optional[str]:
        """Normalize a configured provider value: None -> local; the azureml
        enum value -> an ``azureml:<workspace arm id>`` string resolved from
        config.json; anything else passes through unchanged."""
        if provider is None:
            return ConnectionProvider.LOCAL
        if provider == ConnectionProvider.AZUREML.value:
            # Note: The below function has azure-ai-ml dependency.
            return "azureml:" + cls._get_workspace_from_config(path=path)
        # If provider not None and not Azure, return it directly.
        # It can be the full path of a workspace.
        return provider

    def get_telemetry_consent(self) -> Optional[bool]:
        """Get the current telemetry consent value. Return None if not configured."""
        if call_from_extension():
            # VS Code extension keeps its own telemetry flag.
            return self.get_config(key=self.EXTENSION_COLLECT_TELEMETRY)
        return self.get_config(key=self.COLLECT_TELEMETRY)

    def set_telemetry_consent(self, value):
        """Set the telemetry consent value and store in local."""
        self.set_config(key=self.COLLECT_TELEMETRY, value=value)

    def get_or_set_installation_id(self):
        """Get user id if exists, otherwise set installation id and return it."""
        user_id = self.get_config(key=self.INSTALLATION_ID)
        if user_id:
            return user_id
        else:
            user_id = str(uuid.uuid4())
            self.set_config(key=self.INSTALLATION_ID, value=user_id)
            return user_id

    def get_run_output_path(self) -> Optional[str]:
        """Get the run output path in local."""
        return self.get_config(key=self.RUN_OUTPUT_PATH)

    def _to_dict(self):
        """Return the config as a plain dict (same object as ``config``)."""
        return self._config

    @staticmethod
    def _validate(key: str, value: str) -> None:
        """Reject invalid values for known keys; currently only guards
        RUN_OUTPUT_PATH against pointing at the flow directory macro itself.

        :raises InvalidConfigValue: if the value is rejected.
        """
        if key == Configuration.RUN_OUTPUT_PATH:
            if value.rstrip("/").endswith(FLOW_DIRECTORY_MACRO_IN_CONFIG):
                raise InvalidConfigValue(
                    "Cannot specify flow directory as run output path; "
                    "if you want to specify run output path under flow directory, "
                    "please use its child folder, e.g. '${flow_directory}/.runs'."
                )
        return

    def get_user_agent(self) -> Optional[str]:
        """Get customer set user agent.

        If set, will add prefix `PFCustomer_`"""
        user_agent = self.get_config(key=self.USER_AGENT)
        if user_agent:
            return f"PFCustomer_{user_agent}"
        return user_agent

    def is_internal_features_enabled(self) -> Optional[bool]:
        """Get enable_preview_features"""
        result = self.get_config(key=self.ENABLE_INTERNAL_FEATURES)
        # Accept the string "true" (any case) as well as boolean True.
        if isinstance(result, str):
            return result.lower() == "true"
        return result is True
promptflow/src/promptflow/promptflow/_sdk/_configuration.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/_sdk/_configuration.py", "repo_id": "promptflow", "token_count": 3891 }
33
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import flask
from jinja2 import Template
from pathlib import Path
from flask import Blueprint, request, url_for, current_app as app


def construct_staticweb_blueprint(static_folder):
    """Construct static web blueprint.

    :param static_folder: Directory holding the built static assets; an
        ``index.html`` inside it is rendered as the home page. May be None,
        in which case a plain welcome page is served.
    :return: A configured :class:`flask.Blueprint`.
    """
    staticweb_blueprint = Blueprint('staticweb_blueprint', __name__, static_folder=static_folder)

    @staticweb_blueprint.route("/", methods=["GET", "POST"])
    def home():
        """Show the home page."""
        index_path = Path(static_folder) / "index.html" if static_folder else None
        if index_path and index_path.exists():
            # read_text closes the file promptly; the previous
            # open(...).read() leaked the handle until GC.
            template = Template(index_path.read_text(encoding="UTF-8"))
            # NOTE(review): relies on Flask >= 2.0 accepting a jinja2.Template
            # instance here instead of a template name — confirm pinned Flask version.
            return flask.render_template(template, url_for=url_for)
        else:
            return "<h1>Welcome to promptflow app.</h1>"

    @staticweb_blueprint.route("/<path:path>", methods=["GET", "POST", "PUT", "DELETE", "PATCH"])
    def notfound(path):
        """Catch-all route: answer 404 for paths/methods this app does not serve."""
        rules = {rule.rule: rule.methods for rule in app.url_map.iter_rules()}
        if path not in rules or request.method not in rules[path]:
            unsupported_message = (
                f"The requested api {path!r} with {request.method} is not supported by current app, "
                f"if you entered the URL manually please check your spelling and try again."
            )
            return unsupported_message, 404

    return staticweb_blueprint
promptflow/src/promptflow/promptflow/_sdk/_serving/blueprint/static_web_blueprint.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/_sdk/_serving/blueprint/static_web_blueprint.py", "repo_id": "promptflow", "token_count": 545 }
34
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import os
import time
import base64
import zlib

from flask import jsonify, request

from promptflow._sdk._serving._errors import (
    JsonPayloadRequiredForMultipleInputFields,
    MissingRequiredFlowInput,
    NotAcceptable,
)
from promptflow._utils.exception_utils import ErrorResponse, ExceptionPresenter
from promptflow.contracts.flow import Flow as FlowContract
from promptflow.exceptions import ErrorTarget


def load_request_data(flow, raw_data, logger):
    """Parse the raw request payload into a flow input dict.

    JSON payloads are parsed directly. A non-JSON payload is only accepted
    when the flow has a single input field, in which case the raw text is
    mapped onto that field.

    :raises JsonPayloadRequiredForMultipleInputFields: non-JSON payload but
        the flow declares more than one input.
    """
    try:
        data = json.loads(raw_data)
    except Exception:
        input_value = None  # renamed from `input` to avoid shadowing the builtin
        # Non-JSON only works if there's exactly 1 input field; otherwise we
        # cannot tell which field the raw payload belongs to.
        if len(flow.inputs) > 1:
            message = (
                "Promptflow executor received non json data, but there's more than 1 input fields, "
                "please use json request data instead."
            )
            raise JsonPayloadRequiredForMultipleInputFields(message, target=ErrorTarget.SERVING_APP)
        if isinstance(raw_data, (bytes, bytearray)):
            input_value = str(raw_data, "UTF-8")
        elif isinstance(raw_data, str):
            input_value = raw_data
        default_key = list(flow.inputs.keys())[0]
        logger.debug(f"Promptflow executor received non json data: {input_value}, default key: {default_key}")
        data = {default_key: input_value}
    return data


def validate_request_data(flow, data):
    """Validate required request data is provided."""
    # TODO: Note that we don't have default flow input presently, all of the default is None.
    required_inputs = [k for k, v in flow.inputs.items() if v.default is None]
    missing_inputs = [k for k in required_inputs if k not in data]
    if missing_inputs:
        raise MissingRequiredFlowInput(
            f"Required input fields {missing_inputs} are missing in request data {data!r}",
            target=ErrorTarget.SERVING_APP,
        )


def streaming_response_required():
    """Check if streaming response is required."""
    return "text/event-stream" in request.accept_mimetypes.values()


def get_sample_json(project_path, logger):
    """Load the swagger sample (samples.json) from the project, or None if absent."""
    sample_file = os.path.join(project_path, "samples.json")
    if not os.path.exists(sample_file):
        return None
    logger.info("Promptflow sample file detected.")
    with open(sample_file, "r", encoding="UTF-8") as f:
        sample = json.load(f)
    return sample


# get evaluation only fields
def get_output_fields_to_remove(flow: FlowContract, logger) -> list:
    """get output fields to remove."""
    included_outputs = os.getenv("PROMPTFLOW_RESPONSE_INCLUDED_FIELDS", None)
    if included_outputs:
        # Env var gives an explicit allow-list; everything else is removed.
        logger.info(f"Response included fields: {included_outputs}")
        res = json.loads(included_outputs)
        return [k for k in flow.outputs if k not in res]
    return [k for k, v in flow.outputs.items() if v.evaluation_only]


def handle_error_to_response(e, logger):
    """Convert an exception into a (json_body, status_code) Flask response pair."""
    presenter = ExceptionPresenter.create(e)
    logger.error(f"Promptflow serving app error: {presenter.to_dict()}")
    logger.error(f"Promptflow serving error traceback: {presenter.formatted_traceback}")
    resp = ErrorResponse(presenter.to_dict())
    response_code = resp.response_code
    # The http response code for NotAcceptable is 406.
    # Currently the error framework does not allow response code overriding,
    # we add a check here to override the response code.
    # TODO: Consider how to embed this logic into the error framework.
    if isinstance(e, NotAcceptable):
        response_code = 406
    return jsonify(resp.to_simplified_dict()), response_code


def get_pf_serving_env(env_key: str):
    """Read a serving env var, falling back from the PROMPTFLOW_ to the PF_ prefix."""
    if not env_key:  # was len(env_key) == 0, which raised on None
        return None
    value = os.getenv(env_key, None)
    if value is None and env_key.startswith("PROMPTFLOW_"):
        value = os.getenv(env_key.replace("PROMPTFLOW_", "PF_"), None)
    return value


def get_cost_up_to_now(start_time: float):
    """Elapsed wall-clock time since start_time, in milliseconds."""
    return (time.time() - start_time) * 1000


def enable_monitoring(func):
    """Decorator marking a function for monitoring (flag read elsewhere)."""
    func._enable_monitoring = True
    return func


def normalize_connection_name(connection_name: str):
    """Replace spaces with underscores in a connection name."""
    return connection_name.replace(" ", "_")


def decode_dict(data: str) -> dict:
    """Inverse of :func:`encode_dict`: base64 -> gzip-decompress -> json."""
    # str -> bytes
    data = data.encode()
    zipped_conns = base64.b64decode(data)
    # gzip decode (16 + MAX_WBITS selects the gzip wrapper)
    conns_data = zlib.decompress(zipped_conns, 16 + zlib.MAX_WBITS)
    return json.loads(conns_data.decode())


def encode_dict(data: dict) -> str:
    """Serialize a dict to a compact string: json -> gzip-compress -> base64."""
    # json encode
    data = json.dumps(data)
    # gzip compress
    gzip_compress = zlib.compressobj(9, zlib.DEFLATED, zlib.MAX_WBITS | 16)
    zipped_data = gzip_compress.compress(data.encode()) + gzip_compress.flush()
    # base64 encode
    b64_data = base64.b64encode(zipped_data)
    # bytes -> str
    return b64_data.decode()
promptflow/src/promptflow/promptflow/_sdk/_serving/utils.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/_sdk/_serving/utils.py", "repo_id": "promptflow", "token_count": 1868 }
35
import json import os from pathlib import Path from PIL import Image import streamlit as st from streamlit_quill import st_quill from promptflow._sdk._serving.flow_invoker import FlowInvoker from utils import dict_iter_render_message, parse_list_from_html, parse_image_content invoker = None {% set indent_level = 4 %} def start(): def clear_chat() -> None: st.session_state.messages = [] def render_message(role, message_items): with st.chat_message(role): dict_iter_render_message(message_items) def show_conversation() -> None: if "messages" not in st.session_state: st.session_state.messages = [] st.session_state.history = [] if st.session_state.messages: for role, message_items in st.session_state.messages: render_message(role, message_items) def get_chat_history_from_session(): if "history" in st.session_state: return st.session_state.history return [] def submit(**kwargs) -> None: st.session_state.messages.append(("user", kwargs)) session_state_history = dict() session_state_history.update({"inputs": kwargs}) with container: render_message("user", kwargs) # Force append chat history to kwargs {% if is_chat_flow %} {{ ' ' * indent_level * 2 }}response = run_flow({'{{chat_history_input_name}}': get_chat_history_from_session(), **kwargs}) {% else %} {{ ' ' * indent_level * 2 }}response = run_flow(kwargs) {% endif %} st.session_state.messages.append(("assistant", response)) session_state_history.update({"outputs": response}) st.session_state.history.append(session_state_history) with container: render_message("assistant", response) def run_flow(data: dict) -> dict: global invoker if not invoker: {% if flow_path %} {{ ' ' * indent_level * 3 }}flow = Path('{{flow_path}}') {{ ' ' * indent_level * 3 }}dump_path = Path('{{flow_path}}').parent {% else %} {{ ' ' * indent_level * 3 }}flow = Path(__file__).parent / "flow" {{ ' ' * indent_level * 3 }}dump_path = flow.parent {% endif %} if flow.is_dir(): os.chdir(flow) else: os.chdir(flow.parent) invoker = FlowInvoker(flow, 
connection_provider="local", dump_to=dump_path) result = invoker.invoke(data) return result image = Image.open(Path(__file__).parent / "logo.png") st.set_page_config( layout="wide", page_title="{{flow_name}} - Promptflow App", page_icon=image, menu_items={ 'About': """ # This is a Promptflow App. You can refer to [promptflow](https://github.com/microsoft/promptflow) for more information. """ } ) # Set primary button color here since button color of the same form need to be identical in streamlit, but we only need Run/Chat button to be blue. st.config.set_option("theme.primaryColor", "#0F6CBD") st.title("{{flow_name}}") st.divider() st.chat_message("assistant").write("Hello, please input following flow inputs.") container = st.container() with container: show_conversation() with st.form(key='input_form', clear_on_submit=True): settings_path = os.path.join(os.path.dirname(__file__), "settings.json") if os.path.exists(settings_path): with open(settings_path, "r", encoding="utf-8") as file: json_data = json.load(file) environment_variables = list(json_data.keys()) for environment_variable in environment_variables: secret_input = st.sidebar.text_input(label=environment_variable, type="password", placeholder=f"Please input {environment_variable} here. If you input before, you can leave it blank.") if secret_input != "": os.environ[environment_variable] = secret_input {% for flow_input, (default_value, value_type) in flow_inputs.items() %} {% if value_type == "list" %} {{ ' ' * indent_level * 2 }}st.text('{{flow_input}}') {{ ' ' * indent_level * 2 }}{{flow_input}} = st_quill(html=True, toolbar=["image"], key='{{flow_input}}', placeholder='Please enter the list values and use the image icon to upload a picture. 
Make sure to format each list item correctly with line breaks') {% elif value_type == "image" %} {{ ' ' * indent_level * 2 }}{{flow_input}} = st.file_uploader(label='{{flow_input}}') {% elif value_type == "string" %} {{ ' ' * indent_level * 2 }}{{flow_input}} = st.text_input(label='{{flow_input}}', placeholder='{{default_value}}') {% else %} {{ ' ' * indent_level * 2 }}{{flow_input}} = st.text_input(label='{{flow_input}}', placeholder={{default_value}}) {% endif %} {% endfor %} cols = st.columns(7) submit_bt = cols[0].form_submit_button(label='{{label}}', type='primary') clear_bt = cols[1].form_submit_button(label='Clear') if submit_bt: with st.spinner("Loading..."): {% for flow_input, (default_value, value_type) in flow_inputs.items() %} {% if value_type == "list" %} {{ ' ' * indent_level * 4 }}{{flow_input}} = parse_list_from_html({{flow_input}}) {% elif value_type == "image" %} {{ ' ' * indent_level * 4 }}{{flow_input}} = parse_image_content({{flow_input}}, {{flow_input}}.type if {{flow_input}} else None) {% endif %} {% endfor %} submit({{flow_inputs_params}}) if clear_bt: with st.spinner("Cleaning..."): clear_chat() st.rerun() if __name__ == "__main__": start()
promptflow/src/promptflow/promptflow/_sdk/data/executable/main.py.jinja2/0
{ "file_path": "promptflow/src/promptflow/promptflow/_sdk/data/executable/main.py.jinja2", "repo_id": "promptflow", "token_count": 2320 }
36
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from datetime import datetime
from typing import List

from promptflow._sdk._constants import MAX_LIST_CLI_RESULTS
from promptflow._sdk._orm import Connection as ORMConnection
from promptflow._sdk._telemetry import ActivityType, TelemetryMixin, monitor_operation
from promptflow._sdk._utils import safe_parse_object_list
from promptflow._sdk.entities._connection import _Connection


class ConnectionOperations(TelemetryMixin):
    """ConnectionOperations.

    CRUD-style operations over locally stored connections, backed by the
    ORM layer and instrumented with telemetry.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @monitor_operation(activity_name="pf.connections.list", activity_type=ActivityType.PUBLICAPI)
    def list(
        self,
        max_results: int = MAX_LIST_CLI_RESULTS,
        all_results: bool = False,
    ) -> List[_Connection]:
        """List connections.

        :param max_results: Max number of results to return.
        :type max_results: int
        :param all_results: Return all results.
        :type all_results: bool
        :return: List of run objects.
        :rtype: List[~promptflow.sdk.entities._connection._Connection]
        """
        orm_records = ORMConnection.list(max_results=max_results, all_results=all_results)
        # Connections that fail to parse are skipped with a warning instead
        # of failing the whole listing.
        return safe_parse_object_list(
            obj_list=orm_records,
            parser=_Connection._from_orm_object,
            message_generator=lambda x: f"Failed to load connection {x.connectionName}, skipped.",
        )

    @monitor_operation(activity_name="pf.connections.get", activity_type=ActivityType.PUBLICAPI)
    def get(self, name: str, **kwargs) -> _Connection:
        """Get a connection entity.

        :param name: Name of the connection.
        :type name: str
        :return: connection object retrieved from the database.
        :rtype: ~promptflow.sdk.entities._connection._Connection
        """
        return self._get(name, **kwargs)

    def _get(self, name: str, **kwargs) -> _Connection:
        # Internal variant of get() exposing with_secrets/raise_error switches.
        include_secrets = kwargs.get("with_secrets", False)
        raise_error = kwargs.get("raise_error", True)
        record = ORMConnection.get(name, raise_error)
        if record is None:
            return None
        parser = (
            _Connection._from_orm_object_with_secrets
            if include_secrets
            else _Connection._from_orm_object
        )
        return parser(record)

    @monitor_operation(activity_name="pf.connections.delete", activity_type=ActivityType.PUBLICAPI)
    def delete(self, name: str) -> None:
        """Delete a connection entity.

        :param name: Name of the connection.
        :type name: str
        """
        ORMConnection.delete(name)

    @monitor_operation(activity_name="pf.connections.create_or_update", activity_type=ActivityType.PUBLICAPI)
    def create_or_update(self, connection: _Connection, **kwargs):
        """Create or update a connection.

        :param connection: Run object to create or update.
        :type connection: ~promptflow.sdk.entities._connection._Connection
        """
        record = connection._to_orm_object()
        timestamp = datetime.now().isoformat()
        # First write stamps the creation time; every write bumps the
        # modification time.
        if record.createdDate is None:
            record.createdDate = timestamp
        record.lastModifiedDate = timestamp
        ORMConnection.create_or_update(record)
        return self.get(connection.name)
{ "file_path": "promptflow/src/promptflow/promptflow/_sdk/operations/_connection_operations.py", "repo_id": "promptflow", "token_count": 1297 }
37
from promptflow.exceptions import SystemErrorException, UserErrorException, ValidationException


class InvalidImageInput(ValidationException):
    """Exception raised when an image input fails validation."""

    pass


class LoadMultimediaDataError(UserErrorException):
    """Exception raised when multimedia data cannot be loaded."""

    pass


class YamlParseError(SystemErrorException):
    """Exception raised when yaml parse failed."""

    pass
promptflow/src/promptflow/promptflow/_utils/_errors.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/_utils/_errors.py", "repo_id": "promptflow", "token_count": 85 }
38
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import contextvars import logging import threading from promptflow._utils.utils import set_context class RepeatLogTimer(threading.Timer): """Repeat to log message every interval seconds until it is cancelled.""" def __init__( self, interval_seconds: float, logger: logging.Logger, level: int, log_message_function, args: tuple = None ): self._logger = logger self._level = level self._log_message_function = log_message_function self._function_args = args if args else tuple() self._context = contextvars.copy_context() super().__init__(interval_seconds, function=None) def __enter__(self): self.start() return self def __exit__(self, *args): self.cancel() def run(self): """Override Timer.run method.""" # Set context variables from parent context. set_context(self._context) while not self.finished.wait(self.interval): if not self.finished.is_set(): msgs = self._log_message_function(*self._function_args) for msg in msgs: self._logger.log(self._level, msg) self.finished.set()
promptflow/src/promptflow/promptflow/_utils/thread_utils.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/_utils/thread_utils.py", "repo_id": "promptflow", "token_count": 505 }
39
# How to automatically generate the REST client code

The REST client code in this folder is not manually written; it is generated by autorest.

## Setup

+ install [nodejs](https://nodejs.org/en)
+ install autorest
  + run `npm install -g autorest`

## Download swagger.json

Download swagger.json from [here](https://int.api.azureml-test.ms/flow/swagger/v1.0/swagger.json) to [promptflow/azure/_restclient](../promptflow/azure/_restclient)

## Update code

+ cd to [promptflow/azure/_restclient](../promptflow/azure/_restclient)
+ run `autorest --v3 --python --track2 --version=3.8.0 --use=@autorest/[email protected] --input-file=swagger.json --output-folder=. --namespace=flow --modelerfour.lenient-model-deduplication`
+ don't change `--use`: the latest version of `autorest/python` generates code following a different pattern, which is not compatible with our code.

## Update the generation history

- 2023.11.13 - [Update SDK restclient](https://github.com/microsoft/promptflow/pull/1101).
- 2023.12.18 - [Remove data portal url from the result of pfazure run show](https://github.com/microsoft/promptflow/pull/1497)

## Troubleshooting

### Duplicate object schemas with "xxx" name detected.

This may be caused by duplicate generated class names.

```json
"FlowFeature": {
    "type": "object",
    "properties": {
        "name": {
            "type": "string",
            "nullable": true
        },
        "description": {
            "type": "string",
            "nullable": true
        },
        "state": {
            "type": "object",
            "properties": {
                "Runtime": {
                    "$ref": "#/components/schemas/FlowFeatureState"
                },
                "Executor": {
                    "$ref": "#/components/schemas/FlowFeatureState"
                },
                "PFS": {
                    "$ref": "#/components/schemas/FlowFeatureState"
                }
            },
            "additionalProperties": false,
            "nullable": true
        }
    },
    "additionalProperties": false
},
"FlowFeatureState": {
    "enum": [
        "Ready",
        "E2ETest"
    ],
    "type": "string"
},
```

`FlowFeature` has a nested object field `state`, which will be generated into a new class named `FlowFeatureState`, and it duplicates with the enum `FlowFeatureState`.

To fix this, the server side needs to change the class name in the schema; in this case, the server side changed the object `state` to `states` and the problem was resolved.
promptflow/src/promptflow/promptflow/azure/_restclient/README.md/0
{ "file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/README.md", "repo_id": "promptflow", "token_count": 837 }
40
# coding=utf-8 # -------------------------------------------------------------------------- # Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected]) # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import AsyncHttpResponse from azure.core.rest import HttpRequest from azure.core.tracing.decorator_async import distributed_trace_async from ... import models as _models from ..._vendor import _convert_request from ...operations._flow_runtimes_operations import build_check_ci_availability_request, build_check_mir_availability_request, build_check_runtime_upgrade_request, build_create_runtime_request, build_delete_runtime_request, build_get_runtime_capability_request, build_get_runtime_latest_config_request, build_get_runtime_request, build_list_runtimes_request, build_update_runtime_request T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]] class FlowRuntimesOperations: """FlowRuntimesOperations async operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~flow.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer) -> None: self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace_async async def create_runtime( self, subscription_id: str, resource_group_name: str, workspace_name: str, runtime_name: str, async_call: Optional[bool] = False, msi_token: Optional[bool] = False, skip_port_check: Optional[bool] = False, body: Optional["_models.CreateFlowRuntimeRequest"] = None, **kwargs: Any ) -> "_models.FlowRuntimeDto": """create_runtime. :param subscription_id: The Azure Subscription ID. :type subscription_id: str :param resource_group_name: The Name of the resource group in which the workspace is located. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param runtime_name: :type runtime_name: str :param async_call: :type async_call: bool :param msi_token: :type msi_token: bool :param skip_port_check: :type skip_port_check: bool :param body: :type body: ~flow.models.CreateFlowRuntimeRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: FlowRuntimeDto, or the result of cls(response) :rtype: ~flow.models.FlowRuntimeDto :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRuntimeDto"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] if body is not None: _json = self._serialize.body(body, 'CreateFlowRuntimeRequest') else: _json = None request = build_create_runtime_request( subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, runtime_name=runtime_name, content_type=content_type, json=_json, async_call=async_call, msi_token=msi_token, 
skip_port_check=skip_port_check, template_url=self.create_runtime.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('FlowRuntimeDto', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'} # type: ignore @distributed_trace_async async def update_runtime( self, subscription_id: str, resource_group_name: str, workspace_name: str, runtime_name: str, async_call: Optional[bool] = False, msi_token: Optional[bool] = False, skip_port_check: Optional[bool] = False, body: Optional["_models.UpdateFlowRuntimeRequest"] = None, **kwargs: Any ) -> "_models.FlowRuntimeDto": """update_runtime. :param subscription_id: The Azure Subscription ID. :type subscription_id: str :param resource_group_name: The Name of the resource group in which the workspace is located. :type resource_group_name: str :param workspace_name: The name of the workspace. 
:type workspace_name: str :param runtime_name: :type runtime_name: str :param async_call: :type async_call: bool :param msi_token: :type msi_token: bool :param skip_port_check: :type skip_port_check: bool :param body: :type body: ~flow.models.UpdateFlowRuntimeRequest :keyword callable cls: A custom type or function that will be passed the direct response :return: FlowRuntimeDto, or the result of cls(response) :rtype: ~flow.models.FlowRuntimeDto :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRuntimeDto"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] if body is not None: _json = self._serialize.body(body, 'UpdateFlowRuntimeRequest') else: _json = None request = build_update_runtime_request( subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, runtime_name=runtime_name, content_type=content_type, json=_json, async_call=async_call, msi_token=msi_token, skip_port_check=skip_port_check, template_url=self.update_runtime.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('FlowRuntimeDto', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized update_runtime.metadata = {'url': 
'/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'}  # type: ignore


    @distributed_trace_async
    async def get_runtime(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        runtime_name: str,
        **kwargs: Any
    ) -> "_models.FlowRuntimeDto":
        """get_runtime.

        Fetch a single flow runtime by name.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param runtime_name:
        :type runtime_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FlowRuntimeDto, or the result of cls(response)
        :rtype: ~flow.models.FlowRuntimeDto
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FlowRuntimeDto"]
        # 401/404/409 are mapped to typed azure-core exceptions; any other
        # non-200 status falls through to the generic HttpResponseError below.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))


        request = build_get_runtime_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            runtime_name=runtime_name,
            template_url=self.get_runtime.metadata['url'],
        )
        # _convert_request adapts the azure.core.rest.HttpRequest for the
        # pipeline transport; format_url fills in the client's base URL.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            # failsafe_deserialize returns None instead of raising if the
            # error body does not match the ErrorResponse schema.
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('FlowRuntimeDto', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'}  # type: ignore


    @distributed_trace_async
    async def delete_runtime(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        runtime_name: str,
        async_call: Optional[bool] = False,
        msi_token: Optional[bool] = False,
        **kwargs: Any
    ) -> "_models.FlowRuntimeDto":
        """delete_runtime.

        Delete a flow runtime; returns the (final) runtime DTO.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param runtime_name:
        :type runtime_name: str
        :param async_call:
        :type async_call: bool
        :param msi_token:
        :type msi_token: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FlowRuntimeDto, or the result of cls(response)
        :rtype: ~flow.models.FlowRuntimeDto
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FlowRuntimeDto"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))


        request = build_delete_runtime_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            runtime_name=runtime_name,
            async_call=async_call,
            msi_token=msi_token,
            template_url=self.delete_runtime.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('FlowRuntimeDto', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    delete_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'}  # type: ignore


    @distributed_trace_async
    async def check_ci_availability(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        compute_instance_name: str,
        custom_app_name: str,
        **kwargs: Any
    ) -> "_models.AvailabilityResponse":
        """check_ci_availability.

        Check whether a compute-instance custom app is available to host a runtime.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param compute_instance_name:
        :type compute_instance_name: str
        :param custom_app_name:
        :type custom_app_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailabilityResponse, or the result of cls(response)
        :rtype: ~flow.models.AvailabilityResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AvailabilityResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))


        request = build_check_ci_availability_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            compute_instance_name=compute_instance_name,
            custom_app_name=custom_app_name,
            template_url=self.check_ci_availability.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('AvailabilityResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    check_ci_availability.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/checkCiAvailability'}  # type: ignore


    @distributed_trace_async
    async def check_mir_availability(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        endpoint_name: str,
        deployment_name: str,
        **kwargs: Any
    ) -> "_models.AvailabilityResponse":
        """check_mir_availability.

        Check whether an MIR endpoint/deployment is available to host a runtime.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param endpoint_name:
        :type endpoint_name: str
        :param deployment_name:
        :type deployment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AvailabilityResponse, or the result of cls(response)
        :rtype: ~flow.models.AvailabilityResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AvailabilityResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))


        request = build_check_mir_availability_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            endpoint_name=endpoint_name,
            deployment_name=deployment_name,
            template_url=self.check_mir_availability.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('AvailabilityResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    check_mir_availability.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/checkMirAvailability'}  # type: ignore


    @distributed_trace_async
    async def check_runtime_upgrade(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        runtime_name: str,
        **kwargs: Any
    ) -> bool:
        """check_runtime_upgrade.

        Return True when the named runtime needs an upgrade.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param runtime_name:
        :type runtime_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: bool, or the result of cls(response)
        :rtype: bool
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[bool]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))


        request = build_check_runtime_upgrade_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            runtime_name=runtime_name,
            template_url=self.check_runtime_upgrade.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        # The service returns a bare JSON boolean for this endpoint.
        deserialized = self._deserialize('bool', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    check_runtime_upgrade.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}/needUpgrade'}  # type: ignore


    @distributed_trace_async
    async def get_runtime_capability(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        runtime_name: str,
        **kwargs: Any
    ) -> "_models.FlowRuntimeCapability":
        """get_runtime_capability.

        Fetch the capability descriptor of a runtime.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param runtime_name:
        :type runtime_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FlowRuntimeCapability, or the result of cls(response)
        :rtype: ~flow.models.FlowRuntimeCapability
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FlowRuntimeCapability"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))


        request = build_get_runtime_capability_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            runtime_name=runtime_name,
            template_url=self.get_runtime_capability.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('FlowRuntimeCapability', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_runtime_capability.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}/capability'}  # type: ignore


    @distributed_trace_async
    async def get_runtime_latest_config(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        **kwargs: Any
    ) -> "_models.RuntimeConfiguration":
        """get_runtime_latest_config.

        Fetch the latest runtime configuration for the workspace (no runtime name required).

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: RuntimeConfiguration, or the result of cls(response)
        :rtype: ~flow.models.RuntimeConfiguration
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RuntimeConfiguration"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))


        request = build_get_runtime_latest_config_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            template_url=self.get_runtime_latest_config.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('RuntimeConfiguration', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_runtime_latest_config.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/latestConfig'}  # type: ignore


    @distributed_trace_async
    async def list_runtimes(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        **kwargs: Any
    ) -> List["_models.FlowRuntimeDto"]:
        """list_runtimes.

        List every flow runtime in the workspace.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of FlowRuntimeDto, or the result of cls(response)
        :rtype: list[~flow.models.FlowRuntimeDto]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List["_models.FlowRuntimeDto"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))


        request = build_list_runtimes_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            template_url=self.list_runtimes.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        # '[FlowRuntimeDto]' is msrest's notation for "list of FlowRuntimeDto".
        deserialized = self._deserialize('[FlowRuntimeDto]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list_runtimes.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes'}  # type: ignore
promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_flow_runtimes_operations.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_flow_runtimes_operations.py", "repo_id": "promptflow", "token_count": 11768 }
41
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings

from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer

from .. import models as _models
from .._vendor import _convert_request, _format_url_section

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar

    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
# Client-side validation is off: the service validates parameters itself.
_SERIALIZER.client_side_validation = False
# fmt: off

# Builds the POST request for create_runtime; optional flags arrive via
# **kwargs and the JSON body is passed through to HttpRequest untouched.
def build_create_runtime_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    runtime_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    async_call = kwargs.pop('async_call', False)  # type: Optional[bool]
    msi_token = kwargs.pop('msi_token', False)  # type: Optional[bool]
    skip_port_check = kwargs.pop('skip_port_check', False)  # type: Optional[bool]

    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runtimeName": _SERIALIZER.url("runtime_name", runtime_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if async_call is not None:
        query_parameters['asyncCall'] = _SERIALIZER.query("async_call", async_call, 'bool')
    if msi_token is not None:
        query_parameters['msiToken'] = _SERIALIZER.query("msi_token", msi_token, 'bool')
    if skip_port_check is not None:
        query_parameters['skipPortCheck'] = _SERIALIZER.query("skip_port_check", skip_port_check, 'bool')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


# Builds the PUT request for update_runtime; same shape as create, PUT verb.
def build_update_runtime_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    runtime_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    async_call = kwargs.pop('async_call', False)  # type: Optional[bool]
    msi_token = kwargs.pop('msi_token', False)  # type: Optional[bool]
    skip_port_check = kwargs.pop('skip_port_check', False)  # type: Optional[bool]

    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runtimeName": _SERIALIZER.url("runtime_name", runtime_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if async_call is not None:
        query_parameters['asyncCall'] = _SERIALIZER.query("async_call", async_call, 'bool')
    if msi_token is not None:
        query_parameters['msiToken'] = _SERIALIZER.query("msi_token", msi_token, 'bool')
    if skip_port_check is not None:
        query_parameters['skipPortCheck'] = _SERIALIZER.query("skip_port_check", skip_port_check, 'bool')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


# Builds the GET request for get_runtime (no query parameters).
def build_get_runtime_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    runtime_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runtimeName": _SERIALIZER.url("runtime_name", runtime_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        headers=header_parameters,
        **kwargs
    )


# Builds the DELETE request for delete_runtime; asyncCall/msiToken flags only.
def build_delete_runtime_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    runtime_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    async_call = kwargs.pop('async_call', False)  # type: Optional[bool]
    msi_token = kwargs.pop('msi_token', False)  # type: Optional[bool]

    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runtimeName": _SERIALIZER.url("runtime_name", runtime_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if async_call is not None:
        query_parameters['asyncCall'] = _SERIALIZER.query("async_call", async_call, 'bool')
    if msi_token is not None:
        query_parameters['msiToken'] = _SERIALIZER.query("msi_token", msi_token, 'bool')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


# Builds the GET request for check_ci_availability.
# NOTE: compute_instance_name / custom_app_name are REQUIRED kwargs —
# kwargs.pop with no default raises KeyError if a caller omits them.
def build_check_ci_availability_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    compute_instance_name = kwargs.pop('compute_instance_name')  # type: str
    custom_app_name = kwargs.pop('custom_app_name')  # type: str

    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/checkCiAvailability')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['computeInstanceName'] = _SERIALIZER.query("compute_instance_name", compute_instance_name, 'str')
    query_parameters['customAppName'] = _SERIALIZER.query("custom_app_name", custom_app_name, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


# Builds the GET request for check_mir_availability (endpoint/deployment
# names are required kwargs, sent as query parameters).
def build_check_mir_availability_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    endpoint_name = kwargs.pop('endpoint_name')  # type: str
    deployment_name = kwargs.pop('deployment_name')  # type: str

    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/checkMirAvailability')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['endpointName'] = _SERIALIZER.query("endpoint_name", endpoint_name, 'str')
    query_parameters['deploymentName'] = _SERIALIZER.query("deployment_name", deployment_name, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )


# Builds the GET request for check_runtime_upgrade (needUpgrade endpoint).
def build_check_runtime_upgrade_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    runtime_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}/needUpgrade')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runtimeName": _SERIALIZER.url("runtime_name", runtime_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        headers=header_parameters,
        **kwargs
    )


# Builds the GET request for get_runtime_capability.
def build_get_runtime_capability_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    runtime_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}/capability')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "runtimeName": _SERIALIZER.url("runtime_name", runtime_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        headers=header_parameters,
        **kwargs
    )


# Builds the GET request for get_runtime_latest_config (workspace-level;
# no runtime name in the path).
def build_get_runtime_latest_config_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/latestConfig')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        headers=header_parameters,
        **kwargs
    )


# Builds the GET request for list_runtimes (collection endpoint).
def build_list_runtimes_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        headers=header_parameters,
        **kwargs
    )

# fmt: on
class FlowRuntimesOperations(object):
    """FlowRuntimesOperations operations.

    You should not instantiate this class directly.
    Instead, you should create a Client instance that instantiates it for you and
    attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~flow.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        # Wired up by the generated service client; serializer/deserializer
        # are msrest Serializer/Deserializer instances.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def create_runtime(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        runtime_name,  # type: str
        async_call=False,  # type: Optional[bool]
        msi_token=False,  # type: Optional[bool]
        skip_port_check=False,  # type: Optional[bool]
        body=None,  # type: Optional["_models.CreateFlowRuntimeRequest"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.FlowRuntimeDto"
        """create_runtime.

        Create a flow runtime; the optional body carries the runtime definition.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param runtime_name:
        :type runtime_name: str
        :param async_call:
        :type async_call: bool
        :param msi_token:
        :type msi_token: bool
        :param skip_port_check:
        :type skip_port_check: bool
        :param body:
        :type body: ~flow.models.CreateFlowRuntimeRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FlowRuntimeDto, or the result of cls(response)
        :rtype: ~flow.models.FlowRuntimeDto
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FlowRuntimeDto"]
        # 401/404/409 are mapped to typed azure-core exceptions; any other
        # non-200 status falls through to the generic HttpResponseError below.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        if body is not None:
            _json = self._serialize.body(body, 'CreateFlowRuntimeRequest')
        else:
            _json = None

        request = build_create_runtime_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            runtime_name=runtime_name,
            content_type=content_type,
            json=_json,
            async_call=async_call,
            msi_token=msi_token,
            skip_port_check=skip_port_check,
            template_url=self.create_runtime.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('FlowRuntimeDto', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'}  # type: ignore


    @distributed_trace
    def update_runtime(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        runtime_name,  # type: str
        async_call=False,  # type: Optional[bool]
        msi_token=False,  # type: Optional[bool]
        skip_port_check=False,  # type: Optional[bool]
        body=None,  # type: Optional["_models.UpdateFlowRuntimeRequest"]
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.FlowRuntimeDto"
        """update_runtime.

        Update an existing flow runtime (PUT); mirrors create_runtime.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param runtime_name:
        :type runtime_name: str
        :param async_call:
        :type async_call: bool
        :param msi_token:
        :type msi_token: bool
        :param skip_port_check:
        :type skip_port_check: bool
        :param body:
        :type body: ~flow.models.UpdateFlowRuntimeRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: FlowRuntimeDto, or the result of cls(response)
        :rtype: ~flow.models.FlowRuntimeDto
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.FlowRuntimeDto"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        if body is not None:
            _json = self._serialize.body(body, 'UpdateFlowRuntimeRequest')
        else:
            _json = None

        request = build_update_runtime_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            runtime_name=runtime_name,
            content_type=content_type,
            json=_json,
            async_call=async_call,
            msi_token=msi_token,
            skip_port_check=skip_port_check,
            template_url=self.update_runtime.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('FlowRuntimeDto', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    update_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'}  # type: ignore


    @distributed_trace
    def get_runtime(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        runtime_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.FlowRuntimeDto"
        """get_runtime.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
:type workspace_name: str :param runtime_name: :type runtime_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: FlowRuntimeDto, or the result of cls(response) :rtype: ~flow.models.FlowRuntimeDto :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRuntimeDto"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_runtime_request( subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, runtime_name=runtime_name, template_url=self.get_runtime.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('FlowRuntimeDto', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'} # type: ignore @distributed_trace def delete_runtime( self, subscription_id, # type: str resource_group_name, # type: str workspace_name, # type: str runtime_name, # type: str async_call=False, # type: Optional[bool] msi_token=False, # type: Optional[bool] **kwargs # type: Any ): # type: (...) -> "_models.FlowRuntimeDto" """delete_runtime. :param subscription_id: The Azure Subscription ID. 
:type subscription_id: str :param resource_group_name: The Name of the resource group in which the workspace is located. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param runtime_name: :type runtime_name: str :param async_call: :type async_call: bool :param msi_token: :type msi_token: bool :keyword callable cls: A custom type or function that will be passed the direct response :return: FlowRuntimeDto, or the result of cls(response) :rtype: ~flow.models.FlowRuntimeDto :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRuntimeDto"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_delete_runtime_request( subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, runtime_name=runtime_name, async_call=async_call, msi_token=msi_token, template_url=self.delete_runtime.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('FlowRuntimeDto', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized delete_runtime.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}'} # type: ignore @distributed_trace def check_ci_availability( self, subscription_id, # 
type: str resource_group_name, # type: str workspace_name, # type: str compute_instance_name, # type: str custom_app_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.AvailabilityResponse" """check_ci_availability. :param subscription_id: The Azure Subscription ID. :type subscription_id: str :param resource_group_name: The Name of the resource group in which the workspace is located. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param compute_instance_name: :type compute_instance_name: str :param custom_app_name: :type custom_app_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AvailabilityResponse, or the result of cls(response) :rtype: ~flow.models.AvailabilityResponse :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilityResponse"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_check_ci_availability_request( subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, compute_instance_name=compute_instance_name, custom_app_name=custom_app_name, template_url=self.check_ci_availability.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('AvailabilityResponse', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) 
return deserialized check_ci_availability.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/checkCiAvailability'} # type: ignore @distributed_trace def check_mir_availability( self, subscription_id, # type: str resource_group_name, # type: str workspace_name, # type: str endpoint_name, # type: str deployment_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.AvailabilityResponse" """check_mir_availability. :param subscription_id: The Azure Subscription ID. :type subscription_id: str :param resource_group_name: The Name of the resource group in which the workspace is located. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param endpoint_name: :type endpoint_name: str :param deployment_name: :type deployment_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: AvailabilityResponse, or the result of cls(response) :rtype: ~flow.models.AvailabilityResponse :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.AvailabilityResponse"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_check_mir_availability_request( subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, endpoint_name=endpoint_name, deployment_name=deployment_name, template_url=self.check_mir_availability.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, 
error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('AvailabilityResponse', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized check_mir_availability.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/checkMirAvailability'} # type: ignore @distributed_trace def check_runtime_upgrade( self, subscription_id, # type: str resource_group_name, # type: str workspace_name, # type: str runtime_name, # type: str **kwargs # type: Any ): # type: (...) -> bool """check_runtime_upgrade. :param subscription_id: The Azure Subscription ID. :type subscription_id: str :param resource_group_name: The Name of the resource group in which the workspace is located. :type resource_group_name: str :param workspace_name: The name of the workspace. 
:type workspace_name: str :param runtime_name: :type runtime_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: bool, or the result of cls(response) :rtype: bool :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[bool] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_check_runtime_upgrade_request( subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, runtime_name=runtime_name, template_url=self.check_runtime_upgrade.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('bool', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized check_runtime_upgrade.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}/needUpgrade'} # type: ignore @distributed_trace def get_runtime_capability( self, subscription_id, # type: str resource_group_name, # type: str workspace_name, # type: str runtime_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.FlowRuntimeCapability" """get_runtime_capability. :param subscription_id: The Azure Subscription ID. 
:type subscription_id: str :param resource_group_name: The Name of the resource group in which the workspace is located. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :param runtime_name: :type runtime_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: FlowRuntimeCapability, or the result of cls(response) :rtype: ~flow.models.FlowRuntimeCapability :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.FlowRuntimeCapability"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_runtime_capability_request( subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, runtime_name=runtime_name, template_url=self.get_runtime_capability.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('FlowRuntimeCapability', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_runtime_capability.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/{runtimeName}/capability'} # type: ignore @distributed_trace def get_runtime_latest_config( self, subscription_id, # type: str resource_group_name, # type: str workspace_name, # 
type: str **kwargs # type: Any ): # type: (...) -> "_models.RuntimeConfiguration" """get_runtime_latest_config. :param subscription_id: The Azure Subscription ID. :type subscription_id: str :param resource_group_name: The Name of the resource group in which the workspace is located. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: RuntimeConfiguration, or the result of cls(response) :rtype: ~flow.models.RuntimeConfiguration :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.RuntimeConfiguration"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_get_runtime_latest_config_request( subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, template_url=self.get_runtime_latest_config.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('RuntimeConfiguration', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_runtime_latest_config.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes/latestConfig'} # type: ignore @distributed_trace def list_runtimes( self, 
subscription_id, # type: str resource_group_name, # type: str workspace_name, # type: str **kwargs # type: Any ): # type: (...) -> List["_models.FlowRuntimeDto"] """list_runtimes. :param subscription_id: The Azure Subscription ID. :type subscription_id: str :param resource_group_name: The Name of the resource group in which the workspace is located. :type resource_group_name: str :param workspace_name: The name of the workspace. :type workspace_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: list of FlowRuntimeDto, or the result of cls(response) :rtype: list[~flow.models.FlowRuntimeDto] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[List["_models.FlowRuntimeDto"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) request = build_list_runtimes_request( subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name, template_url=self.list_runtimes.metadata['url'], ) request = _convert_request(request) request.url = self._client.format_url(request.url) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = self._deserialize('[FlowRuntimeDto]', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized list_runtimes.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowRuntimes'} # type: ignore
promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_flow_runtimes_operations.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_flow_runtimes_operations.py", "repo_id": "promptflow", "token_count": 17964 }
42
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import dataclass
from datetime import datetime
from itertools import chain
from typing import Any, List, Mapping

from promptflow._utils.exception_utils import RootErrorCode
from promptflow._utils.openai_metrics_calculator import OpenAIMetricsCalculator
from promptflow.contracts.run_info import RunInfo, Status
from promptflow.executor._result import AggregationResult, LineResult


@dataclass
class LineError:
    """The error of a line in a batch run.

    It contains the line number and the error dict of a failed line in the
    batch run. The error dict is generated by ExceptionPresenter.to_dict().
    """

    line_number: int
    error: Mapping[str, Any]

    def to_dict(self):
        """Return a plain-dict representation for serialization."""
        return {
            "line_number": self.line_number,
            "error": self.error,
        }


@dataclass
class ErrorSummary:
    """The summary of errors in a batch run.

    :param failed_user_error_lines: The number of lines that failed with user error.
    :type failed_user_error_lines: int
    :param failed_system_error_lines: The number of lines that failed with system error.
    :type failed_system_error_lines: int
    :param error_list: The line number and error dict of failed lines in the line results.
    :type error_list: List[~promptflow.batch._result.LineError]
    :param aggr_error_dict: The dict of node name and error dict of failed nodes in the aggregation result.
    :type aggr_error_dict: Mapping[str, Any]
    """

    failed_user_error_lines: int
    failed_system_error_lines: int
    error_list: List[LineError]
    aggr_error_dict: Mapping[str, Any]

    @staticmethod
    def create(line_results: List[LineResult], aggr_result: AggregationResult):
        """Build an ErrorSummary from the failed entries of a batch run.

        Lines are classified as user vs. system failures by the root error
        code; aggregation-node failures are collected by node name.
        """
        failed_user_error_lines = 0
        failed_system_error_lines = 0
        error_list: List[LineError] = []
        for line_result in line_results:
            if line_result.run_info.status != Status.Failed:
                continue
            flow_run = line_result.run_info
            # Guard against a failed run whose error payload is None, which
            # would otherwise raise AttributeError on .get().
            error_dict = flow_run.error or {}
            if error_dict.get("code", "") == RootErrorCode.USER_ERROR:
                failed_user_error_lines += 1
            else:
                failed_system_error_lines += 1
            error_list.append(
                LineError(
                    line_number=flow_run.index,
                    error=flow_run.error,
                )
            )

        return ErrorSummary(
            failed_user_error_lines=failed_user_error_lines,
            failed_system_error_lines=failed_system_error_lines,
            # Sort for a deterministic, line-ordered report.
            error_list=sorted(error_list, key=lambda x: x.line_number),
            aggr_error_dict={
                node_name: node_run_info.error
                for node_name, node_run_info in aggr_result.node_run_infos.items()
                if node_run_info.status == Status.Failed
            },
        )


@dataclass
class SystemMetrics:
    """The system metrics of a batch run."""

    total_tokens: int
    prompt_tokens: int
    completion_tokens: int
    duration: float  # in seconds

    @staticmethod
    def create(
        start_time: datetime, end_time: datetime, line_results: List[LineResult], aggr_results: AggregationResult
    ):
        """Aggregate OpenAI token metrics and wall-clock duration for a batch run."""
        openai_metrics = SystemMetrics._get_openai_metrics(line_results, aggr_results)
        return SystemMetrics(
            total_tokens=openai_metrics.get("total_tokens", 0),
            prompt_tokens=openai_metrics.get("prompt_tokens", 0),
            completion_tokens=openai_metrics.get("completion_tokens", 0),
            duration=(end_time - start_time).total_seconds(),
        )

    @staticmethod
    def _get_openai_metrics(line_results: List[LineResult], aggr_results: AggregationResult):
        """Merge token metrics from every node run (line and aggregation nodes).

        Prefers the node's recorded system_metrics; falls back to deriving
        metrics from the node's recorded API calls when they are incomplete.
        """
        node_run_infos = _get_node_run_infos(line_results, aggr_results)
        total_metrics = {}
        calculator = OpenAIMetricsCalculator()
        for run_info in node_run_infos:
            metrics = SystemMetrics._try_get_openai_metrics(run_info)
            if metrics:
                calculator.merge_metrics_dict(total_metrics, metrics)
            else:
                api_calls = run_info.api_calls or []
                for call in api_calls:
                    metrics = calculator.get_openai_metrics_from_api_call(call)
                    calculator.merge_metrics_dict(total_metrics, metrics)
        return total_metrics

    # Fix: decorated as @staticmethod for consistency with the other helpers
    # (it was previously a plain function that only worked when accessed via
    # the class), and the "incomplete metrics" path now returns {} instead of
    # False so the return type is uniformly a dict. The single caller only
    # tests truthiness, so behavior is unchanged.
    @staticmethod
    def _try_get_openai_metrics(run_info: RunInfo):
        """Return the token metrics recorded on run_info, or {} if any are missing."""
        openai_metrics = {}
        if run_info.system_metrics:
            for metric in ["total_tokens", "prompt_tokens", "completion_tokens"]:
                if metric not in run_info.system_metrics:
                    return {}
                openai_metrics[metric] = run_info.system_metrics[metric]
        return openai_metrics

    def to_dict(self):
        """Return a plain-dict representation for serialization."""
        return {
            "total_tokens": self.total_tokens,
            "prompt_tokens": self.prompt_tokens,
            "completion_tokens": self.completion_tokens,
            "duration": self.duration,
        }


@dataclass
class BatchResult:
    """The result of a batch run."""

    status: Status
    total_lines: int
    completed_lines: int
    failed_lines: int
    node_status: Mapping[str, int]
    start_time: datetime
    end_time: datetime
    metrics: Mapping[str, str]
    system_metrics: SystemMetrics
    error_summary: ErrorSummary

    @classmethod
    def create(
        cls,
        start_time: datetime,
        end_time: datetime,
        line_results: List[LineResult],
        aggr_result: AggregationResult,
        status: Status = Status.Completed,
    ) -> "BatchResult":
        """Assemble a BatchResult from per-line results and the aggregation result."""
        total_lines = len(line_results)
        completed_lines = sum(line_result.run_info.status == Status.Completed for line_result in line_results)
        failed_lines = total_lines - completed_lines
        return cls(
            status=status,
            total_lines=total_lines,
            completed_lines=completed_lines,
            failed_lines=failed_lines,
            node_status=BatchResult._get_node_status(line_results, aggr_result),
            start_time=start_time,
            end_time=end_time,
            metrics=aggr_result.metrics,
            system_metrics=SystemMetrics.create(start_time, end_time, line_results, aggr_result),
            error_summary=ErrorSummary.create(line_results, aggr_result),
        )

    @staticmethod
    def _get_node_status(line_results: List[LineResult], aggr_result: AggregationResult):
        """Count node runs keyed as "<node>.<status>", e.g. "my_node.completed"."""
        node_run_infos = _get_node_run_infos(line_results, aggr_result)
        node_status = {}
        for node_run_info in node_run_infos:
            key = f"{node_run_info.node}.{node_run_info.status.value.lower()}"
            node_status[key] = node_status.get(key, 0) + 1
        return node_status


def _get_node_run_infos(line_results: List[LineResult], aggr_result: AggregationResult):
    """Yield every node run info: all line-level runs followed by aggregation runs."""
    line_node_run_infos = (
        node_run_info for line_result in line_results for node_run_info in line_result.node_run_infos.values()
    )
    aggr_node_run_infos = (node_run_info for node_run_info in aggr_result.node_run_infos.values())
    return chain(line_node_run_infos, aggr_node_run_infos)
promptflow/src/promptflow/promptflow/batch/_result.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/batch/_result.py", "repo_id": "promptflow", "token_count": 3127 }
43
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

import asyncio
import contextvars
import inspect
import os
import signal
import threading
import time
import traceback
from asyncio import Task
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List, Tuple

from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow._core.tools_manager import ToolsManager
from promptflow._utils.logger_utils import flow_logger
from promptflow._utils.utils import extract_user_frame_summaries, set_context
from promptflow.contracts.flow import Node
from promptflow.executor._dag_manager import DAGManager
from promptflow.executor._errors import NoNodeExecutedError

# Name given to the scheduler's own task so the duration monitor can skip it.
PF_ASYNC_NODE_SCHEDULER_EXECUTE_TASK_NAME = "_pf_async_nodes_scheduler.execute"
DEFAULT_TASK_LOGGING_INTERVAL = 60
ASYNC_DAG_MANAGER_COMPLETED = False


class AsyncNodesScheduler:
    """Schedules DAG node execution on an asyncio event loop.

    Coroutine tools run natively; sync tools are wrapped and executed in a
    thread pool. Concurrent node runs are bounded by an asyncio.Semaphore
    sized to ``node_concurrency``.
    """

    def __init__(
        self,
        tools_manager: ToolsManager,
        node_concurrency: int,
    ) -> None:
        self._tools_manager = tools_manager
        self._node_concurrency = node_concurrency
        # Per-task bookkeeping used by the long-running-coroutine monitor thread.
        self._task_start_time = {}
        self._task_last_log_time = {}
        # Signals the monitor thread that the DAG has finished executing.
        self._dag_manager_completed_event = threading.Event()

    async def execute(
        self,
        nodes: List[Node],
        inputs: Dict[str, Any],
        context: FlowExecutionContext,
    ) -> Tuple[dict, dict]:
        """Run all nodes and return (completed node outputs, bypassed nodes)."""
        # TODO: Provide cancel API
        if threading.current_thread() is threading.main_thread():
            # Signal handlers can only be registered from the main thread.
            signal.signal(signal.SIGINT, signal_handler)
            signal.signal(signal.SIGTERM, signal_handler)
        else:
            flow_logger.info(
                "Current thread is not main thread, skip signal handler registration in AsyncNodesScheduler."
            )

        # Semaphore should be created in the loop, otherwise it will not work.
        loop = asyncio.get_running_loop()
        self._semaphore = asyncio.Semaphore(self._node_concurrency)
        monitor = threading.Thread(
            target=monitor_long_running_coroutine,
            args=(loop, self._task_start_time, self._task_last_log_time, self._dag_manager_completed_event),
            daemon=True,
        )
        monitor.start()

        # Set the name of scheduler tasks to avoid monitoring its duration
        task = asyncio.current_task()
        task.set_name(PF_ASYNC_NODE_SCHEDULER_EXECUTE_TASK_NAME)

        # Copy the current context so worker threads see the same contextvars.
        parent_context = contextvars.copy_context()
        executor = ThreadPoolExecutor(
            max_workers=self._node_concurrency, initializer=set_context, initargs=(parent_context,)
        )
        # Note that we must not use `with` statement to manage the executor.
        # This is because it will always call `executor.shutdown()` when exiting the `with` block.
        # Then the event loop will wait for all tasks to be completed before raising the cancellation error.
        # See reference: https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.Executor
        outputs = await self._execute_with_thread_pool(executor, nodes, inputs, context)
        executor.shutdown()
        return outputs

    async def _execute_with_thread_pool(
        self,
        executor: ThreadPoolExecutor,
        nodes: List[Node],
        inputs: Dict[str, Any],
        context: FlowExecutionContext,
    ) -> Tuple[dict, dict]:
        """Drive the DAG to completion: repeatedly submit ready nodes and await results."""
        flow_logger.info(f"Start to run {len(nodes)} nodes with the current event loop.")
        dag_manager = DAGManager(nodes, inputs)
        task2nodes = self._execute_nodes(dag_manager, context, executor)
        while not dag_manager.completed():
            task2nodes = await self._wait_and_complete_nodes(task2nodes, dag_manager)
            submitted_tasks2nodes = self._execute_nodes(dag_manager, context, executor)
            task2nodes.update(submitted_tasks2nodes)
        # Set the event to notify the monitor thread to exit
        # Ref: https://docs.python.org/3/library/threading.html#event-objects
        self._dag_manager_completed_event.set()
        # Bypassed nodes contribute a None output so downstream reporting sees them.
        for node in dag_manager.bypassed_nodes:
            dag_manager.completed_nodes_outputs[node] = None
        return dag_manager.completed_nodes_outputs, dag_manager.bypassed_nodes

    async def _wait_and_complete_nodes(self, task2nodes: Dict[Task, Node], dag_manager: DAGManager) -> Dict[Task, Node]:
        """Wait for at least one in-flight node task, record its result, and return the remaining tasks."""
        if not task2nodes:
            # No pending tasks while the DAG is unfinished means the flow is stuck.
            raise NoNodeExecutedError("No nodes are ready for execution, but the flow is not completed.")
        tasks = [task for task in task2nodes]
        for task in tasks:
            self._task_start_time[task] = time.time()
        done, _ = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
        dag_manager.complete_nodes({task2nodes[task].name: task.result() for task in done})
        for task in done:
            del task2nodes[task]
        return task2nodes

    def _execute_nodes(
        self,
        dag_manager: DAGManager,
        context: FlowExecutionContext,
        executor: ThreadPoolExecutor,
    ) -> Dict[Task, Node]:
        """Bypass all bypassable nodes, then submit every ready node as a task."""
        # Bypass nodes and update node run info until there are no nodes to bypass
        nodes_to_bypass = dag_manager.pop_bypassable_nodes()
        while nodes_to_bypass:
            for node in nodes_to_bypass:
                context.bypass_node(node)
            nodes_to_bypass = dag_manager.pop_bypassable_nodes()
        # Create tasks for ready nodes
        return {
            self._create_node_task(node, dag_manager, context, executor): node
            for node in dag_manager.pop_ready_nodes()
        }

    async def run_task_with_semaphore(self, coroutine):
        """Run a node coroutine while holding the concurrency semaphore."""
        async with self._semaphore:
            return await coroutine

    def _create_node_task(
        self,
        node: Node,
        dag_manager: DAGManager,
        context: FlowExecutionContext,
        executor: ThreadPoolExecutor,
    ) -> Task:
        """Create the asyncio task that will execute one node's tool."""
        f = self._tools_manager.get_tool(node.name)
        kwargs = dag_manager.get_node_valid_inputs(node, f)
        if inspect.iscoroutinefunction(f):
            # For async task, it will not be executed before calling create_task.
            task = context.invoke_tool_async(node, f, kwargs)
        else:
            # For sync task, convert it to async task and run it in executor thread.
            # Even though the task is put to the thread pool, thread.start will only be triggered after create_task.
            task = self._sync_function_to_async_task(executor, context, node, f, kwargs)
        # Set the name of the task to the node name for debugging purpose
        # It does not need to be unique by design.
        # Wrap the coroutine in a task with asyncio.create_task to schedule it for event loop execution
        # The task is created and added to the event loop, but the exact execution depends on loop's scheduling
        return asyncio.create_task(self.run_task_with_semaphore(task), name=node.name)

    @staticmethod
    async def _sync_function_to_async_task(
        executor: ThreadPoolExecutor,
        context: FlowExecutionContext,
        node,
        f,
        kwargs,
    ):
        """Await a sync tool invocation executed on the thread pool."""
        # The task will not be executed before calling create_task.
        return await asyncio.get_running_loop().run_in_executor(executor, context.invoke_tool, node, f, kwargs)


def signal_handler(sig, frame):
    """
    Start a thread to monitor coroutines after receiving signal.
    """
    flow_logger.info(f"Received signal {sig}({signal.Signals(sig).name}), start coroutine monitor thread.")
    loop = asyncio.get_running_loop()
    monitor = threading.Thread(target=monitor_coroutine_after_cancellation, args=(loop,))
    monitor.start()
    raise KeyboardInterrupt


def log_stack_recursively(task: asyncio.Task, elapse_time: float):
    """Recursively log the frame of a task or coroutine.

    Traditional stacktrace would stop at the first awaited nested inside the coroutine.

    :param task: Task to log
    :type task_or_coroutine: asyncio.Task
    :param elapse_time: Seconds elapsed since the task started
    :type elapse_time: float
    """
    # We cannot use task.get_stack() to get the stack, because only one stack frame is
    # returned for a suspended coroutine because of the implementation of CPython
    # Ref: https://github.com/python/cpython/blob/main/Lib/asyncio/tasks.py
    # "only one stack frame is returned for a suspended coroutine."
task_or_coroutine = task frame_summaries = [] # Collect frame_summaries along async call chain while True: if isinstance(task_or_coroutine, asyncio.Task): # For a task, get the coroutine it's running coroutine: asyncio.coroutine = task_or_coroutine.get_coro() elif asyncio.iscoroutine(task_or_coroutine): coroutine = task_or_coroutine else: break frame = coroutine.cr_frame stack_summary: traceback.StackSummary = traceback.extract_stack(frame) frame_summaries.extend(stack_summary) task_or_coroutine = coroutine.cr_await # Format the frame summaries to warning message if frame_summaries: user_frame_summaries = extract_user_frame_summaries(frame_summaries) stack_messages = traceback.format_list(user_frame_summaries) all_stack_message = "".join(stack_messages) task_msg = ( f"Task {task.get_name()} has been running for {elapse_time:.0f} seconds," f" stacktrace:\n{all_stack_message}" ) flow_logger.warning(task_msg) def monitor_long_running_coroutine( loop: asyncio.AbstractEventLoop, task_start_time: dict, task_last_log_time: dict, dag_manager_completed_event: threading.Event, ): flow_logger.info("monitor_long_running_coroutine started") logging_interval = DEFAULT_TASK_LOGGING_INTERVAL logging_interval_in_env = os.environ.get("PF_TASK_PEEKING_INTERVAL") if logging_interval_in_env: try: value = int(logging_interval_in_env) if value <= 0: raise ValueError logging_interval = value flow_logger.info( f"Using value of PF_TASK_PEEKING_INTERVAL in environment variable as " f"logging interval: {logging_interval_in_env}" ) except ValueError: flow_logger.warning( f"Value of PF_TASK_PEEKING_INTERVAL in environment variable ('{logging_interval_in_env}') " f"is invalid, use default value {DEFAULT_TASK_LOGGING_INTERVAL}" ) while not dag_manager_completed_event.is_set(): running_tasks = [task for task in asyncio.all_tasks(loop) if not task.done()] # get duration of running tasks for task in running_tasks: # Do not monitor the scheduler task if task.get_name() == 
PF_ASYNC_NODE_SCHEDULER_EXECUTE_TASK_NAME: continue # Do not monitor sync tools, since they will run in executor thread and will # be monitored by RepeatLogTimer. task_stacks = task.get_stack() if ( task_stacks and task_stacks[-1].f_code and task_stacks[-1].f_code.co_name == AsyncNodesScheduler._sync_function_to_async_task.__name__ ): continue if task_start_time.get(task) is None: flow_logger.warning(f"task {task.get_name()} has no start time, which should not happen") else: duration = time.time() - task_start_time[task] if duration > logging_interval: if ( task_last_log_time.get(task) is None or time.time() - task_last_log_time[task] > logging_interval ): log_stack_recursively(task, duration) task_last_log_time[task] = time.time() time.sleep(1) def monitor_coroutine_after_cancellation(loop: asyncio.AbstractEventLoop): """Exit the process when all coroutines are done. We add this function because if a sync tool is running in async mode, the task will be cancelled after receiving SIGINT, but the thread will not be terminated and blocks the program from exiting. :param loop: event loop of main thread :type loop: asyncio.AbstractEventLoop """ # TODO: Use environment variable to ensure it is flow test scenario to avoid unexpected exit. # E.g. Customer is integrating Promptflow in their own code, and they want to handle SIGINT by themselves. max_wait_seconds = os.environ.get("PF_WAIT_SECONDS_AFTER_CANCELLATION", 30) all_tasks_are_done = False exceeded_wait_seconds = False thread_start_time = time.time() flow_logger.info(f"Start to monitor coroutines after cancellation, max wait seconds: {max_wait_seconds}s") while not all_tasks_are_done and not exceeded_wait_seconds: # For sync tool running in async mode, the task will be cancelled, # but the thread will not be terminated, we exit the program despite of it. # TODO: Detect whether there is any sync tool running in async mode, # if there is none, avoid sys.exit and let the program exit gracefully. 
all_tasks_are_done = all(task.done() for task in asyncio.all_tasks(loop)) if all_tasks_are_done: flow_logger.info("All coroutines are done. Exiting.") # We cannot ensure persist_flow_run is called before the process exits in the case that there is # non-daemon thread running, sleep for 3 seconds as a best effort. # If the caller wants to ensure flow status is cancelled in storage, it should check the flow status # after timeout and set the flow status to Cancelled. time.sleep(3) # Use os._exit instead of sys.exit, so that the process can stop without # waiting for the thread created by run_in_executor to finish. # sys.exit: https://docs.python.org/3/library/sys.html#sys.exit # Raise a SystemExit exception, signaling an intention to exit the interpreter. # Specifically, it does not exit non-daemon thread # os._exit https://docs.python.org/3/library/os.html#os._exit # Exit the process with status n, without calling cleanup handlers, flushing stdio buffers, etc. # Specifically, it stops process without waiting for non-daemon thread. os._exit(0) exceeded_wait_seconds = time.time() - thread_start_time > max_wait_seconds time.sleep(1) if exceeded_wait_seconds: if not all_tasks_are_done: flow_logger.info( f"Not all coroutines are done within {max_wait_seconds}s" " after cancellation. Exiting the process despite of them." " Please config the environment variable" " PF_WAIT_SECONDS_AFTER_CANCELLATION if your tool needs" " more time to clean up after cancellation." ) remaining_tasks = [task for task in asyncio.all_tasks(loop) if not task.done()] flow_logger.info(f"Remaining tasks: {[task.get_name() for task in remaining_tasks]}") time.sleep(3) os._exit(0)
promptflow/src/promptflow/promptflow/executor/_async_nodes_scheduler.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/executor/_async_nodes_scheduler.py", "repo_id": "promptflow", "token_count": 6253 }
44
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- __path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore from promptflow._sdk.operations._connection_operations import ConnectionOperations from promptflow._sdk.operations._flow_operations import FlowOperations from promptflow._sdk.operations._run_operations import RunOperations __all__ = ["ConnectionOperations", "FlowOperations", "RunOperations"]
promptflow/src/promptflow/promptflow/operations/__init__.py/0
{ "file_path": "promptflow/src/promptflow/promptflow/operations/__init__.py", "repo_id": "promptflow", "token_count": 133 }
45
from pathlib import Path
from tempfile import mkdtemp

import pytest

from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow.batch import BatchEngine
from promptflow.batch._result import BatchResult, LineError
from promptflow.contracts.run_info import Status
from promptflow.executor._errors import BatchExecutionTimeoutError, LineExecutionTimeoutError

from ..utils import MemoryRunStorage, get_flow_folder, get_flow_inputs_file, get_yaml_file

SAMPLE_FLOW = "web_classification_no_variants"
ONE_LINE_OF_BULK_TEST_TIMEOUT = "one_line_of_bulktest_timeout"


@pytest.mark.usefixtures("use_secrets_config_file", "dev_connections")
@pytest.mark.e2etest
class TestBatchTimeout:
    """End-to-end tests for line-level and batch-level timeouts in BatchEngine."""

    @pytest.mark.parametrize(
        "flow_folder",
        [
            ONE_LINE_OF_BULK_TEST_TIMEOUT,
        ],
    )
    def test_batch_with_line_timeout(self, flow_folder, dev_connections):
        """All lines exceed the line timeout: every line fails as a user error
        and the timed-out node runs are marked Canceled."""
        # set line timeout to 5 seconds for testing
        mem_run_storage = MemoryRunStorage()
        batch_engine = BatchEngine(
            get_yaml_file(flow_folder),
            get_flow_folder(flow_folder),
            connections=dev_connections,
            storage=mem_run_storage,
        )
        batch_engine._line_timeout_sec = 5
        # prepare input file and output dir
        input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name="samples_all_timeout.json")}
        output_dir = Path(mkdtemp())
        inputs_mapping = {"idx": "${data.idx}"}
        batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)

        assert isinstance(batch_results, BatchResult)
        assert batch_results.completed_lines == 0
        assert batch_results.failed_lines == 2
        assert batch_results.total_lines == 2
        assert batch_results.node_status == {
            "my_python_tool_with_failed_line.canceled": 2,
            "my_python_tool.completed": 2,
        }

        # assert mem_run_storage persists run infos correctly
        assert len(mem_run_storage._flow_runs) == 2, "Flow runs are persisted in memory storage."
        assert len(mem_run_storage._node_runs) == 4, "Node runs are persisted in memory storage."
        msg = "Tool execution is canceled because of the error: Line execution timeout after 5 seconds."
        for run in mem_run_storage._node_runs.values():
            if run.node == "my_python_tool_with_failed_line":
                assert run.status == Status.Canceled
                assert run.error["message"] == msg
            else:
                assert run.status == Status.Completed
        assert batch_results.status == Status.Completed
        assert batch_results.total_lines == 2
        assert batch_results.completed_lines == 0
        assert batch_results.failed_lines == 2
        assert batch_results.error_summary.failed_user_error_lines == 2
        assert batch_results.error_summary.failed_system_error_lines == 0
        for i, line_error in enumerate(batch_results.error_summary.error_list):
            assert isinstance(line_error, LineError)
            assert line_error.error["message"] == f"Line {i} execution timeout for exceeding 5 seconds"
            assert line_error.error["code"] == "UserError"

    @pytest.mark.parametrize(
        "flow_folder",
        [
            ONE_LINE_OF_BULK_TEST_TIMEOUT,
        ],
    )
    def test_batch_with_one_line_timeout(self, flow_folder, dev_connections):
        """Only one of three lines times out: the batch completes with one
        failed user-error line and a LineExecutionTimeoutError recorded."""
        mem_run_storage = MemoryRunStorage()
        batch_engine = BatchEngine(
            get_yaml_file(flow_folder),
            get_flow_folder(flow_folder),
            connections=dev_connections,
            storage=mem_run_storage,
        )
        batch_engine._line_timeout_sec = 5  # set line timeout to 5 seconds for testing
        # prepare input file and output dir
        input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name="samples.json")}
        output_dir = Path(mkdtemp())
        inputs_mapping = {"idx": "${data.idx}"}
        batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)
        assert isinstance(batch_results, BatchResult)

        # assert the line status in batch result
        assert batch_results.status == Status.Completed
        assert batch_results.total_lines == 3
        assert batch_results.completed_lines == 2
        assert batch_results.failed_lines == 1
        assert batch_results.node_status == {
            "my_python_tool_with_failed_line.completed": 2,
            "my_python_tool_with_failed_line.canceled": 1,
            "my_python_tool.completed": 3,
        }

        # assert the error summary in batch result
        assert batch_results.error_summary.failed_user_error_lines == 1
        assert batch_results.error_summary.failed_system_error_lines == 0
        assert isinstance(batch_results.error_summary.error_list[0], LineError)
        assert batch_results.error_summary.error_list[0].line_number == 2
        assert batch_results.error_summary.error_list[0].error["code"] == "UserError"
        assert batch_results.error_summary.error_list[0].error["referenceCode"] == "Executor"
        assert batch_results.error_summary.error_list[0].error["innerError"]["code"] == "LineExecutionTimeoutError"
        assert (
            batch_results.error_summary.error_list[0].error["message"]
            == "Line 2 execution timeout for exceeding 5 seconds"
        )
        # assert mem_run_storage persists run infos correctly
        assert len(mem_run_storage._flow_runs) == 3, "Flow runs are persisted in memory storage."
        assert len(mem_run_storage._node_runs) == 6, "Node runs are persisted in memory storage."

    @pytest.mark.parametrize(
        "flow_folder, line_timeout_sec, batch_timeout_sec, expected_error",
        [
            (ONE_LINE_OF_BULK_TEST_TIMEOUT, 600, 5, BatchExecutionTimeoutError(2, 5)),
            (ONE_LINE_OF_BULK_TEST_TIMEOUT, 3, 600, LineExecutionTimeoutError(2, 3)),
            (ONE_LINE_OF_BULK_TEST_TIMEOUT, 3, 5, LineExecutionTimeoutError(2, 3)),
            # TODO: Will change to BatchExecutionTimeoutError after refining the implementation of batch timeout.
            # (ONE_LINE_OF_BULK_TEST_TIMEOUT, 3, 3, LineExecutionTimeoutError(2, 3)),
        ],
    )
    def test_batch_timeout(self, flow_folder, line_timeout_sec, batch_timeout_sec, expected_error):
        """Interplay of line timeout vs batch timeout: whichever fires first
        determines the recorded error type for the slow line."""
        mem_run_storage = MemoryRunStorage()
        batch_engine = BatchEngine(
            get_yaml_file(flow_folder),
            get_flow_folder(flow_folder),
            connections={},
            storage=mem_run_storage,
        )
        batch_engine._line_timeout_sec = line_timeout_sec
        batch_engine._batch_timeout_sec = batch_timeout_sec
        input_dirs = {"data": get_flow_inputs_file(flow_folder, file_name="samples.json")}
        output_dir = Path(mkdtemp())
        inputs_mapping = {"idx": "${data.idx}"}
        batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)

        assert isinstance(batch_results, BatchResult)
        # assert the line status in batch result
        assert batch_results.status == Status.Completed
        assert batch_results.total_lines == 3
        assert batch_results.completed_lines == 2
        assert batch_results.failed_lines == 1
        assert batch_results.node_status == {
            "my_python_tool_with_failed_line.completed": 2,
            "my_python_tool_with_failed_line.canceled": 1,
            "my_python_tool.completed": 3,
        }

        # assert the error summary in batch result
        assert batch_results.error_summary.failed_user_error_lines == 1
        assert batch_results.error_summary.failed_system_error_lines == 0
        assert isinstance(batch_results.error_summary.error_list[0], LineError)
        assert batch_results.error_summary.error_list[0].line_number == 2
        actual_error_dict = batch_results.error_summary.error_list[0].error
        expected_error_dict = ExceptionPresenter.create(expected_error).to_dict()
        assert actual_error_dict["code"] == expected_error_dict["code"]
        assert actual_error_dict["message"] == expected_error_dict["message"]
        assert actual_error_dict["referenceCode"] == expected_error_dict["referenceCode"]
        assert actual_error_dict["innerError"]["code"] == expected_error_dict["innerError"]["code"]

        # assert mem_run_storage persists run infos correctly
        assert len(mem_run_storage._flow_runs) == 3, "Flow runs are persisted in memory storage."
        # TODO: Currently, the node status is incomplete.
        # We will assert the correct result after refining the implementation of batch timeout.
        assert len(mem_run_storage._node_runs) == 6, "Node runs are persisted in memory storage."
promptflow/src/promptflow/tests/executor/e2etests/test_batch_timeout.py/0
{ "file_path": "promptflow/src/promptflow/tests/executor/e2etests/test_batch_timeout.py", "repo_id": "promptflow", "token_count": 3518 }
46
import pytest from promptflow._core.connection_manager import ConnectionManager from promptflow.connections import AzureOpenAIConnection from promptflow.contracts.tool import ConnectionType def get_connection_dict(): return { "azure_open_ai_connection": { "type": "AzureOpenAIConnection", "value": { "api_key": "<azure-openai-key>", "api_base": "<api-base>", "api_type": "azure", "api_version": "2023-07-01-preview", }, }, "custom_connection": { "type": "CustomConnection", "value": { "api_key": "<your-key>", "url": "https://api.bing.microsoft.com/v7.0/search", }, "module": "promptflow.connections", "secret_keys": ["api_key"], }, } @pytest.mark.unittest class TestConnectionManager: def test_build_connections(self): new_connection = get_connection_dict() # Add not exist key new_connection["azure_open_ai_connection"]["value"]["not_exist"] = "test" connection_manager = ConnectionManager(new_connection) assert len(connection_manager._connections) == 2 assert isinstance(connection_manager.get("azure_open_ai_connection"), AzureOpenAIConnection) assert connection_manager.to_connections_dict() == new_connection def test_serialize(self): new_connection = get_connection_dict() connection_manager = ConnectionManager(new_connection) assert ( ConnectionType.serialize_conn(connection_manager.get("azure_open_ai_connection")) == "azure_open_ai_connection" ) assert ConnectionType.serialize_conn(connection_manager.get("custom_connection")) == "custom_connection" def test_get_secret_list(self): new_connection = get_connection_dict() connection_manager = ConnectionManager(new_connection) expected_list = ["<azure-openai-key>", "<your-key>"] assert set(connection_manager.get_secret_list()) == set(expected_list) def test_is_secret(self): new_connection = get_connection_dict() connection_manager = ConnectionManager(new_connection) connection = connection_manager.get("custom_connection") assert connection.is_secret("api_key") is True assert connection.is_secret("url") is False
promptflow/src/promptflow/tests/executor/unittests/_core/test_connection_manager.py/0
{ "file_path": "promptflow/src/promptflow/tests/executor/unittests/_core/test_connection_manager.py", "repo_id": "promptflow", "token_count": 1015 }
47
import os
import re
import sys
from multiprocessing import Pool
from pathlib import Path
from unittest.mock import patch

import pytest

from promptflow._core.tool_meta_generator import (
    JinjaParsingError,
    MultipleToolsDefined,
    NoToolDefined,
    PythonLoadError,
    PythonParsingError,
    generate_prompt_meta,
    generate_python_meta,
    generate_tool_meta_dict_by_file,
)
from promptflow._utils.exception_utils import ExceptionPresenter

from ...utils import FLOW_ROOT, load_json

TEST_ROOT = Path(__file__).parent.parent.parent.parent
TOOLS_ROOT = TEST_ROOT / "test_configs/wrong_tools"


def cd_and_run(working_dir, source_path, tool_type):
    """Run meta generation from ``working_dir``; on failure return the error
    as a '(ExceptionName) message' string so it survives the process boundary."""
    os.chdir(working_dir)
    sys.path.insert(0, working_dir)
    try:
        return generate_tool_meta_dict_by_file(source_path, tool_type)
    except Exception as e:
        return f"({e.__class__.__name__}) {e}"


def cd_and_run_with_read_text_error(working_dir, source_path, tool_type):
    """Like cd_and_run, but patches Path.read_text to fail so the
    MetaFileReadError path is exercised."""
    def mock_read_text_error(self: Path, *args, **kwargs):
        raise Exception("Mock read text error.")

    os.chdir(working_dir)
    sys.path.insert(0, working_dir)
    try:
        with patch("promptflow._core.tool_meta_generator.Path.read_text", new=mock_read_text_error):
            return generate_tool_meta_dict_by_file(source_path, tool_type)
    except Exception as e:
        return f"({e.__class__.__name__}) {e}"


def cd_and_run_with_bad_function_interface(working_dir, source_path, tool_type):
    """Like cd_and_run, but patches function_to_interface to fail so the
    BadFunctionInterface path is exercised."""
    def mock_function_to_interface(*args, **kwargs):
        raise Exception("Mock function to interface error.")

    os.chdir(working_dir)
    sys.path.insert(0, working_dir)
    try:
        with patch("promptflow._core.tool_meta_generator.function_to_interface", new=mock_function_to_interface):
            return generate_tool_meta_dict_by_file(source_path, tool_type)
    except Exception as e:
        return f"({e.__class__.__name__}) {e}"


def generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, func):
    # Run in a single-worker subprocess so chdir/sys.path changes do not leak
    # into the test process.
    with Pool(1) as pool:
        return pool.apply(func, (wd, tool_path, tool_type))


@pytest.mark.unittest
class TestToolMetaUtils:
    """Unit tests for tool meta generation (python/llm/prompt) and its errors."""

    @pytest.mark.parametrize(
        "flow_dir, tool_path, tool_type",
        [
            ("prompt_tools", "summarize_text_content_prompt.jinja2", "prompt"),
            ("prompt_tools", "summarize_text_content_prompt.jinja2", "llm"),
            ("script_with_import", "dummy_utils/main.py", "python"),
            ("script_with___file__", "script_with___file__.py", "python"),
            ("script_with_special_character", "script_with_special_character.py", "python"),
        ],
    )
    def test_generate_tool_meta_dict_by_file(self, flow_dir, tool_path, tool_type):
        """Generated meta must equal the checked-in .meta.json snapshot."""
        wd = str((FLOW_ROOT / flow_dir).resolve())
        meta_dict = generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, cd_and_run)
        assert isinstance(meta_dict, dict), "Call cd_and_run failed:\n" + meta_dict
        target_file = (Path(wd) / tool_path).with_suffix(".meta.json")
        expected_dict = load_json(target_file)
        if tool_type == "llm":
            expected_dict["type"] = "llm"  # We use prompt as default for jinja2
        assert meta_dict == expected_dict

    @pytest.mark.parametrize(
        "flow_dir, tool_path, tool_type, func, msg_pattern",
        [
            pytest.param(
                "prompt_tools",
                "summarize_text_content_prompt.jinja2",
                "python",
                cd_and_run,
                r"\(PythonLoaderNotFound\) Failed to load python file '.*summarize_text_content_prompt.jinja2'. "
                r"Please make sure it is a valid .py file.",
                id="PythonLoaderNotFound",
            ),
            pytest.param(
                "script_with_import",
                "fail.py",
                "python",
                cd_and_run,
                r"\(PythonLoadError\) Failed to load python module from file '.*fail.py': "
                r"\(ModuleNotFoundError\) No module named 'aaa'",
                id="PythonLoadError",
            ),
            pytest.param(
                "simple_flow_with_python_tool",
                "divide_num.py",
                "python",
                cd_and_run_with_bad_function_interface,
                r"\(BadFunctionInterface\) Parse interface for tool 'divide_num' failed: "
                r"\(Exception\) Mock function to interface error.",
                id="BadFunctionInterface",
            ),
            pytest.param(
                "script_with_import",
                "aaa.py",
                "python",
                cd_and_run,
                r"\(MetaFileNotFound\) Generate tool meta failed for python tool. "
                r"Meta file 'aaa.py' can not be found.",
                id="MetaFileNotFound",
            ),
            pytest.param(
                "simple_flow_with_python_tool",
                "divide_num.py",
                "python",
                cd_and_run_with_read_text_error,
                r"\(MetaFileReadError\) Generate tool meta failed for python tool. "
                r"Read meta file 'divide_num.py' failed: \(Exception\) Mock read text error.",
                id="MetaFileReadError",
            ),
            pytest.param(
                "simple_flow_with_python_tool",
                "divide_num.py",
                "action",
                cd_and_run,
                r"\(NotSupported\) Generate tool meta failed. The type 'action' is currently unsupported. "
                r"Please choose from available types: python,llm,prompt and try again.",
                id="NotSupported",
            ),
        ],
    )
    def test_generate_tool_meta_dict_by_file_exception(self, flow_dir, tool_path, tool_type, func, msg_pattern):
        """Each failure mode surfaces as a string matching the expected pattern."""
        wd = str((FLOW_ROOT / flow_dir).resolve())
        ret = generate_tool_meta_dict_by_file_with_cd(wd, tool_path, tool_type, func)
        assert isinstance(ret, str), "Call cd_and_run should fail but succeeded:\n" + str(ret)
        assert re.match(msg_pattern, ret)

    @pytest.mark.parametrize(
        "content, error_code, message",
        [
            pytest.param(
                "zzz",
                PythonParsingError,
                "Failed to load python module. Python parsing failed: (NameError) name 'zzz' is not defined",
                id="PythonParsingError_NameError",
            ),
            pytest.param(
                "# Nothing",
                NoToolDefined,
                "No tool found in the python script. "
                "Please make sure you have one and only one tool definition in your script.",
                id="NoToolDefined",
            ),
            pytest.param(
                "multiple_tools.py",
                MultipleToolsDefined,
                "Expected 1 but collected 2 tools: tool1, tool2. "
                "Please make sure you have one and only one tool definition in your script.",
                id="MultipleToolsDefined",
            ),
            pytest.param(
                "{% zzz",
                PythonParsingError,
                "Failed to load python module. Python parsing failed: "
                "(SyntaxError) invalid syntax (<string>, line 1)",
                id="PythonParsingError_SyntaxError",
            ),
        ],
    )
    def test_custom_python_meta(self, content, error_code, message) -> None:
        """generate_python_meta raises the exact expected error for bad scripts.

        ``content`` ending in .py is loaded from TOOLS_ROOT; otherwise it is the
        script source itself.
        """
        if content.endswith(".py"):
            source = TOOLS_ROOT / content
            with open(source, "r") as f:
                code = f.read()
        else:
            code = content
            source = None
        with pytest.raises(error_code) as ex:
            generate_python_meta("some_tool", code, source)
        assert message == str(ex.value)

    @pytest.mark.parametrize(
        "content, error_code, message",
        [
            pytest.param(
                "{% zzz",
                JinjaParsingError,
                "Generate tool meta failed for llm tool. Jinja parsing failed at line 1: "
                "(TemplateSyntaxError) Encountered unknown tag 'zzz'.",
                id="JinjaParsingError_Code",
            ),
            pytest.param(
                "no_end.jinja2",
                JinjaParsingError,
                "Generate tool meta failed for llm tool. Jinja parsing failed at line 2: "
                "(TemplateSyntaxError) Unexpected end of template. Jinja was looking for the following tags: "
                "'endfor' or 'else'. The innermost block that needs to be closed is 'for'.",
                id="JinjaParsingError_File",
            ),
        ],
    )
    def test_custom_llm_meta(self, content, error_code, message) -> None:
        """generate_prompt_meta (llm mode) raises on malformed jinja templates."""
        if content.endswith(".jinja2"):
            with open(TOOLS_ROOT / content, "r") as f:
                code = f.read()
        else:
            code = content
        with pytest.raises(error_code) as ex:
            generate_prompt_meta("some_tool", code)
        assert message == str(ex.value)

    @pytest.mark.parametrize(
        "content, error_code, message",
        [
            pytest.param(
                "{% zzz",
                JinjaParsingError,
                "Generate tool meta failed for prompt tool. Jinja parsing failed at line 1: "
                "(TemplateSyntaxError) Encountered unknown tag 'zzz'.",
                id="JinjaParsingError_Code",
            ),
            pytest.param(
                "no_end.jinja2",
                JinjaParsingError,
                "Generate tool meta failed for prompt tool. Jinja parsing failed at line 2: "
                "(TemplateSyntaxError) Unexpected end of template. Jinja was looking for the following tags: "
                "'endfor' or 'else'. The innermost block that needs to be closed is 'for'.",
                id="JinjaParsingError_File",
            ),
        ],
    )
    def test_custom_prompt_meta(self, content, error_code, message) -> None:
        """generate_prompt_meta with prompt_only=True raises on malformed templates."""
        if content.endswith(".jinja2"):
            with open(TOOLS_ROOT / content, "r") as f:
                code = f.read()
        else:
            code = content
        with pytest.raises(error_code) as ex:
            generate_prompt_meta("some_tool", code, prompt_only=True)
        assert message == str(ex.value)


@pytest.mark.unittest
class TestPythonLoadError:
    """Tests for the additionalInfo (user stack trace) attached to PythonLoadError."""

    def test_additional_info(self):
        source = TOOLS_ROOT / "load_error.py"
        with open(source, "r") as f:
            code = f.read()
        with pytest.raises(PythonLoadError) as ex:
            generate_python_meta("some_tool", code, source)

        additional_info = ExceptionPresenter.create(ex.value).to_dict().get("additionalInfo")
        assert len(additional_info) == 1
        info_0 = additional_info[0]
        assert info_0["type"] == "UserCodeStackTrace"
        info_0_value = info_0["info"]
        assert info_0_value.get("type") == "ZeroDivisionError"
        assert info_0_value.get("message") == "division by zero"
        assert re.match(r".*load_error.py", info_0_value["filename"])
        assert info_0_value.get("lineno") == 3
        assert info_0_value.get("name") == "<module>"
        assert re.search(
            r"Traceback \(most recent call last\):\n"
            r'  File ".*load_error.py", line .*, in <module>\n'
            r"    1 / 0\n"
            r"(.*\n)?"  # Python >= 3.11 add extra line here like a pointer.
            r"ZeroDivisionError: division by zero\n",
            info_0_value.get("traceback"),
        )

    def test_additional_info_for_empty_inner_error(self):
        # No inner error -> no additionalInfo section at all.
        ex = PythonLoadError(message_format="Test empty error")
        additional_info = ExceptionPresenter.create(ex).to_dict().get("additionalInfo")
        assert additional_info is None
promptflow/src/promptflow/tests/executor/unittests/_utils/test_generate_tool_meta_utils.py/0
{ "file_path": "promptflow/src/promptflow/tests/executor/unittests/_utils/test_generate_tool_meta_utils.py", "repo_id": "promptflow", "token_count": 5664 }
48
from datetime import datetime import pytest from promptflow.contracts.run_info import FlowRunInfo, RunInfo, Status @pytest.mark.unittest class TestStatus: @pytest.mark.parametrize( "status,expected", [ (Status.Completed, True), (Status.Failed, True), (Status.Bypassed, True), (Status.Canceled, True), (Status.Running, False), (Status.Preparing, False), (Status.NotStarted, False), (Status.CancelRequested, False), (123, False), ], ) def test_status_is_terminated(self, status, expected): assert Status.is_terminated(status) == expected @pytest.mark.unittest class TestRunInfo: def test_creation(self): run_info = RunInfo( node="node1", flow_run_id="123", run_id="123:456", status=Status.Running, inputs=[], output={}, metrics={}, error={}, parent_run_id="789", start_time=datetime.now(), end_time=datetime.now(), system_metrics={}, ) assert run_info.node == "node1" assert run_info.flow_run_id == "123" assert run_info.run_id == "123:456" assert run_info.status == Status.Running def test_deserialize(self): run_info_dict = { "node": "get_answer", "flow_run_id": "", "run_id": "dummy_run_id", "status": "Completed", "inputs": {"question": "string"}, "output": "Hello world: What's promptflow?", "metrics": None, "error": None, "parent_run_id": "dummy_flow_run_id", "start_time": "2023-11-24T06:03:20.2688262Z", "end_time": "2023-11-24T06:03:20.268858Z", "index": 0, "api_calls": None, "variant_id": "", "cached_run_id": None, "cached_flow_run_id": None, "logs": None, "system_metrics": {"duration": "00:00:00.0000318", "total_tokens": 0}, "result": "Hello world: What's promptflow?", } run_info = RunInfo.deserialize(run_info_dict) assert run_info.index == 0 assert isinstance(run_info.start_time, datetime) and isinstance(run_info.end_time, datetime) assert run_info.status == Status.Completed assert run_info.run_id == "dummy_run_id" assert run_info.api_calls is None assert run_info.system_metrics == {"duration": "00:00:00.0000318", "total_tokens": 0} assert run_info.output == "Hello world: 
What's promptflow?" @pytest.mark.unittest class TestFlowRunInfo: def test_creation(self): flow_run_info = FlowRunInfo( run_id="123:456", status=Status.Running, error={}, inputs={}, output={}, metrics={}, request={}, parent_run_id="789", root_run_id="123", source_run_id="456", flow_id="flow1", start_time=datetime.now(), end_time=datetime.now(), system_metrics={}, upload_metrics=False, ) assert flow_run_info.run_id == "123:456" assert flow_run_info.status == Status.Running assert flow_run_info.flow_id == "flow1" def test_deserialize(self): flow_run_info_dict = { "run_id": "dummy_run_id", "status": "Completed", "error": None, "inputs": {"question": "What's promptflow?"}, "output": {"answer": "Hello world: What's promptflow?"}, "metrics": None, "request": None, "parent_run_id": None, "root_run_id": None, "source_run_id": None, "flow_id": "Flow", "start_time": "2023-11-23T10:58:37.9436245Z", "end_time": "2023-11-23T10:58:37.9590789Z", "index": 0, "api_calls": None, "variant_id": "", "name": "", "description": "", "tags": None, "system_metrics": {"duration": "00:00:00.0154544", "total_tokens": 0}, "result": {"answer": "Hello world: What's promptflow?"}, "upload_metrics": False, } flow_run_info = FlowRunInfo.deserialize(flow_run_info_dict) assert flow_run_info.index == 0 assert isinstance(flow_run_info.start_time, datetime) and isinstance(flow_run_info.end_time, datetime) assert flow_run_info.status == Status.Completed assert flow_run_info.run_id == "dummy_run_id" assert flow_run_info.api_calls is None assert flow_run_info.system_metrics == {"duration": "00:00:00.0154544", "total_tokens": 0} assert flow_run_info.output == {"answer": "Hello world: What's promptflow?"}
promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_info.py/0
{ "file_path": "promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_info.py", "repo_id": "promptflow", "token_count": 2527 }
49
import re from pathlib import Path import pydash import pytest from promptflow._utils.yaml_utils import dump_yaml, load_yaml_string from promptflow.connections import AzureOpenAIConnection from .._azure_utils import DEFAULT_TEST_TIMEOUT, PYTEST_TIMEOUT_METHOD from ..recording_utilities import is_live PROMOTFLOW_ROOT = Path(__file__) / "../../../.." TEST_ROOT = Path(__file__).parent.parent.parent MODEL_ROOT = TEST_ROOT / "test_configs/e2e_samples" CONNECTION_FILE = (PROMOTFLOW_ROOT / "connections.json").resolve().absolute().as_posix() def assert_dict_equals_with_skip_fields(item1, item2, skip_fields): for fot_key in skip_fields: pydash.set_(item1, fot_key, None) pydash.set_(item2, fot_key, None) assert item1 == item2 def normalize_arm_id(origin_value: str): if origin_value: m = re.match( r"(.*)/subscriptions/[a-z0-9\-]+/resourceGroups/[a-z0-9\-]+/providers/" r"Microsoft.MachineLearningServices/workspaces/[a-z0-9\-]+/([a-z]+)/[^/]+/versions/([a-z0-9\-]+)", origin_value, ) if m: prefix, asset_type, _ = m.groups() return ( f"{prefix}/subscriptions/xxx/resourceGroups/xxx/providers/" f"Microsoft.MachineLearningServices/workspaces/xxx/{asset_type}/xxx/versions/xxx" ) return None def update_saved_spec(component, saved_spec_path: str): yaml_text = component._to_yaml() saved_spec_path = Path(saved_spec_path) yaml_content = load_yaml_string(yaml_text) if yaml_content.get("creation_context"): for key in yaml_content.get("creation_context"): yaml_content["creation_context"][key] = "xxx" for key in ["task.code", "task.environment", "id"]: target_value = normalize_arm_id(pydash.get(yaml_content, key)) if target_value: pydash.set_(yaml_content, key, target_value) yaml_text = dump_yaml(yaml_content) if saved_spec_path.is_file(): current_spec_text = saved_spec_path.read_text() if current_spec_text == yaml_text: return saved_spec_path.parent.mkdir(parents=True, exist_ok=True) saved_spec_path.write_text(yaml_text) @pytest.mark.skipif( condition=not is_live(), reason="flow in pipeline 
tests require secrets config file, only run in live mode.", ) @pytest.mark.usefixtures("use_secrets_config_file") @pytest.mark.timeout(timeout=DEFAULT_TEST_TIMEOUT, method=PYTEST_TIMEOUT_METHOD) @pytest.mark.e2etest class TestFlowInAzureML: @pytest.mark.parametrize( "load_params, expected_spec_attrs", [ pytest.param( { "name": "web_classification_4", "version": "1.0.0", "description": "Create flows that use large language models to " "classify URLs into multiple categories.", "environment_variables": { "verbose": "true", }, }, { "name": "web_classification_4", "version": "1.0.0", "description": "Create flows that use large language models to " "classify URLs into multiple categories.", "type": "parallel", }, id="parallel", ), ], ) def test_flow_as_component( self, azure_open_ai_connection: AzureOpenAIConnection, temp_output_dir, ml_client, load_params: dict, expected_spec_attrs: dict, request, ) -> None: # keep the simplest test here, more tests are in azure-ai-ml from azure.ai.ml import load_component flows_dir = "./tests/test_configs/flows" flow_func: Component = load_component( f"{flows_dir}/web_classification/flow.dag.yaml", params_override=[load_params] ) # TODO: snapshot of flow component changed every time? created_component = ml_client.components.create_or_update(flow_func, is_anonymous=True) update_saved_spec( created_component, f"./tests/test_configs/flows/saved_component_spec/{request.node.callspec.id}.yaml" ) component_dict = created_component._to_dict() slimmed_created_component_attrs = {key: pydash.get(component_dict, key) for key in expected_spec_attrs.keys()} assert slimmed_created_component_attrs == expected_spec_attrs
promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_in_azure_ml.py/0
{ "file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_in_azure_ml.py", "repo_id": "promptflow", "token_count": 2108 }
50
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import shutil import tempfile import uuid from pathlib import Path import pytest from mock.mock import Mock from promptflow._sdk._load_functions import load_run from promptflow._sdk._vendor import get_upload_files_from_folder from promptflow._utils.flow_utils import load_flow_dag from promptflow.azure._constants._flow import ENVIRONMENT, PYTHON_REQUIREMENTS_TXT from promptflow.azure._entities._flow import Flow tests_root_dir = Path(__file__).parent.parent.parent FLOWS_DIR = (tests_root_dir / "test_configs/flows").resolve() RUNS_DIR = (tests_root_dir / "test_configs/runs").resolve() def load_flow(source): from promptflow.azure._load_functions import load_flow return load_flow(source=source) @pytest.mark.unittest class TestFlow: @pytest.mark.skip(reason="TODO: add back when we bring back meta.yaml") def test_load_flow(self): local_file = tests_root_dir / "test_configs/flows/meta_files/flow.meta.yaml" flow = load_flow(source=local_file) assert flow._to_dict() == { "name": "web_classificiation_flow_3", "description": "Create flows that use large language models to classify URLs into multiple categories.", "display_name": "Web Classification", "type": "default", "path": "./flow.dag.yaml", } rest_dict = flow._to_rest_object().as_dict() assert rest_dict == { "description": "Create flows that use large language models to classify URLs into multiple categories.", "flow_name": "Web Classification", "flow_run_settings": {}, "flow_type": "default", "is_archived": True, "flow_definition_file_path": "./flow.dag.yaml", } @pytest.mark.skip(reason="TODO: add back when we bring back meta.yaml") def test_load_flow_from_remote_storage(self): from promptflow.azure.operations._flow_operations import FlowOperations local_file = tests_root_dir / "test_configs/flows/meta_files/remote_fs.meta.yaml" flow = 
load_flow(source=local_file) assert flow._to_dict() == { "name": "classification_accuracy_eval", "path": "azureml://datastores/workspaceworkingdirectory/paths/Users/wanhan/my_flow_snapshot/flow.dag.yaml", "type": "evaluation", } FlowOperations._try_resolve_code_for_flow(flow, Mock()) rest_dict = flow._to_rest_object().as_dict() assert rest_dict == { "flow_definition_file_path": "Users/wanhan/my_flow_snapshot/flow.dag.yaml", "flow_run_settings": {}, "flow_type": "evaluation", "is_archived": True, } def test_ignore_files_in_flow(self): local_file = tests_root_dir / "test_configs/flows/web_classification" with tempfile.TemporaryDirectory() as temp: flow_path = Path(temp) / "flow" shutil.copytree(local_file, flow_path) assert (Path(temp) / "flow/.promptflow/flow.tools.json").exists() (Path(flow_path) / ".runs").mkdir(parents=True) (Path(flow_path) / ".runs" / "mock.file").touch() flow = load_flow(source=flow_path) with flow._build_code() as code: assert code is not None upload_paths = get_upload_files_from_folder( path=code.path, ignore_file=code._ignore_file, ) flow_files = list(sorted([item[1] for item in upload_paths])) # assert that .runs/mock.file are ignored assert ".runs/mock.file" not in flow_files # Web classification may be executed and include flow.detail.json, flow.logs, flow.outputs.json assert all( file in flow_files for file in [ ".promptflow/flow.tools.json", "classify_with_llm.jinja2", "convert_to_dict.py", "fetch_text_content_from_url.py", "fetch_text_content_from_url_input.jsonl", "flow.dag.yaml", "prepare_examples.py", "samples.json", "summarize_text_content.jinja2", "summarize_text_content__variant_1.jinja2", "webClassification20.csv", ] ) def test_load_yaml_run_with_resources(self): source = f"{RUNS_DIR}/sample_bulk_run_with_resources.yaml" run = load_run(source=source, params_override=[{"name": str(uuid.uuid4())}]) assert run._resources["instance_type"] == "Standard_D2" assert run._resources["idle_time_before_shutdown_minutes"] == 60 def 
test_flow_with_additional_includes(self): flow_folder = FLOWS_DIR / "web_classification_with_additional_include" flow = load_flow(source=flow_folder) with flow._build_code() as code: assert code is not None _, temp_flow = load_flow_dag(code.path) assert "additional_includes" not in temp_flow upload_paths = get_upload_files_from_folder( path=code.path, ignore_file=code._ignore_file, ) flow_files = list(sorted([item[1] for item in upload_paths])) target_additional_includes = [ "convert_to_dict.py", "fetch_text_content_from_url.py", "summarize_text_content.jinja2", "external_files/convert_to_dict.py", "external_files/fetch_text_content_from_url.py", "external_files/summarize_text_content.jinja2", ] # assert all additional includes are included for file in target_additional_includes: assert file in flow_files def test_flow_with_ignore_file(self): flow_folder = FLOWS_DIR / "flow_with_ignore_file" flow = load_flow(source=flow_folder) with flow._build_code() as code: assert code is not None upload_paths = get_upload_files_from_folder( path=code.path, ignore_file=code._ignore_file, ) flow_files = list(sorted([item[1] for item in upload_paths])) assert len(flow_files) > 0 target_ignored_files = ["ignored_folder/1.txt", "random.ignored"] # assert all ignored files are ignored for file in target_ignored_files: assert file not in flow_files def test_resolve_requirements(self): flow_dag = {} # Test when requirements.txt does not exist assert not Flow._resolve_requirements(flow_path=FLOWS_DIR / "flow_with_ignore_file", flow_dag=flow_dag) # Test when requirements.txt exists but already added to flow_dag flow_dag[ENVIRONMENT] = {PYTHON_REQUIREMENTS_TXT: "another_requirements.txt"} assert not Flow._resolve_requirements(flow_path=FLOWS_DIR / "flow_with_requirements_txt", flow_dag=flow_dag) # Test when requirements.txt exists and not added to flow_dag flow_dag = {} assert Flow._resolve_requirements(flow_path=FLOWS_DIR / "flow_with_requirements_txt", flow_dag=flow_dag) def 
test_resolve_requirements_for_flow(self): with tempfile.TemporaryDirectory() as temp: temp = Path(temp) # flow without environment section flow_folder = FLOWS_DIR / "flow_with_requirements_txt" shutil.copytree(flow_folder, temp / "flow_with_requirements_txt") flow_folder = temp / "flow_with_requirements_txt" flow = load_flow(source=flow_folder) with flow._build_code(): _, flow_dag = load_flow_dag(flow_path=flow_folder) assert flow_dag[ENVIRONMENT] == {"python_requirements_txt": "requirements.txt"} _, flow_dag = load_flow_dag(flow_path=flow_folder) assert ENVIRONMENT not in flow_dag # flow with environment section flow_folder = FLOWS_DIR / "flow_with_requirements_txt_and_env" shutil.copytree(flow_folder, temp / "flow_with_requirements_txt_and_env") flow_folder = temp / "flow_with_requirements_txt_and_env" flow = load_flow(source=flow_folder) with flow._build_code(): _, flow_dag = load_flow_dag(flow_path=flow_folder) assert flow_dag[ENVIRONMENT] == { "image": "python:3.8-slim", "python_requirements_txt": "requirements.txt", } _, flow_dag = load_flow_dag(flow_path=flow_folder) assert flow_dag[ENVIRONMENT] == {"image": "python:3.8-slim"}
promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_flow_entity.py/0
{ "file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_flow_entity.py", "repo_id": "promptflow", "token_count": 4148 }
51
import contextlib import io import multiprocessing import os import sys import tempfile import timeit import uuid from pathlib import Path from unittest import mock import pytest from promptflow._cli._user_agent import USER_AGENT as CLI_USER_AGENT # noqa: E402 from promptflow._sdk._telemetry import log_activity from promptflow._sdk._utils import ClientUserAgentUtil FLOWS_DIR = "./tests/test_configs/flows" CONNECTIONS_DIR = "./tests/test_configs/connections" DATAS_DIR = "./tests/test_configs/datas" def mock_log_activity(*args, **kwargs): custom_message = "github run: https://github.com/microsoft/promptflow/actions/runs/{0}".format( os.environ.get("GITHUB_RUN_ID") ) if len(args) == 4: if args[3] is not None: args[3]["custom_message"] = custom_message else: args = list(args) args[3] = {"custom_message": custom_message} elif "custom_dimensions" in kwargs and kwargs["custom_dimensions"] is not None: kwargs["custom_dimensions"]["custom_message"] = custom_message else: kwargs["custom_dimensions"] = {"custom_message": custom_message} return log_activity(*args, **kwargs) def run_cli_command(cmd, time_limit=3600, result_queue=None): from promptflow._cli._pf.entry import main sys.argv = list(cmd) output = io.StringIO() st = timeit.default_timer() with contextlib.redirect_stdout(output), mock.patch.object( ClientUserAgentUtil, "get_user_agent" ) as get_user_agent_fun, mock.patch( "promptflow._sdk._telemetry.activity.log_activity", side_effect=mock_log_activity ), mock.patch( "promptflow._cli._pf.entry.log_activity", side_effect=mock_log_activity ): # Client side will modify user agent only through ClientUserAgentUtil to avoid impact executor/runtime. 
get_user_agent_fun.return_value = f"{CLI_USER_AGENT} perf_monitor/1.0" user_agent = ClientUserAgentUtil.get_user_agent() assert user_agent == f"{CLI_USER_AGENT} perf_monitor/1.0" main() ed = timeit.default_timer() print(f"{cmd}, \n Total time: {ed - st}s") assert ed - st < time_limit, f"The time limit is {time_limit}s, but it took {ed - st}s." res_value = output.getvalue() if result_queue: result_queue.put(res_value) return res_value def subprocess_run_cli_command(cmd, time_limit=3600): result_queue = multiprocessing.Queue() process = multiprocessing.Process( target=run_cli_command, args=(cmd,), kwargs={"time_limit": time_limit, "result_queue": result_queue} ) process.start() process.join() assert process.exitcode == 0 return result_queue.get_nowait() @pytest.mark.usefixtures("use_secrets_config_file", "setup_local_connection") @pytest.mark.perf_monitor_test class TestCliPerf: def test_pf_run_create(self, time_limit=20) -> None: res = subprocess_run_cli_command( cmd=( "pf", "run", "create", "--flow", f"{FLOWS_DIR}/print_input_flow", "--data", f"{DATAS_DIR}/print_input_flow.jsonl", ), time_limit=time_limit, ) assert "Completed" in res def test_pf_run_update(self, time_limit=10) -> None: run_name = str(uuid.uuid4()) run_cli_command( cmd=( "pf", "run", "create", "--flow", f"{FLOWS_DIR}/print_input_flow", "--data", f"{DATAS_DIR}/print_input_flow.jsonl", "--name", run_name, ) ) res = subprocess_run_cli_command( cmd=("pf", "run", "update", "--name", run_name, "--set", "description=test pf run update"), time_limit=time_limit, ) assert "Completed" in res def test_pf_flow_test(self, time_limit=10): subprocess_run_cli_command( cmd=( "pf", "flow", "test", "--flow", f"{FLOWS_DIR}/print_input_flow", "--inputs", "text=https://www.youtube.com/watch?v=o5ZQyXaAv1g", ), time_limit=time_limit, ) output_path = Path(FLOWS_DIR) / "print_input_flow" / ".promptflow" / "flow.output.json" assert output_path.exists() def test_pf_flow_build(self, time_limit=20): with 
tempfile.TemporaryDirectory() as temp_dir: subprocess_run_cli_command( cmd=( "pf", "flow", "build", "--source", f"{FLOWS_DIR}/print_input_flow/flow.dag.yaml", "--output", temp_dir, "--format", "docker", ), time_limit=time_limit, ) def test_pf_connection_create(self, time_limit=10): name = f"Connection_{str(uuid.uuid4())[:4]}" res = subprocess_run_cli_command( cmd=( "pf", "connection", "create", "--file", f"{CONNECTIONS_DIR}/azure_openai_connection.yaml", "--name", f"{name}", ), time_limit=time_limit, ) assert "api_type" in res def test_pf_connection_list(self, time_limit=10): name = "connection_list" res = run_cli_command( cmd=( "pf", "connection", "create", "--file", f"{CONNECTIONS_DIR}/azure_openai_connection.yaml", "--name", f"{name}", ) ) assert "api_type" in res res = subprocess_run_cli_command(cmd=("pf", "connection", "list"), time_limit=time_limit) assert "api_type" in res
promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_cli_perf.py/0
{ "file_path": "promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_cli_perf.py", "repo_id": "promptflow", "token_count": 3111 }
52
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- import hashlib import json import os import shelve from pathlib import Path from typing import Dict from filelock import FileLock from promptflow.exceptions import PromptflowException from .constants import ENVIRON_TEST_MODE, RecordMode class RecordItemMissingException(PromptflowException): """Exception raised when record item missing.""" pass class RecordFileMissingException(PromptflowException): """Exception raised when record file missing or invalid.""" pass class RecordStorage(object): """ RecordStorage is used to store the record of node run. File often stored in .promptflow/node_cache.shelve Currently only text input/output could be recorded. Example of cached items: { "/record/file/resolved": { "hash_value": { # hash_value is sha1 of dict, accelerate the search "input": { "key1": "value1", # Converted to string, type info dropped }, "output": "output_convert_to_string", "output_type": "output_type" # Currently support only simple strings. } } } """ _standard_record_folder = ".promptflow" _standard_record_name = "node_cache.shelve" _instance = None def __init__(self, record_file: str = None): """ RecordStorage is used to store the record of node run. """ self._record_file: Path = None self.cached_items: Dict[str, Dict[str, Dict[str, object]]] = {} self.record_file = record_file @property def record_file(self) -> Path: return self._record_file @record_file.setter def record_file(self, record_file_input) -> None: """ Will load record_file if exist. 
""" if record_file_input == self._record_file: return if isinstance(record_file_input, str): self._record_file = Path(record_file_input).resolve() elif isinstance(record_file_input, Path): self._record_file = record_file_input.resolve() else: return if not self._record_file.parts[-1].endswith(RecordStorage._standard_record_name): record_folder = self._record_file / RecordStorage._standard_record_folder self._record_file = record_folder / RecordStorage._standard_record_name else: record_folder = self._record_file.parent self._record_file_str = str(self._record_file.resolve()) # cache folder we could create if not exist. if not record_folder.exists(): record_folder.mkdir(parents=True, exist_ok=True) # if file exist, load file if self.exists_record_file(record_folder, self._record_file.parts[-1]): self._load_file() else: self.cached_items = { self._record_file_str: {}, } def exists_record_file(self, record_folder, file_name) -> bool: files = os.listdir(record_folder) for file in files: if file.startswith(file_name): return True return False def _write_file(self, hashkey) -> None: file_content = self.cached_items.get(self._record_file_str, None) if file_content is not None: file_content_line = file_content.get(hashkey, None) if file_content_line is not None: lock = FileLock(self.record_file.parent / "record_file.lock") with lock: saved_dict = shelve.open(self._record_file_str, "c", writeback=False) saved_dict[hashkey] = file_content_line saved_dict.close() else: raise RecordItemMissingException(f"Record item not found in cache with hashkey {hashkey}.") else: raise RecordFileMissingException( f"This exception should not happen here, but record file is not found {self._record_file_str}." 
) def _load_file(self) -> None: local_content = self.cached_items.get(self._record_file_str, None) if not local_content: if RecordStorage.is_recording_mode(): lock = FileLock(self.record_file.parent / "record_file.lock") with lock: if not self.exists_record_file(self.record_file.parent, self.record_file.parts[-1]): return self.cached_items[self._record_file_str] = {} saved_dict = shelve.open(self._record_file_str, "r", writeback=False) for key, value in saved_dict.items(): self.cached_items[self._record_file_str][key] = value saved_dict.close() else: if not self.exists_record_file(self.record_file.parent, self.record_file.parts[-1]): return self.cached_items[self._record_file_str] = {} saved_dict = shelve.open(self._record_file_str, "r", writeback=False) for key, value in saved_dict.items(): self.cached_items[self._record_file_str][key] = value saved_dict.close() def delete_lock_file(self): lock_file = self.record_file.parent / "record_file.lock" if lock_file.exists(): os.remove(lock_file) def get_record(self, input_dict: Dict) -> object: """ Get record from local storage. :param input_dict: input dict of critical AOAI inputs :type input_dict: Dict :raises RecordFileMissingException: Record file not exist :raises RecordItemMissingException: Record item not exist in record file :return: original output of node run :rtype: object """ input_dict = self._recursive_create_hashable_args(input_dict) hash_value: str = hashlib.sha1(str(sorted(input_dict.items())).encode("utf-8")).hexdigest() current_saved_records: Dict[str, str] = self.cached_items.get(self._record_file_str, None) if current_saved_records is None: raise RecordFileMissingException(f"Record file not found {self.record_file}.") saved_output = current_saved_records.get(hash_value, None) if saved_output is None: raise RecordItemMissingException( f"Record item not found in file {self.record_file}.\n" f"values: {json.dumps(input_dict)}\n" ) # not all items are reserved in the output dict. 
output = saved_output["output"] output_type = saved_output["output_type"] if "generator" in output_type: return self._create_output_generator(output, output_type) else: return output def _recursive_create_hashable_args(self, item): if isinstance(item, tuple): return [self._recursive_create_hashable_args(i) for i in item] if isinstance(item, list): return [self._recursive_create_hashable_args(i) for i in item] if isinstance(item, dict): return {k: self._recursive_create_hashable_args(v) for k, v in item.items()} elif "module: promptflow.connections" in str(item) or "object at" in str(item): return [] else: return item def _parse_output_generator(self, output): """ Special handling for generator type. Since pickle will not work for generator. Returns the real list for reocrding, and create a generator for original output. Parse output has a simplified hypothesis: output is simple dict, list or generator, because a full schema of output is too heavy to handle. Example: {"answer": <generator>, "a": "b"}, <generator> """ output_type = "" output_value = None output_generator = None if isinstance(output, dict): output_value = {} output_generator = {} for item in output.items(): k, v = item if type(v).__name__ == "generator": vlist = list(v) def vgenerator(): for vitem in vlist: yield vitem output_value[k] = vlist output_generator[k] = vgenerator() output_type = "dict[generator]" else: output_value[k] = v elif type(output).__name__ == "generator": output_value = list(output) def generator(): for item in output_value: yield item output_generator = generator() output_type = "generator" else: output_value = output output_generator = None output_type = type(output).__name__ return output_value, output_generator, output_type def _create_output_generator(self, output, output_type): """ Special handling for generator type. Returns a generator for original output. Create output has a simplified hypothesis: All list with output type generator is treated as generator. 
""" output_generator = None if output_type == "dict[generator]": output_generator = {} for k, v in output.items(): if type(v).__name__ == "list": def vgenerator(): for item in v: yield item output_generator[k] = vgenerator() else: output_generator[k] = v elif output_type == "generator": def generator(): for item in output: yield item output_generator = generator() return output_generator def set_record(self, input_dict: Dict, output): """ Set record to local storage, always override the old record. :param input_dict: input dict of critical AOAI inputs :type input_dict: OrderedDict :param output: original output of node run :type output: object """ # filter args, object at will not hash input_dict = self._recursive_create_hashable_args(input_dict) hash_value: str = hashlib.sha1(str(sorted(input_dict.items())).encode("utf-8")).hexdigest() current_saved_records: Dict[str, str] = self.cached_items.get(self._record_file_str, None) output_value, output_generator, output_type = self._parse_output_generator(output) if current_saved_records is None: current_saved_records = {} current_saved_records[hash_value] = { "input": input_dict, "output": output_value, "output_type": output_type, } else: saved_output = current_saved_records.get(hash_value, None) if saved_output is not None: if saved_output["output"] == output_value and saved_output["output_type"] == output_type: if "generator" in output_type: return output_generator else: return output_value else: current_saved_records[hash_value] = { "input": input_dict, "output": output_value, "output_type": output_type, } else: current_saved_records[hash_value] = { "input": input_dict, "output": output_value, "output_type": output_type, } self.cached_items[self._record_file_str] = current_saved_records self._write_file(hash_value) if "generator" in output_type: return output_generator else: return output_value @classmethod def get_test_mode_from_environ(cls) -> str: return os.getenv(ENVIRON_TEST_MODE, RecordMode.LIVE) @classmethod 
def is_recording_mode(cls) -> bool: return RecordStorage.get_test_mode_from_environ() == RecordMode.RECORD @classmethod def is_replaying_mode(cls) -> bool: return RecordStorage.get_test_mode_from_environ() == RecordMode.REPLAY @classmethod def is_live_mode(cls) -> bool: return RecordStorage.get_test_mode_from_environ() == RecordMode.LIVE @classmethod def get_instance(cls, record_file=None) -> "RecordStorage": """ Use this to get instance to avoid multiple copies of same record storage. :param record_file: initiate at first entrance, defaults to None in the first call will raise exception. :type record_file: str or Path, optional :return: instance of RecordStorage :rtype: RecordStorage """ # if not in recording mode, return None if not (RecordStorage.is_recording_mode() or RecordStorage.is_replaying_mode()): return None # Create instance if not exist if cls._instance is None: if record_file is None: raise RecordFileMissingException("record_file is value None") cls._instance = RecordStorage(record_file) if record_file is not None: cls._instance.record_file = record_file return cls._instance
promptflow/src/promptflow/tests/sdk_cli_test/recording_utilities/record_storage.py/0
{ "file_path": "promptflow/src/promptflow/tests/sdk_cli_test/recording_utilities/record_storage.py", "repo_id": "promptflow", "token_count": 6211 }
53
[run] source = */promptflow/_sdk/_service/* omit = */promptflow/_cli/* */promptflow/azure/* */promptflow/entities/* */promptflow/operations/* *__init__.py*
promptflow/src/promptflow/tests/sdk_pfs_test/.coveragerc/0
{ "file_path": "promptflow/src/promptflow/tests/sdk_pfs_test/.coveragerc", "repo_id": "promptflow", "token_count": 83 }
54
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/CognitiveSearchConnection.schema.json name: my_cognitive_search_connection type: cognitive_search # snake case api_key: "<to-be-replaced>" api_base: "endpoint" api_version: "2023-07-01-Preview"
promptflow/src/promptflow/tests/test_configs/connections/cognitive_search_connection.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/connections/cognitive_search_connection.yaml", "repo_id": "promptflow", "token_count": 93 }
55
{"image": {"data:image/png;path":"logo_1.png"}} {"image": {"data:image/png;path":"logo_2.png"}}
promptflow/src/promptflow/tests/test_configs/datas/image_inputs/inputs.jsonl/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/datas/image_inputs/inputs.jsonl", "repo_id": "promptflow", "token_count": 41 }
56
{"text": "https://www.youtube.com/watch?v=o5ZQyXaAv1g"}
promptflow/src/promptflow/tests/test_configs/datas/print_input_flow.jsonl/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/datas/print_input_flow.jsonl", "repo_id": "promptflow", "token_count": 28 }
57
# --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- def my_flow(input_val: str): """Simple flow without yaml.""" print(f"Hello world! {input_val}")
promptflow/src/promptflow/tests/test_configs/eager_flows/simple_without_yaml/entry.py/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/simple_without_yaml/entry.py", "repo_id": "promptflow", "token_count": 65 }
58
from typing import List from promptflow import tool @tool def aggregate(processed_results: List[str]): aggregated_results = processed_results # raise error to test aggregation node failed num = 1/0 return aggregated_results
promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/aggregate.py/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/aggregate.py", "repo_id": "promptflow", "token_count": 71 }
59
version: 2 inputs: assistant_input: type: list default: - type: text text: The provided file contains end-of-day (EOD) stock prices for companies A and B across various dates in March. However, it does not include the EOD stock prices for Company C. - type: file_path file_path: path: ./stock_price.csv - type: text text: Please draw a line chart with the stock price of the company A, B and C and return a CVS file with the data. assistant_id: type: string default: asst_eHO2rwEYqGH3pzzHHov2kBCG thread_id: type: string default: "" outputs: assistant_output: type: string reference: ${add_message_and_run.output} thread_id: type: string reference: ${get_or_create_thread.output} nodes: - name: get_or_create_thread type: python source: type: code path: get_or_create_thread.py inputs: conn: chw_openai thread_id: ${inputs.thread_id} - name: add_message_and_run type: python source: type: code path: add_message_and_run.py inputs: conn: chw_openai message: ${inputs.assistant_input} assistant_id: ${inputs.assistant_id} thread_id: ${get_or_create_thread.output} assistant_definition: assistant_definition.yaml download_images: true
promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/flow.dag.yaml", "repo_id": "promptflow", "token_count": 517 }
60
inputs: chat_history: type: list question: type: string is_chat_input: true default: What is ChatGPT? outputs: answer: type: string reference: ${chat_node.output} is_chat_output: true multi_answer: type: string reference: ${chat_node.output} is_chat_output: true nodes: - inputs: deployment_name: gpt-35-turbo max_tokens: "256" temperature: "0.7" chat_history: ${inputs.chat_history} question: ${inputs.question} name: chat_node type: llm source: type: code path: chat.jinja2 api: chat provider: AzureOpenAI connection: azure_open_ai_connection
promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_multi_output_invalid/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_multi_output_invalid/flow.dag.yaml", "repo_id": "promptflow", "token_count": 260 }
61
name: TestPythonToolLongWaitTime inputs: input1: type: bool input2: type: bool input3: type: bool input4: type: bool outputs: output: type: int reference: ${wait_long_1.output} nodes: - name: wait_1 type: python source: type: code path: wait_short.py inputs: throw_exception: ${inputs.input1} - name: wait_2 type: python source: type: code path: wait_short.py inputs: throw_exception: ${inputs.input2} - name: wait_3 type: python source: type: code path: wait_short.py inputs: throw_exception: ${inputs.input3} - name: wait_4 type: python source: type: code path: wait_short.py inputs: throw_exception: ${inputs.input4} - name: wait_long_1 type: python source: type: code path: wait_long.py inputs: text_1: ${wait_1.output} text_2: ${wait_2.output} text_3: ${wait_3.output} text_4: ${wait_4.output}
promptflow/src/promptflow/tests/test_configs/flows/concurrent_execution_flow/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/concurrent_execution_flow/flow.dag.yaml", "repo_id": "promptflow", "token_count": 427 }
62
from promptflow import tool @tool def tsg_retriever(content: str) -> str: return "TSG: " + content
promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/tsg_retriever.py/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/tsg_retriever.py", "repo_id": "promptflow", "token_count": 35 }
63
inputs: input_str: type: string default: input value from default input_bool: type: bool default: False input_list: type: list default: [] input_dict: type: object default: {} outputs: output: type: string reference: ${test_print_input.output} nodes: - name: test_print_input type: python source: type: code path: test_print_input.py inputs: input_str: ${inputs.input_str} input_bool: ${inputs.input_bool} input_list: ${inputs.input_list} input_dict: ${inputs.input_dict} - name: aggregate_node type: python source: type: code path: test_print_aggregation.py inputs: input_str: ${inputs.input_str} input_bool: ${inputs.input_bool} input_list: ${inputs.input_list} input_dict: ${inputs.input_dict} aggregation: true use_variants: false
promptflow/src/promptflow/tests/test_configs/flows/default_input/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/default_input/flow.dag.yaml", "repo_id": "promptflow", "token_count": 346 }
64
#!/bin/bash echo "$(date -uIns) - promptflow-serve/finish $@" # stop all gunicorn processes echo "$(date -uIns) - Stopping all Gunicorn processes" pkill gunicorn while pgrep gunicorn >/dev/null; do echo "$(date -uIns) - Gunicorn process is still running, waiting for 1s" sleep 1 done echo "$(date -uIns) - Stopped all Gunicorn processes"
promptflow/src/promptflow/tests/test_configs/flows/export/linux/runit/promptflow-serve/finish/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/export/linux/runit/promptflow-serve/finish", "repo_id": "promptflow", "token_count": 123 }
65
inputs: key: type: object outputs: output: type: string reference: ${get_dict_val.output.value} nodes: - name: get_dict_val type: python source: type: code path: get_dict_val.py inputs: key: ${inputs.key} - name: print_val type: python source: type: code path: print_val.py inputs: val: ${get_dict_val.output.value} origin_val: ${get_dict_val.output.origin_value}
promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input/flow.dag.yaml", "repo_id": "promptflow", "token_count": 181 }
66
{"text": "Hello World!"}
promptflow/src/promptflow/tests/test_configs/flows/flow_with_package_tool_with_custom_strong_type_connection/data.jsonl/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_package_tool_with_custom_strong_type_connection/data.jsonl", "repo_id": "promptflow", "token_count": 9 }
67
inputs: {} outputs: output: type: string reference: ${long_run_node.output} nodes: - name: long_run_node type: python inputs: {} source: type: code path: long_run.py
promptflow/src/promptflow/tests/test_configs/flows/long_run/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/long_run/flow.dag.yaml", "repo_id": "promptflow", "token_count": 80 }
68
inputs: idx: type: int outputs: output: type: int reference: ${my_python_tool_with_failed_line.output} nodes: - name: my_python_tool type: python source: type: code path: my_python_tool.py inputs: idx: ${inputs.idx} - name: my_python_tool_with_failed_line type: python source: type: code path: my_python_tool_with_failed_line.py inputs: idx: ${my_python_tool.output}
promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/flow.dag.yaml/0
{ "file_path": "promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/flow.dag.yaml", "repo_id": "promptflow", "token_count": 181 }
69