text
stringlengths 8
1.72M
| id
stringlengths 22
143
| metadata
dict | __index_level_0__
int64 0
104
|
---|---|---|---|
import os
from promptflow import tool
@tool
def get_env_var(key: str):
if key == "raise":
raise Exception("expected raise!")
print(os.environ.get(key))
# get from env var
return {"value": os.environ.get(key)}
| promptflow/src/promptflow/tests/test_configs/flows/partial_fail/print_env.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/partial_fail/print_env.py",
"repo_id": "promptflow",
"token_count": 92
} | 70 |
inputs:
text:
type: string
outputs:
output_echo:
type: string
reference: ${echo_my_input.output}
nodes:
- name: echo_my_input
type: python
source:
type: code
path: echo_input.py
inputs:
input: ${inputs.text}
| promptflow/src/promptflow/tests/test_configs/flows/python_stream_tools/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_stream_tools/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 103
} | 71 |
from promptflow import tool
@tool
def passthrough(image, call_passthrough: bool = True):
if call_passthrough:
image = passthrough(image, False)
return image
| promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_image_nested_api_calls/passthrough.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_image_nested_api_calls/passthrough.py",
"repo_id": "promptflow",
"token_count": 63
} | 72 |
{
"name": "script_with___file__",
"type": "python",
"inputs": {
"input1": {
"type": [
"string"
]
}
},
"source": "script_with___file__.py",
"function": "my_python_tool"
} | promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/script_with___file__.meta.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/script_with___file__.meta.json",
"repo_id": "promptflow",
"token_count": 103
} | 73 |
inputs:
message:
type: string
default: Hello World!
outputs:
output:
type: object
reference: ${test_assistant_definition.output}
nodes:
- name: test_assistant_definition
type: python
source:
type: code
path: test_assistant_definition.py
inputs:
message: ${inputs.message}
assistant_definition: assistant_definition.yaml
| promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 129
} | 74 |
'aadb0707e9a62b00df9d0d3fecb709ece90a8b67', (0, 2261)
'28e341291f602cdf951239f0152fa9e26deb501b', (67584, 3359)
'e25f352dc22315a0e2c3ee2a3cd629763fc0ae5e', (9216, 3302)
'dc8625906abf86f47f2c52b461f108ab6b98a1cf', (12800, 3795)
'a30838607856c1b8efd522af93ea1edb88f57b4f', (16896, 3671)
'4f7cfc5331a898e9c7abb00e2d0f6f818dd2c999', (20992, 3795)
'06c8709d838389faebb00cc579c7bd1e396f5ab7', (25088, 3886)
'89ef4101e31a12b70fa982b83be01a65ee3c7410', (29184, 3365)
'0304f9ccf7ab8521173b43b526b26412208148b1', (32768, 509)
'914c0cdebdbf3b03a21afe84f9a1af9f99729771', (33280, 1080)
'3ccba72230e4213b2ae31d1df71e45377f5b9fbf', (34816, 3081)
'12dcd3566273156d0071d532410670ca6a13faa8', (38400, 3028)
'95aad7f462a4948f82104b8c6320ced6eeb3a59b', (41472, 3293)
'2c8bece890f55462916146bb89d0df92a4210163', (45056, 3316)
'd1f105fd5701fa3ae65608e6ef732eb2c095d88d', (48640, 3371)
'64b155dfa377148a12addcaabd196edd29620ede', (52224, 3258)
'9a7219e5f8a5e525df2fae3a4df82eb6c6dd7837', (59392, 3870)
'bc6a4f4ae64fa01917085f3e608f0d5c7699308c', (63488, 3610)
'ed29e249e3b86abb1907bd98dc7d5368cc11bfe5', (71168, 3348)
'977d4e91843e6a025d88dc10f5935f3fe50a2aa8', (74752, 3632)
'ac3d32b87a5b6271641f0fdb77dd785d07930207', (78848, 3403)
'60924e7e69449f453fea01c32a84b74521f3bb56', (82432, 3080)
'd52f8bf8badc3432c05395d9f647efa2fede3121', (86016, 1938)
'15b696309e5d753b6123f5521a9c8e4daded18e3', (88064, 3895)
'32edf2341350a6fca9670b7ec6edc208905ec8c1', (92160, 3676)
'cc9ac3eaa4f332a204f39f050c8f83ceba89113d', (96256, 863)
'2c2c8e9b4662215a00c7119e987a6b0049829e2b', (97280, 503)
'9893bb77a07b7fa08f4ab85dd16368863040bc53', (97792, 1966)
'b11590a307351d8ade7df49346a826cfde9c4444', (99840, 3923)
'c8fcd047770466d76018d8b9656c18b7a87a9dcf', (103936, 2255)
'b490dc00a174b4b94e411b32b3b6df8e7291ff44', (106496, 493)
'427f9902c01baef6ef8020ae5e5cb5163c6d36ac', (107008, 307)
'45ea53cf0c4770788d6568a177baf123146dc20b', (107520, 924)
'ebf9f4aad38dceb848df0614195e40870dd5fe6e', (108544, 1043)
'6d6d40bcf3cf2e43d640c75cba40d607f1869b9f', (110080, 1315)
'abb3142dc69818c0c7131c4642f2870ae59bd05a', (111616, 1025)
'8cf128af83ea2aaf4a09890690ab16f3bb347bb3', (113152, 309)
'343332ff6b96d3b1baac23c4e8394a6a965f84b1', (113664, 327)
'd7079ca5b962a7dbca1cfe90f847775652acf6b9', (114176, 961)
'b9bcb73bbe85960e4f492c6b60fd2584de421f91', (115200, 312)
'71d8e363eeac4e3334679f505225acd75ef5607b', (115712, 414)
'9e15e58e4a0b5ff182c0f70761c65fba081c2c2f', (132096, 3373)
'1696e4d8ebc97bfec511fca7754ef717692faa70', (119808, 3623)
'bd36e2e27da8bc212f2213feb0154b6902e1b549', (123904, 3699)
'91f0cff189e34431153edba55e684629b6b56487', (128000, 3808)
'5bd1695cf0739697851e8ca9bd54c0305c7422c4', (135680, 3293)
'33c98924b97b4eed655857dca1472bdfcf36b86a', (139264, 3446)
'3dc18b56b1741937eefaa1069119cdec984e49b7', (142848, 3695)
'255fbbcb72580f8c5eeed07419ed9d9fdd809433', (146944, 3113)
'879388db36d150bfd7f948a8a14011ee96ee7e51', (150528, 3063)
'cf7bb24c6526295ab8e9ba4eea799d98b13dcce5', (153600, 3738)
'6b05f39821053fcbc341404fa47bd2a029862637', (157696, 3725)
'a5abc1d206c9456c8230e382e04c2903756f9be2', (161792, 3690)
'6c9c0aa6ecb54dcdb79b2e4dc0c09321482dde41', (165888, 400)
'70f4fea54805e642e98208d6704425432e00d46d', (166400, 2975)
'7c7b48d39ea68dcc368aa9ad6b0da9c30639c716', (169472, 4085)
'f80e94d98478b637304d5af02356da38b45638da', (173568, 3542)
'ce3be051617aa3d9baaabefbde10655a382725ef', (177152, 579)
'b9f991e14c162d02211acdd3737f60afbf117107', (178176, 517)
'75de0fcefdd6fd5f81c2ccbb80b5e40d81c9c214', (179200, 397)
'fb43cea41f39ffa5ef98f98499c292af14cb92cd', (179712, 1305)
'1af44fd75c83703dc9d960a0874fda3a2fa0e389', (181248, 502)
'ebe34b3760a5502304bb23747079e50d8515141b', (181760, 418)
'83f99b7f16fc8a4548782215c97d500eed44159b', (182272, 997)
'235b8b0ff5ebbf46ab52403b59346dd08717c297', (183296, 3205)
'6dbf6fe28cfe055d3730b847cd603ceb498cc046', (186880, 3055)
'1bc4561b56cd2944414cd994c41dc370630c814c', (189952, 3617)
'29301e2df6aef321aaed0c37f5acf59efdcf0ce5', (194048, 533)
'e68e72e33a357bca30a07bd6d7157209a7ed2f46', (195072, 3385)
'a69786b6c83c69110a94fa92571d56716831b89b', (198656, 1010)
'9113b6564602c0961af986fcfd51af95e7aa0d30', (199680, 2973)
'34f82ce2482f5039176bfad68fea3b660cce46a2', (202752, 621)
'4104c4ae613928612ddf45359338643833764a9b', (203776, 2960)
'773e7701fa9b21d32135f2e0a2f80d9f3bf60ff0', (206848, 860)
'e17f414155f0f81443c79d3af49e3295fe38a3bd', (207872, 1215)
'9dc92c6363cf76df823e9cc93dc3e6f961f3dfae', (209408, 3443)
'24973771dad2d9beb6306802d11b103627a0c86f', (212992, 4463)
'4a55d6c6e33565c8c2976ffffed0544c8f01f11f', (217600, 4079)
'c3f859fc47cf393cc3af2e7854c73143039219f9', (221696, 3541)
'1c304bd9336e59ee7fe03ec0aa3ff1316accbd42', (225280, 3182)
'79b019d7c272dbdfc8264e8e42b2c88d7aa7c951', (228864, 2189)
'ead9751f11bc2db068f915f8cd6a798e5d417ee1', (231424, 2224)
'90e7b637f88d5ff8a781be0ca4c1682885c17e4a', (233984, 444)
'b08cfc768cc8c1c5165a02d633b7c5f3cdfdfb60', (234496, 958)
'2182939be9737c58873eafa07078daccddac9b6a', (235520, 3237)
'063189b9af2fa70eb9ecea7fd40adb28d9ad63a9', (239104, 3331)
'45c057f3f3dbc663bc9d7661254c3df17328111c', (242688, 3248)
'3a9191ae0dde33b5d4d9bc89da4b87436400f505', (246272, 3790)
'bb71bde6522266d699c50af9825a6b4f0f025d56', (250368, 3858)
| promptflow/src/promptflow/tests/test_configs/node_recordings/node_cache.shelve.bak/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/node_recordings/node_cache.shelve.bak",
"repo_id": "promptflow",
"token_count": 3069
} | 75 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.024'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.146'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceworkingdirectory
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceworkingdirectory",
"name": "workspaceworkingdirectory", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
false, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureFile", "accountName": "fake_account_name",
"fileShareName": "fake-file-share-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "None"}, "systemData": {"createdAt":
"2023-04-08T02:53:06.6001169+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"createdByType": "Application", "lastModifiedAt": "2023-04-08T02:53:07.2885525+00:00",
"lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "lastModifiedByType":
"Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1161'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.088'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceworkingdirectory/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.145'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:07 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/LocalUpload?restype=directory
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ResourceAlreadyExists</Code><Message>The
specified resource already exists.\nRequestId:494627b8-e01a-00db-2b04-45cbb1000000\nTime:2024-01-12T03:10:08.5872550Z</Message></Error>"
headers:
content-length:
- '228'
content-type:
- application/xml
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
- ResourceAlreadyExists
x-ms-version:
- '2023-08-03'
status:
code: 409
message: The specified resource already exists.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:09 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users?restype=directory
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ResourceAlreadyExists</Code><Message>The
specified resource already exists.\nRequestId:2015fefb-201a-0099-7a04-457231000000\nTime:2024-01-12T03:10:10.8558247Z</Message></Error>"
headers:
content-length:
- '228'
content-type:
- application/xml
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
- ResourceAlreadyExists
x-ms-version:
- '2023-08-03'
status:
code: 409
message: The specified resource already exists.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:10 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users%2Funknown_user?restype=directory
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ResourceAlreadyExists</Code><Message>The
specified resource already exists.\nRequestId:e91ceca5-f01a-007e-3004-459dcb000000\nTime:2024-01-12T03:10:11.9389239Z</Message></Error>"
headers:
content-length:
- '228'
content-type:
- application/xml
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
- ResourceAlreadyExists
x-ms-version:
- '2023-08-03'
status:
code: 409
message: The specified resource already exists.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:12 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users%2Funknown_user%2Fpromptflow?restype=directory
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ResourceAlreadyExists</Code><Message>The
specified resource already exists.\nRequestId:dc666209-a01a-0073-2104-45551f000000\nTime:2024-01-12T03:10:12.9765801Z</Message></Error>"
headers:
content-length:
- '228'
content-type:
- application/xml
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
- ResourceAlreadyExists
x-ms-version:
- '2023-08-03'
status:
code: 409
message: The specified resource already exists.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:13 GMT
x-ms-version:
- '2023-08-03'
method: GET
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users%2Funknown_user%2Fpromptflow%2Fflow_name?restype=directory
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ResourceNotFound</Code><Message>The
specified resource does not exist.\nRequestId:37a5a67c-001a-00ec-2304-45191d000000\nTime:2024-01-12T03:10:14.0112840Z</Message></Error>"
headers:
content-length:
- '223'
content-type:
- application/xml
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
- ResourceNotFound
x-ms-version:
- '2023-08-03'
status:
code: 404
message: The specified resource does not exist.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:14 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users%2Funknown_user%2Fpromptflow%2Fflow_name?restype=directory
response:
body:
string: ''
headers:
content-length:
- '0'
last-modified:
- Fri, 12 Jan 2024 03:10:15 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Directory
x-ms-file-change-time:
- '2024-01-12T03:10:15.0262208Z'
x-ms-file-creation-time:
- '2024-01-12T03:10:15.0262208Z'
x-ms-file-id:
- '13835129046796599296'
x-ms-file-last-write-time:
- '2024-01-12T03:10:15.0262208Z'
x-ms-file-parent-id:
- '10088082484072808448'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:15 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users%2Funknown_user%2Fpromptflow%2Fflow_name%2F__pycache__?restype=directory
response:
body:
string: ''
headers:
content-length:
- '0'
last-modified:
- Fri, 12 Jan 2024 03:10:16 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Directory
x-ms-file-change-time:
- '2024-01-12T03:10:16.0895486Z'
x-ms-file-creation-time:
- '2024-01-12T03:10:16.0895486Z'
x-ms-file-id:
- '13835093862424510464'
x-ms-file-last-write-time:
- '2024-01-12T03:10:16.0895486Z'
x-ms-file-parent-id:
- '13835129046796599296'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-content-length:
- '14'
x-ms-date:
- Fri, 12 Jan 2024 03:10:16 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- Inherit
x-ms-type:
- file
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/.gitattributes
response:
body:
string: ''
headers:
content-length:
- '0'
last-modified:
- Fri, 12 Jan 2024 03:10:17 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- '2024-01-12T03:10:17.0911484Z'
x-ms-file-creation-time:
- '2024-01-12T03:10:17.0911484Z'
x-ms-file-id:
- '13835164231168688128'
x-ms-file-last-write-time:
- '2024-01-12T03:10:17.0911484Z'
x-ms-file-parent-id:
- '13835129046796599296'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: '* text eol=lf
'
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '14'
Content-MD5:
- nYmkCopuDuFj82431amzZw==
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:17 GMT
x-ms-range:
- bytes=0-13
x-ms-version:
- '2023-08-03'
x-ms-write:
- update
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/.gitattributes?comp=range
response:
body:
string: ''
headers:
content-length:
- '0'
content-md5:
- nYmkCopuDuFj82431amzZw==
last-modified:
- Fri, 12 Jan 2024 03:10:18 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-last-write-time:
- '2024-01-12T03:10:18.1285902Z'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-content-length:
- '250'
x-ms-date:
- Fri, 12 Jan 2024 03:10:18 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- Inherit
x-ms-type:
- file
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/flow.dag.yaml
response:
body:
string: ''
headers:
content-length:
- '0'
last-modified:
- Fri, 12 Jan 2024 03:10:19 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- '2024-01-12T03:10:19.1789755Z'
x-ms-file-creation-time:
- '2024-01-12T03:10:19.1789755Z'
x-ms-file-id:
- '13835076270238466048'
x-ms-file-last-write-time:
- '2024-01-12T03:10:19.1789755Z'
x-ms-file-parent-id:
- '13835129046796599296'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: "inputs:\n name:\n type: string\n default: hod\noutputs:\n result:\n
\ type: string\n reference: ${hello_world.output}\nnodes:\n- name: hello_world\n
\ type: python\n source:\n type: code\n path: hello_world.py\n inputs:\n
\ name: ${inputs.name}\n"
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '250'
Content-MD5:
- CT1FTZp5JScB8fq+HjnINw==
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:19 GMT
x-ms-range:
- bytes=0-249
x-ms-version:
- '2023-08-03'
x-ms-write:
- update
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/flow.dag.yaml?comp=range
response:
body:
string: ''
headers:
content-length:
- '0'
content-md5:
- CT1FTZp5JScB8fq+HjnINw==
last-modified:
- Fri, 12 Jan 2024 03:10:20 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-last-write-time:
- '2024-01-12T03:10:20.2164176Z'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-content-length:
- '105'
x-ms-date:
- Fri, 12 Jan 2024 03:10:20 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- Inherit
x-ms-type:
- file
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/hello_world.py
response:
body:
string: ''
headers:
content-length:
- '0'
last-modified:
- Fri, 12 Jan 2024 03:10:21 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- '2024-01-12T03:10:21.2658070Z'
x-ms-file-creation-time:
- '2024-01-12T03:10:21.2658070Z'
x-ms-file-id:
- '13835146638982643712'
x-ms-file-last-write-time:
- '2024-01-12T03:10:21.2658070Z'
x-ms-file-parent-id:
- '13835129046796599296'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: "from promptflow import tool\n\n\n@tool\ndef hello_world(name: str) -> str:\n
\ return f\"Hello World {name}!\"\n"
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '105'
Content-MD5:
- fGMkkiZAjGs8PW/AMiYppA==
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:21 GMT
x-ms-range:
- bytes=0-104
x-ms-version:
- '2023-08-03'
x-ms-write:
- update
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/hello_world.py?comp=range
response:
body:
string: ''
headers:
content-length:
- '0'
content-md5:
- fGMkkiZAjGs8PW/AMiYppA==
last-modified:
- Fri, 12 Jan 2024 03:10:22 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-last-write-time:
- '2024-01-12T03:10:22.2823416Z'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: '{"flowName": "flow_display_name", "description": "test flow description",
"tags": {"owner": "sdk-test"}, "flowDefinitionFilePath": "Users/unknown_user/promptflow/flow_name/flow.dag.yaml",
"flowType": "default"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '282'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/Flows
response:
body:
string: '{"eTag": {}, "studioPortalEndpoint": "https://ml.azure.com/prompts/flow/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/b7731056-ea64-481e-ad53-c45f2363c29a/details?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"flowId": "b7731056-ea64-481e-ad53-c45f2363c29a", "flowName": "flow_display_name",
"description": "test flow description", "tags": {"owner": "sdk-test"}, "flowType":
"Default", "experimentId": "00000000-0000-0000-0000-000000000000", "createdDate":
"2024-01-12T03:10:24.9118275Z", "lastModifiedDate": "2024-01-12T03:10:24.9118275Z",
"owner": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"},
"flowResourceId": "azureml://locations/eastus/workspaces/00000/flows/b7731056-ea64-481e-ad53-c45f2363c29a",
"isArchived": false, "flowDefinitionFilePath": "Users/unknown_user/promptflow/flow_name/flow.dag.yaml"}'
headers:
connection:
- keep-alive
content-length:
- '1100'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.569'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 03:10:25 GMT
x-ms-version:
- '2023-08-03'
method: HEAD
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/flow.dag.yaml
response:
body:
string: ''
headers:
content-length:
- '250'
content-type:
- application/octet-stream
last-modified:
- Fri, 12 Jan 2024 03:10:20 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- '2024-01-12T03:10:20.2164176Z'
x-ms-file-creation-time:
- '2024-01-12T03:10:19.1789755Z'
x-ms-file-id:
- '13835076270238466048'
x-ms-file-last-write-time:
- '2024-01-12T03:10:20.2164176Z'
x-ms-file-parent-id:
- '13835129046796599296'
x-ms-type:
- File
x-ms-version:
- '2023-08-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/Flows/b7731056-ea64-481e-ad53-c45f2363c29a?experimentId=00000000-0000-0000-0000-000000000000
response:
body:
string: '{"timestamp": "2024-01-12T03:10:24.9429158+00:00", "eTag": {}, "studioPortalEndpoint":
"https://ml.azure.com/prompts/flow/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/b7731056-ea64-481e-ad53-c45f2363c29a/details?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"flowId": "b7731056-ea64-481e-ad53-c45f2363c29a", "flowName": "flow_display_name",
"description": "test flow description", "tags": {"owner": "sdk-test"}, "flowType":
"Default", "experimentId": "00000000-0000-0000-0000-000000000000", "createdDate":
"2024-01-12T03:10:24.9118275Z", "lastModifiedDate": "2024-01-12T03:10:24.9118275Z",
"owner": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"},
"flowResourceId": "azureml://locations/eastus/workspaces/00000/flows/b7731056-ea64-481e-ad53-c45f2363c29a",
"isArchived": false, "flowDefinitionFilePath": "Users/unknown_user/promptflow/flow_name/flow.dag.yaml"}'
headers:
connection:
- keep-alive
content-length:
- '1148'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.541'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_flow_operations_TestFlow_test_get_flow.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_flow_operations_TestFlow_test_get_flow.yaml",
"repo_id": "promptflow",
"token_count": 14977
} | 76 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.023'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.108'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.084'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.145'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:49:52 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/data.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '52'
content-md5:
- kHimciLnA7d3/I2LBUeLNA==
content-type:
- application/octet-stream
last-modified:
- Fri, 22 Sep 2023 09:37:22 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Fri, 22 Sep 2023 09:37:22 GMT
x-ms-meta-name:
- db87715d-65de-40cc-a281-09c0115699f3
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 6666ca9c-d7c3-4d85-b18c-12643adb9046
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:49:53 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/data.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.094'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.128'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:49:56 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/partial_fail/data.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '52'
content-md5:
- kHimciLnA7d3/I2LBUeLNA==
content-type:
- application/octet-stream
last-modified:
- Fri, 22 Sep 2023 09:37:31 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Fri, 22 Sep 2023 09:37:30 GMT
x-ms-meta-name:
- aa1844d8-4898-4daa-8100-6140558fc7c9
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:49:57 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/partial_fail/data.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/partial_fail/flow.dag.yaml",
"runId": "name", "runDisplayName": "name", "runExperimentName": "", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/data.jsonl"},
"inputsMapping": {}, "connections": {}, "environmentVariables": {}, "runtimeName":
"fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000",
"sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '773'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '9.976'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/bbbb2b4cfb3d236b4f9b6110fd82264c/data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12846'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.208'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/bbbb2b4cfb3d236b4f9b6110fd82264c/data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12846'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.222'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/bbbb2b4cfb3d236b4f9b6110fd82264c/data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12846'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.226'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/bbbb2b4cfb3d236b4f9b6110fd82264c/data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12846'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.208'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "print_env", "type": "python", "source":
{"type": "code", "path": "print_env.py"}, "inputs": {"key": "${inputs.key}"},
"tool": "print_env.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "print_env.py", "type": "python",
"inputs": {"key": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "print_env.py", "function": "get_env_var",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"key": {"type": "string", "is_chat_input": false}}, "outputs": {"output":
{"type": "string", "reference": "${print_env.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/bbbb2b4cfb3d236b4f9b6110fd82264c/data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12846'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.665'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/childRuns?endIndex=24&startIndex=0
response:
body:
string: '[{"run_id": "name_0", "status": "Completed", "error": null, "inputs":
{"key": "no", "line_number": 0}, "output": {"output": null}, "metrics": null,
"request": null, "parent_run_id": "name", "root_run_id": "name", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:50:26.771369Z",
"end_time": "2024-01-12T08:50:26.777759Z", "index": 0, "api_calls": [{"name":
"get_env_var", "type": "Tool", "inputs": {"key": "no"}, "output": {"value":
null}, "start_time": 1705049426.774557, "end_time": 1705049426.775613, "error":
null, "children": null, "node_name": "print_env"}], "variant_id": "", "name":
"", "description": "", "tags": null, "system_metrics": {"duration": 0.00639,
"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "result":
{"output": null}, "upload_metrics": false}, {"run_id": "name_2", "status":
"Completed", "error": null, "inputs": {"key": "matter", "line_number": 2},
"output": {"output": null}, "metrics": null, "request": null, "parent_run_id":
"name", "root_run_id": "name", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:50:26.88379Z", "end_time": "2024-01-12T08:50:26.889697Z",
"index": 2, "api_calls": [{"name": "get_env_var", "type": "Tool", "inputs":
{"key": "matter"}, "output": {"value": null}, "start_time": 1705049426.887009,
"end_time": 1705049426.888023, "error": null, "children": null, "node_name":
"print_env"}], "variant_id": "", "name": "", "description": "", "tags": null,
"system_metrics": {"duration": 0.005907, "prompt_tokens": 0, "completion_tokens":
0, "total_tokens": 0}, "result": {"output": null}, "upload_metrics": false},
{"run_id": "name_1", "status": "Failed", "error": {"message": "Execution failure
in ''print_env'': (Exception) expected raise!", "messageFormat": "Execution
failure in ''{node_name}'': {error_type_and_message}", "messageParameters":
{"node_name": "print_env", "error_type_and_message": "(Exception) expected
raise!"}, "referenceCode": "Tool/__pf_main__", "code": "UserError", "innerError":
{"code": "ToolExecutionError", "innerError": null}, "additionalInfo": [{"type":
"ToolExecutionErrorDetails", "info": {"type": "Exception", "message": "expected
raise!", "traceback": "Traceback (most recent call last):\n File \"/mnt/host/service/app/39415/requests/name/print_env.py\",
line 9, in get_env_var\n raise Exception(\"expected raise!\")\nException:
expected raise!\n", "filename": "/mnt/host/service/app/39415/requests/name/print_env.py",
"lineno": 9, "name": "get_env_var"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''print_env'': (Exception) expected raise!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"expected raise!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39415/requests/name/print_env.py\",
line 9, in get_env_var\n raise Exception(\"expected raise!\")\n", "innerException":
null}}}, "inputs": {"key": "raise", "line_number": 1}, "output": null, "metrics":
null, "request": null, "parent_run_id": "name", "root_run_id": "name", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:50:26.77979Z",
"end_time": "2024-01-12T08:50:26.953981Z", "index": 1, "api_calls": [{"name":
"get_env_var", "type": "Tool", "inputs": {"key": "raise"}, "output": null,
"start_time": 1705049426.782672, "end_time": 1705049426.783439, "error": {"message":
"expected raise!", "type": "Exception"}, "children": null, "node_name": "print_env"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.174191, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}]'
headers:
connection:
- keep-alive
content-length:
- '6638'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.729'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/childRuns?endIndex=49&startIndex=25
response:
body:
string: '[]'
headers:
connection:
- keep-alive
content-length:
- '2'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '1.083'
status:
code: 200
message: OK
- request:
body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true,
"selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705049404, "rootRunId": "name", "createdUtc":
"2024-01-12T08:50:04.3467507+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": {"error": {"code": "UserError", "severity":
null, "message": "Execution failure in ''print_env'': (Exception) expected
raise!", "messageFormat": "{\"totalChildRuns\": 3, \"userErrorChildRuns\":
1, \"systemErrorChildRuns\": 0, \"errorDetails\": [{\"code\": \"UserError/ToolExecutionError\",
\"messageFormat\": \"Execution failure in ''{node_name}'': {error_type_and_message}\",
\"count\": 1}]}", "messageParameters": {"node_name": "print_env", "error_type_and_message":
"(Exception) expected raise!"}, "referenceCode": "Tool/__pf_main__", "detailsUri":
null, "target": null, "details": [], "innerError": {"code": "ToolExecutionError",
"innerError": null}, "debugInfo": {"type": "ToolExecutionError", "message":
"Execution failure in ''print_env'': (Exception) expected raise!", "stackTrace":
"\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"expected raise!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39415/requests/name/print_env.py\",
line 9, in get_env_var\n raise Exception(\"expected raise!\")\n", "innerException":
null, "data": null, "errorResponse": null}, "data": null, "errorResponse":
null}, "additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "expected raise!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39415/requests/name/print_env.py\",
line 9, in get_env_var\n raise Exception(\"expected raise!\")\nException:
expected raise!\n", "filename": "/mnt/host/service/app/39415/requests/name/print_env.py",
"lineno": 9, "name": "get_env_var"}}]}, "correlation": null, "environment":
null, "location": null, "time": "2024-01-12T08:50:30.744399+00:00", "componentName":
"promptflow-runtime/20231204.v4 Designer/1.0 promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0) promptflow/1.2.0rc1"}, "warnings":
null, "revision": 7, "statusRevision": 3, "runUuid": "d689f844-6eae-4a78-a210-4779ec098e1e",
"parentRunUuid": null, "rootRunUuid": "d689f844-6eae-4a78-a210-4779ec098e1e",
"lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:04.8547843",
"effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "18a66f5f-dbdf-4c17-9dd7-1634712a9cbe",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:50:30.0347672+00:00", "duration":
"00:00:04.8547843", "cancelationReason": null, "currentAttemptId": 1, "runId":
"name", "parentRunId": null, "experimentId": "1848033e-509f-4c52-92ee-f0a0121fe99e",
"status": "Completed", "startTimeUtc": "2024-01-12T08:50:26.1204745+00:00",
"endTimeUtc": "2024-01-12T08:50:30.9752588+00:00", "scheduleId": null, "displayName":
"name", "name": null, "dataContainerId": "dcid.name", "description": null,
"hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator":
null, "traits": [], "attribution": "PromptFlow", "computeType": "AmlcDsi"},
"properties": {"azureml.promptflow.runtime_name": "test-runtime-ci", "azureml.promptflow.runtime_version":
"20231204.v4", "azureml.promptflow.definition_file_name": "flow.dag.yaml",
"azureml.promptflow.session_id": "31858a8dfc61a642bb0ab6df4fc3ac7b3807de4ffead00d1",
"azureml.promptflow.flow_lineage_id": "de293df4f50622090c0225852d59cd663b6b629e38728f7444fa0f12255a0647",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/bc20fa079592a8072922533f187e3184/partial_fail/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/bbbb2b4cfb3d236b4f9b6110fd82264c/data.jsonl",
"_azureml.evaluation_run": "promptflow.BatchRun", "azureml.promptflow.snapshot_id":
"f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8", "azureml.promptflow.total_tokens":
"0", "_azureml.evaluate_artifacts": "[{\"path\": \"instance_results.jsonl\",
\"type\": \"table\"}]"}, "parameters": {}, "actionUris": {}, "scriptName":
null, "target": null, "uniqueChildRunComputeTargets": [], "tags": {}, "settings":
{}, "services": {}, "inputDatasets": [], "outputDatasets": [], "runDefinition":
null, "jobSpecification": null, "primaryMetricName": null, "createdFrom":
null, "cancelUri": null, "completeUri": null, "diagnosticsUri": null, "computeRequest":
null, "compute": null, "retainForLifetimeOfWorkspace": false, "queueingInfo":
null, "inputs": null, "outputs": {"debug_info": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1",
"type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_flow_outputs/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-length:
- '9873'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.037'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/logContent
response:
body:
string: '"2024-01-12 08:50:11 +0000 78 promptflow-runtime INFO [name]
Receiving v2 bulk run request afd6522c-26b0-4e2e-966c-43176d74cb1f: {\"flow_id\":
\"name\", \"flow_run_id\": \"name\", \"flow_source\": {\"flow_source_type\":
1, \"flow_source_info\": {\"snapshot_id\": \"f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A46%3A24Z&ske=2024-01-13T15%3A56%3A24Z&sks=b&skv=2019-07-07&st=2024-01-12T08%3A40%3A10Z&se=2024-01-12T16%3A50%3A10Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/bbbb2b4cfb3d236b4f9b6110fd82264c/data.jsonl\"},
\"azure_storage_setting\": {\"azure_storage_mode\": 1, \"storage_account_name\":
\"promptfloweast4063704120\", \"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\",
\"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/name\", \"blob_container_sas_token\":
\"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T08%3A50%3A11Z&ske=2024-01-19T08%3A50%3A11Z&sks=b&skv=2019-07-07&se=2024-01-19T08%3A50%3A11Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 08:50:11 +0000 78
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:50:11 +0000 78 promptflow-runtime INFO Updating
name to Status.Preparing...\n2024-01-12 08:50:11 +0000 78 promptflow-runtime
INFO Downloading snapshot to /mnt/host/service/app/39415/requests/name\n2024-01-12
08:50:11 +0000 78 promptflow-runtime INFO Get snapshot sas url for
f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8...\n2024-01-12 08:50:18 +0000 78
promptflow-runtime INFO Downloading snapshot f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8
from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8.zip...\n2024-01-12
08:50:18 +0000 78 promptflow-runtime INFO Downloaded file /mnt/host/service/app/39415/requests/name/f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8.zip
with size 701 for snapshot f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8.\n2024-01-12
08:50:18 +0000 78 promptflow-runtime INFO Download snapshot f36c2e06-b2b3-4ee5-9ed4-127ae490ffa8
completed.\n2024-01-12 08:50:18 +0000 78 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/39415/requests/name\n2024-01-12
08:50:18 +0000 78 promptflow-runtime INFO About to execute a python
flow.\n2024-01-12 08:50:18 +0000 78 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-12 08:50:18 +0000 78 promptflow-runtime
INFO Starting to check process 5940 status for run name\n2024-01-12 08:50:18
+0000 78 promptflow-runtime INFO Start checking run status for run
name\n2024-01-12 08:50:22 +0000 5940 promptflow-runtime INFO [78--5940]
Start processing flowV2......\n2024-01-12 08:50:22 +0000 5940 promptflow-runtime
INFO Runtime version: 20231204.v4. PromptFlow version: 1.2.0rc1\n2024-01-12
08:50:22 +0000 5940 promptflow-runtime INFO Setting mlflow tracking
uri...\n2024-01-12 08:50:23 +0000 5940 promptflow-runtime INFO Validating
''AzureML Data Scientist'' user authentication...\n2024-01-12 08:50:25 +0000 5940
promptflow-runtime INFO Successfully validated ''AzureML Data Scientist''
user authentication.\n2024-01-12 08:50:25 +0000 5940 promptflow-runtime
INFO Using AzureMLRunStorageV2\n2024-01-12 08:50:25 +0000 5940 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:50:25 +0000 5940 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-12 08:50:25 +0000 5940 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:50:25 +0000 5940 promptflow-runtime INFO Resolve data from url finished
in 0.47867807000875473 seconds\n2024-01-12 08:50:25 +0000 5940 promptflow-runtime
INFO Starting the aml run ''name''...\n2024-01-12 08:50:26 +0000 5940
execution WARNING Starting run without column mapping may lead to
unexpected results. Please consult the following documentation for more information:
https://aka.ms/pf/column-mapping\n2024-01-12 08:50:26 +0000 5940 execution.bulk INFO Using
fork, process count: 3\n2024-01-12 08:50:26 +0000 5987 execution.bulk INFO Process
5987 started.\n2024-01-12 08:50:26 +0000 5992 execution.bulk INFO Process
5992 started.\n2024-01-12 08:50:26 +0000 5940 execution.bulk INFO Process
name: ForkProcess-74:3, Process id: 5987, Line number: 0 start execution.\n2024-01-12
08:50:26 +0000 5940 execution.bulk INFO Process name: ForkProcess-74:4,
Process id: 5992, Line number: 1 start execution.\n2024-01-12 08:50:26 +0000 5981
execution.bulk INFO Process 5981 started.\n2024-01-12 08:50:26 +0000 5940
execution.bulk INFO Process name: ForkProcess-74:2, Process id: 5981,
Line number: 2 start execution.\n2024-01-12 08:50:26 +0000 5940 execution.bulk INFO Process
name: ForkProcess-74:3, Process id: 5987, Line number: 0 completed.\n2024-01-12
08:50:26 +0000 5940 execution.bulk INFO Finished 1 / 3 lines.\n2024-01-12
08:50:26 +0000 5992 execution ERROR Node print_env in line
1 failed. Exception: Execution failure in ''print_env'': (Exception) expected
raise!.\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39415/requests/name/print_env.py\",
line 9, in get_env_var\n raise Exception(\"expected raise!\")\nException:
expected raise!\n\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\npromptflow._core._errors.ToolExecutionError: Execution
failure in ''print_env'': (Exception) expected raise!\n2024-01-12 08:50:26
+0000 5940 execution.bulk INFO Average execution time for completed
lines: 0.24 seconds. Estimated time for incomplete lines: 0.48 seconds.\n2024-01-12
08:50:26 +0000 5992 execution ERROR Execution of one node has
failed. Cancelling all running nodes: print_env.\n2024-01-12 08:50:26 +0000 5940
execution.bulk INFO Process name: ForkProcess-74:2, Process id: 5981,
Line number: 2 completed.\n2024-01-12 08:50:26 +0000 5940 execution.bulk INFO Finished
2 / 3 lines.\n2024-01-12 08:50:27 +0000 5940 execution.bulk INFO Average
execution time for completed lines: 0.16 seconds. Estimated time for incomplete
lines: 0.16 seconds.\n2024-01-12 08:50:27 +0000 5940 execution.bulk INFO Process
name: ForkProcess-74:4, Process id: 5992, Line number: 1 completed.\n2024-01-12
08:50:27 +0000 5940 execution.bulk INFO Finished 3 / 3 lines.\n2024-01-12
08:50:27 +0000 5940 execution.bulk INFO Average execution time
for completed lines: 0.18 seconds. Estimated time for incomplete lines: 0.0
seconds.\n2024-01-12 08:50:28 +0000 5940 execution ERROR 1/3
flow run failed, indexes: [1], exception of index 1: Execution failure in
''print_env'': (Exception) expected raise!\n2024-01-12 08:50:29 +0000 5940
execution.bulk INFO Upload status summary metrics for run name finished
in 1.7115172129124403 seconds\n2024-01-12 08:50:30 +0000 5940 promptflow-runtime
INFO Successfully write run properties {\"azureml.promptflow.total_tokens\":
0, \"_azureml.evaluate_artifacts\": \"[{\\\"path\\\": \\\"instance_results.jsonl\\\",
\\\"type\\\": \\\"table\\\"}]\"} with run id ''name''\n2024-01-12 08:50:30
+0000 5940 execution.bulk INFO Upload RH properties for run name
finished in 0.08352885954082012 seconds\n2024-01-12 08:50:30 +0000 5940
promptflow-runtime INFO Creating unregistered output Asset for Run name...\n2024-01-12
08:50:30 +0000 5940 promptflow-runtime INFO Created debug_info Asset:
azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1\n2024-01-12
08:50:30 +0000 5940 promptflow-runtime INFO Creating unregistered output
Asset for Run name...\n2024-01-12 08:50:30 +0000 5940 promptflow-runtime
INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_flow_outputs/versions/1\n2024-01-12
08:50:30 +0000 5940 promptflow-runtime INFO Creating Artifact for Run
name...\n2024-01-12 08:50:30 +0000 5940 promptflow-runtime INFO Created
instance_results.jsonl Artifact.\n2024-01-12 08:50:30 +0000 5940 promptflow-runtime
INFO Patching name...\n2024-01-12 08:50:30 +0000 5940 promptflow-runtime
WARNING [name] Run failed. Execution stackTrace: Traceback (most recent call
last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n [REDACTED:
External StackTrace]\n\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n [REDACTED:
External StackTrace]\n\n2024-01-12 08:50:30 +0000 5940 promptflow-runtime
INFO Ending the aml run ''name'' with status ''Completed''...\n2024-01-12
08:50:32 +0000 78 promptflow-runtime INFO Process 5940 finished\n2024-01-12
08:50:32 +0000 78 promptflow-runtime INFO [78] Child process finished!\n2024-01-12
08:50:32 +0000 78 promptflow-runtime INFO [name] End processing bulk
run\n2024-01-12 08:50:32 +0000 78 promptflow-runtime INFO Cleanup
working dir /mnt/host/service/app/39415/requests/name for bulk run\n"'
headers:
connection:
- keep-alive
content-length:
- '13657'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.694'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_get_detail_against_partial_fail_run.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_get_detail_against_partial_fail_run.yaml",
"repo_id": "promptflow",
"token_count": 54461
} | 77 |
name: flow_run_20230629_101205
description: sample bulk run
# flow relative to current working directory should not be supported.
flow: tests/test_configs/flows/web_classification
data: ../datas/webClassification1.jsonl
column_mapping:
url: "${data.url}"
variant: ${summarize_text_content.variant_0}
# run config: env related
environment_variables: env_file
| promptflow/src/promptflow/tests/test_configs/runs/bulk_run_invalid_flow_path.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/runs/bulk_run_invalid_flow_path.yaml",
"repo_id": "promptflow",
"token_count": 115
} | 78 |
from jinja2 import Template
from promptflow.connections import CustomConnection
from promptflow import ToolProvider, tool
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate
class TestCustomLLMTool(ToolProvider):
    """Class-based fixture demonstrating the ``custom_llm`` tool type.

    Holds an Azure OpenAI connection and exposes a no-op tool method whose
    signature/metadata are what the tool framework introspects.
    """

    def __init__(self, connection: AzureOpenAIConnection):
        """Keep the Azure OpenAI connection for use by the tool method."""
        super().__init__()
        self.connection = connection

    @tool(
        name="My Custom LLM Tool", type="custom_llm",
        description="This is a tool to demonstrate the custom_llm tool type",
    )
    def tool_func(self, api: str, template: PromptTemplate, **kwargs):
        """Intentionally empty body — only the declaration matters here."""
        pass
@tool(
    name="My Custom LLM Tool",
    type="custom_llm",
    description="This is a tool to demonstrate the custom_llm tool type",
)
def my_tool(connection: CustomConnection, prompt: PromptTemplate, **kwargs) -> str:
    """Render ``prompt`` as a Jinja2 template with ``kwargs`` as variables.

    The ``connection`` parameter typically carries API configs; it is accepted
    but unused here because this fixture only demonstrates prompt rendering.
    A tool that needs no connection may drop the parameter entirely.
    """
    jinja_template = Template(prompt, trim_blocks=True, keep_trailing_newline=True)
    return jinja_template.render(**kwargs)
| promptflow/src/promptflow/tests/test_configs/tools/custom_llm_tool.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/tools/custom_llm_tool.py",
"repo_id": "promptflow",
"token_count": 396
} | 79 |
inputs:
text:
type: string
outputs:
output:
type: string
reference: ${summarize_text_content.output}
nodes:
- name: summarize_text_content
type: llm
source:
type: code
path: summarize_text_content__variant_1.jinja2
inputs:
deployment_name: text-davinci-003
suffix: ''
max_tokens: '256'
temperature: '0.2'
top_p: '1.0'
logprobs: ''
echo: 'False'
stop: ''
presence_penalty: '0'
frequency_penalty: '0'
best_of: '1'
logit_bias: ''
text: ${inputs.text}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: completion_1
module: promptflow.tools.aoai
| promptflow/src/promptflow/tests/test_configs/wrong_flows/wrong_api/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/wrong_api/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 280
} | 80 |
# Prompt flow
[](https://pypi.org/project/promptflow/)
[](https://pypi.python.org/pypi/promptflow/)
[](https://pypi.org/project/promptflow/)
[](https://microsoft.github.io/promptflow/reference/pf-command-reference.html)
[](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow)
[](https://microsoft.github.io/promptflow/index.html)
[](https://github.com/microsoft/promptflow/issues/new/choose)
[](https://github.com/microsoft/promptflow/issues/new/choose)
[](https://github.com/microsoft/promptflow/blob/main/CONTRIBUTING.md)
[](https://github.com/microsoft/promptflow/blob/main/LICENSE)
> Welcome to join us to make prompt flow better by
> participating [discussions](https://github.com/microsoft/promptflow/discussions),
> opening [issues](https://github.com/microsoft/promptflow/issues/new/choose),
> submitting [PRs](https://github.com/microsoft/promptflow/pulls).
**Prompt flow** is a suite of development tools designed to streamline the end-to-end development cycle of LLM-based AI applications, from ideation, prototyping, testing, evaluation to production deployment and monitoring. It makes prompt engineering much easier and enables you to build LLM apps with production quality.
With prompt flow, you will be able to:
- **Create and iteratively develop flow**
- Create executable [flows](https://microsoft.github.io/promptflow/concepts/concept-flows.html) that link LLMs, prompts, Python code and other [tools](https://microsoft.github.io/promptflow/concepts/concept-tools.html) together.
- Debug and iterate your flows, especially the [interaction with LLMs](https://microsoft.github.io/promptflow/concepts/concept-connections.html) with ease.
- **Evaluate flow quality and performance**
- Evaluate your flow's quality and performance with larger datasets.
- Integrate the testing and evaluation into your CI/CD system to ensure quality of your flow.
- **Streamlined development cycle for production**
- Deploy your flow to the serving platform you choose or integrate into your app's code base easily.
- (Optional but highly recommended) Collaborate with your team by leveraging the cloud version of [Prompt flow in Azure AI](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/overview-what-is-prompt-flow?view=azureml-api-2).
------
## Installation
To get started quickly, you can use a pre-built development environment. **Click the button below** to open the repo in GitHub Codespaces, and then continue the readme!
[](https://codespaces.new/microsoft/promptflow?quickstart=1)
If you want to get started in your local environment, first install the packages:
Ensure you have a python environment, `python=3.9` is recommended.
```sh
pip install promptflow promptflow-tools
```
## Quick Start ⚡
**Create a chatbot with prompt flow**
Run the command to initiate a prompt flow from a chat template, it creates folder named `my_chatbot` and generates required files within it:
```sh
pf flow init --flow ./my_chatbot --type chat
```
**Setup a connection for your API key**
For OpenAI key, establish a connection by running the command, using the `openai.yaml` file in the `my_chatbot` folder, which stores your OpenAI key (override keys and name with --set to avoid yaml file changes):
```sh
pf connection create --file ./my_chatbot/openai.yaml --set api_key=<your_api_key> --name open_ai_connection
```
For Azure OpenAI key, establish the connection by running the command, using the `azure_openai.yaml` file:
```sh
pf connection create --file ./my_chatbot/azure_openai.yaml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
**Chat with your flow**
In the `my_chatbot` folder, there's a `flow.dag.yaml` file that outlines the flow, including inputs/outputs, nodes, connection, the LLM model, etc.
> Note that in the `chat` node, we're using a connection named `open_ai_connection` (specified in `connection` field) and the `gpt-35-turbo` model (specified in `deployment_name` field). The `deployment_name` field specifies the OpenAI model, or the Azure OpenAI deployment resource.
Interact with your chatbot by running: (press `Ctrl + C` to end the session)
```sh
pf flow test --flow ./my_chatbot --interactive
```
**Core value: ensuring "High Quality” from prototype to production**
Explore our [**15-minute tutorial**](examples/tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md) that guides you through prompt tuning ➡ batch testing ➡ evaluation, all designed to ensure high quality ready for production.
Next Step! Continue with the **Tutorial** 👇 section to delve deeper into prompt flow.
## Tutorial 🏃♂️
Prompt flow is a tool designed to **build high quality LLM apps**, the development process in prompt flow follows these steps: develop a flow, improve the flow quality, deploy the flow to production.
### Develop your own LLM apps
#### VS Code Extension
We also offer a VS Code extension (a flow designer) for an interactive flow development experience with UI.
<img src="examples/tutorials/quick-start/media/vsc.png" alt="vsc" width="1000"/>
You can install it from the <a href="https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow">visualstudio marketplace</a>.
#### Deep delve into flow development
[Getting started with prompt flow](https://microsoft.github.io/promptflow/how-to-guides/quick-start.html): A step by step guidance to invoke your first flow run.
### Learn from use cases
[Tutorial: Chat with PDF](https://github.com/microsoft/promptflow/blob/main/examples/tutorials/e2e-development/chat-with-pdf.md): An end-to-end tutorial on how to build a high quality chat application with prompt flow, including flow development and evaluation with metrics.
> More examples can be found [here](https://microsoft.github.io/promptflow/tutorials/index.html#samples). We welcome contributions of new use cases!
### Setup for contributors
If you're interested in contributing, please start with our dev setup guide: [dev_setup.md](./docs/dev/dev_setup.md).
Next Step! Continue with the **Contributing** 👇 section to contribute to prompt flow.
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com.
When you submit a pull request, a CLA bot will automatically determine whether you need to provide
a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions
provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
## Trademarks
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
trademarks or logos is subject to and must follow
[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
Any use of third-party trademarks or logos are subject to those third-party's policies.
## Code of Conduct
This project has adopted the
[Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the
[Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
or contact [[email protected]](mailto:[email protected])
with any additional questions or comments.
## Data Collection
The software may collect information about you and your use of the software and
send it to Microsoft if configured to enable telemetry.
Microsoft may use this information to provide services and improve our products and services.
You may turn on the telemetry as described in the repository.
There are also some features in the software that may enable you and Microsoft
to collect data from users of your applications. If you use these features, you
must comply with applicable law, including providing appropriate notices to
users of your applications together with a copy of Microsoft's privacy
statement. Our privacy statement is located at
https://go.microsoft.com/fwlink/?LinkID=824704. You can learn more about data
collection and use in the help documentation and our privacy statement. Your
use of the software operates as your consent to these practices.
### Telemetry Configuration
Telemetry collection is on by default.
To opt out, please run `pf config set telemetry.enabled=false` to turn it off.
## License
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the [MIT](LICENSE) license.
| promptflow/README.md/0 | {
"file_path": "promptflow/README.md",
"repo_id": "promptflow",
"token_count": 2770
} | 0 |
# Adding a Tool Icon
A tool icon serves as a graphical representation of your tool in the user interface (UI). Follow this guidance to add a custom tool icon when developing your own tool package.
Adding a custom tool icon is optional. If you do not provide one, the system uses a default icon.
## Prerequisites
- Please ensure that your [Prompt flow for VS Code](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow) is updated to version 1.4.2 or later.
- Create a tool package as described in [Create and Use Tool Package](create-and-use-tool-package.md).
- Prepare custom icon image that meets these requirements:
- Use PNG, JPG or BMP format.
- 16x16 pixels to prevent distortion when resizing.
- Avoid complex images with lots of detail or contrast, as they may not resize well.
See [this example](https://github.com/microsoft/promptflow/blob/main/examples/tools/tool-package-quickstart/my_tool_package/icons/custom-tool-icon.png) as a reference.
- Install dependencies to generate icon data URI:
```
pip install pillow
```
## Add tool icon with _icon_ parameter
Run the command below in your tool project directory to automatically generate your tool YAML, use _-i_ or _--icon_ parameter to add a custom tool icon:
```
python <promptflow github repo>\scripts\tool\generate_package_tool_meta.py -m <tool_module> -o <tool_yaml_path> -i <tool-icon-path>
```
Here we use [an existing tool project](https://github.com/microsoft/promptflow/tree/main/examples/tools/tool-package-quickstart) as an example.
```
cd D:\proj\github\promptflow\examples\tools\tool-package-quickstart
python D:\proj\github\promptflow\scripts\tool\generate_package_tool_meta.py -m my_tool_package.tools.my_tool_1 -o my_tool_package\yamls\my_tool_1.yaml -i my_tool_package\icons\custom-tool-icon.png
```
In the auto-generated tool YAML file, the custom tool icon data URI is added in the `icon` field:
```yaml
my_tool_package.tools.my_tool_1.my_tool:
function: my_tool
icon: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACR0lEQVR4nKWS3UuTcRTHP79nm9ujM+fccqFGI5viRRpjJgkJ3hiCENVN/QMWdBHUVRdBNwX9ARHd2FVEWFLRjaS9XPmSC/EFTNOWc3Pi48y9PHNzz68L7UXTCvreHM65+PA953uElFLyHzLvHMwsJrnzfJqFeAan3cKV9mr8XseeAOXX5vqjSS53jdF+tIz1nIFAMDCzwpvJ5b87+LSYYHw+gcWkEAwluXnOR2Q1R+9YjJ7BKJG4zoXmqr0ddL3+QnV5EeUOK821LsJammcjEeZiafJScrd3bm8H6zkDd4mVztZKAK49/Mj8is4Z/35GPq9R5VJ5GYztDtB1HT1vovGQSiqVAqDugI3I6jpP3i9x9VQVfu8+1N/OvbWCqqqoBSa6h1fQNA1N0xiYTWJSBCZF8HgwSjQapbRQ2RUg5NYj3O6ZochmYkFL03S4mImIzjFvCf2xS5gtCRYXWvBUvKXjyEVeTN/DXuDgxsnuzSMK4HTAw1Q0hZba4NXEKp0tbpq9VkxCwTAETrsVwxBIBIYhMPI7YqyrtONQzSznJXrO4H5/GJ9LUGg0YFYydJxoYnwpj1s9SEN5KzZz4fYYAW6dr+VsowdFgamlPE/Hs8SzQZYzg0S+zjIc6iOWDDEc6uND+N12B9/VVu+mrd79o38wFCCdTeBSK6hxBii1eahxBlAtRbsDdmoiHGRNj1NZ7GM0NISvzM9oaIhiqwOO/wMgl4FsRpLf2KxGXpLNSLLInzH+CWBIA6RECIGUEiEUpDRACBSh8A3pXfGWdXfMgAAAAABJRU5ErkJggg==
inputs:
connection:
type:
- CustomConnection
input_text:
type:
- string
module: my_tool_package.tools.my_tool_1
name: my_tool
type: python
```
## Verify the tool icon in VS Code extension
Follow [steps](create-and-use-tool-package.md#use-your-tool-from-vscode-extension) to use your tool from VS Code extension. Your tool displays with the custom icon:

## FAQ
### Can I preview the tool icon image before adding it to a tool?
Yes, you can run the command below under the root folder to generate a data URI for your custom tool icon. Make sure the output file has an `.html` extension.
```
python <path-to-scripts>\tool\convert_image_to_data_url.py --image-path <image_input_path> -o <html_output_path>
```
For example:
```
python D:\proj\github\promptflow\scripts\tool\convert_image_to_data_url.py --image-path D:\proj\github\promptflow\examples\tools\tool-package-quickstart\my_tool_package\icons\custom-tool-icon.png -o output.html
```
The content of `output.html` looks like the following, open it in a web browser to preview the icon.
```html
<html>
<body>
<img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACR0lEQVR4nKWS3UuTcRTHP79nm9ujM+fccqFGI5viRRpjJgkJ3hiCENVN/QMWdBHUVRdBNwX9ARHd2FVEWFLRjaS9XPmSC/EFTNOWc3Pi48y9PHNzz68L7UXTCvreHM65+PA953uElFLyHzLvHMwsJrnzfJqFeAan3cKV9mr8XseeAOXX5vqjSS53jdF+tIz1nIFAMDCzwpvJ5b87+LSYYHw+gcWkEAwluXnOR2Q1R+9YjJ7BKJG4zoXmqr0ddL3+QnV5EeUOK821LsJammcjEeZiafJScrd3bm8H6zkDd4mVztZKAK49/Mj8is4Z/35GPq9R5VJ5GYztDtB1HT1vovGQSiqVAqDugI3I6jpP3i9x9VQVfu8+1N/OvbWCqqqoBSa6h1fQNA1N0xiYTWJSBCZF8HgwSjQapbRQ2RUg5NYj3O6ZochmYkFL03S4mImIzjFvCf2xS5gtCRYXWvBUvKXjyEVeTN/DXuDgxsnuzSMK4HTAw1Q0hZba4NXEKp0tbpq9VkxCwTAETrsVwxBIBIYhMPI7YqyrtONQzSznJXrO4H5/GJ9LUGg0YFYydJxoYnwpj1s9SEN5KzZz4fYYAW6dr+VsowdFgamlPE/Hs8SzQZYzg0S+zjIc6iOWDDEc6uND+N12B9/VVu+mrd79o38wFCCdTeBSK6hxBii1eahxBlAtRbsDdmoiHGRNj1NZ7GM0NISvzM9oaIhiqwOO/wMgl4FsRpLf2KxGXpLNSLLInzH+CWBIA6RECIGUEiEUpDRACBSh8A3pXfGWdXfMgAAAAABJRU5ErkJggg==" alt="My Image">
</body>
</html>
```
### Can I add a tool icon to an existing tool package?
Yes, you can refer to the [preview icon](add-a-tool-icon.md#can-i-preview-the-tool-icon-image-before-adding-it-to-a-tool) section to generate the data URI and manually add the data URI to the tool's YAML file.
### Can I add tool icons for dark and light mode separately?
Yes, you can add the tool icon data URIs manually or run the command below in your tool project directory to automatically generate your tool YAML, use _--icon-light_ to add a custom tool icon for the light mode and use _--icon-dark_ to add a custom tool icon for the dark mode:
```
python <promptflow github repo>\scripts\tool\generate_package_tool_meta.py -m <tool_module> -o <tool_yaml_path> --icon-light <light-tool-icon-path> --icon-dark <dark-tool-icon-path>
```
Here we use [an existing tool project](https://github.com/microsoft/promptflow/tree/main/examples/tools/tool-package-quickstart) as an example.
```
cd D:\proj\github\promptflow\examples\tools\tool-package-quickstart
python D:\proj\github\promptflow\scripts\tool\generate_package_tool_meta.py -m my_tool_package.tools.my_tool_1 -o my_tool_package\yamls\my_tool_1.yaml --icon-light my_tool_package\icons\custom-tool-icon-light.png --icon-dark my_tool_package\icons\custom-tool-icon-dark.png
```
In the auto-generated tool YAML file, the light and dark tool icon data URIs are added in the `icon` field:
```yaml
my_tool_package.tools.my_tool_1.my_tool:
function: my_tool
icon:
dark: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAB00lEQVR4nI1SO2iTURT+7iNNb16a+Cg6iJWqRKwVRIrWV6GVUkrFdqiVShBaxIIi4iY4iouDoy4ODkKn4uQkDs5FfEzFYjEtJYQo5P/z35j/3uNw7Z80iHqHC/ec8z3OuQeMMcYYAHenU8n84YMAABw7mo93dEQpAIyBAyAiF1Kq8/Wrl5fHR1x6tjC9uPBcSrlZD4BxIgIgBCei+bnC6cGxSuWHEEIIUa58H7l0dWZqwlqSUjhq7oDWEoAL584Y6ymljDHGmM543BhvaPAsAKLfEjIyB6BeryPw796+EWidUInr16b5z6rWAYCmKXeEEADGRy+SLgXlFfLWbbWoyytULZ4f6Hee2yDgnAG4OVsoff20try08eX92vLSzJVJAJw3q7dISSnDMFx48UypeCa97cPHz7fu3Y/FYo1Go8nbCiAiIUStVus/eaKvN691IAQnsltI24wZY9Kp1Ju373K5bDKZNMa6gf5ZIWrG9/0g0K3W/wYIw3Dvnq6dO7KNMPwvgOf5x3uPHOrp9n3/HwBrLYCu3bv6Tg0PjU0d2L8PAEWfDKCtac6YIVrfKN2Zn8tkUqvfigBaR88Ya66uezMgl93+9Mmjxw8fJBIqWv7NAvwCHeuq7gEPU/QAAAAASUVORK5CYII=
light: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAB2UlEQVR4nH1SO2hUQRQ9c18K33u72cXs7jOL8UeQCCJoJaIgKAiCWKilaGNlYREFDRGNjayVWKiFFmITECFKJKIokQRRsDFENoooUchHU5qdWZ2512KymxcNOcUwc5nDuefeA2FhZpGFU0S0Mf5S0zpdF2FhISgopUREKfXj59yhoycmPn4GAKDncuXa9VtKKWYGACgowHOdc9a6g0eOA7mx8apzzlp76vRZoGXw6XMRsdb6nwSAmYnoQ3Xi5fBIdk2SiSMiCoKgNZslteruvX4ASikvSwAEAGDqdYhAXO+VypevkwODQ4+HnlGcq2mDNLwtZq5pvWP3AYRJ0Lq2uG5rWNgYFjaBVt+8c19E/jRaWvQgImPj1e279ufaN8elzly5K1/u6r7QZ51zrjmoBqHJ+TU/39ax5cy5i53bdnb39KXtLpr28OMLgiCfz78YHpmemi0W2piZWdIWaMmDCIDWet/ePUlS0toQUWM8yxG8jrVuw/qOTBw19rUiQUQoCGZm50z9txf8By3/K0Rh+PDRk8lv3+MoWklBBACmpmdKxcKn96O3b1SqC6FSyxOUgohk4pjZ9T8YeDX6ptye+PoSpNIrfkGv3747fOzk+UtXjTE+BM14M8tfl7BQR9VzUXEAAAAASUVORK5CYII=
inputs:
connection:
type:
- CustomConnection
input_text:
type:
- string
module: my_tool_package.tools.my_tool_1
name: my_tool
type: python
```
Note: Both light and dark icons are optional. If you set either a light or dark icon, it will be used in its respective mode, and the system default icon will be used in the other mode. | promptflow/docs/how-to-guides/develop-a-tool/add-a-tool-icon.md/0 | {
"file_path": "promptflow/docs/how-to-guides/develop-a-tool/add-a-tool-icon.md",
"repo_id": "promptflow",
"token_count": 4044
} | 1 |
# Process image in flow
PromptFlow defines a contract to represent image data.
## Data class
`promptflow.contracts.multimedia.Image`
Image class is a subclass of `bytes`, thus you can access the binary data by directly using the object. It has an extra attribute `source_url` to store the origin url of the image, which would be useful if you want to pass the url instead of content of image to APIs like GPT-4V model.
## Data type in flow input
Set the type of flow input to `image` and promptflow will treat it as an image.
## Reference image in prompt template
In prompt templates that support image (e.g. in OpenAI GPT-4V tool), using markdown syntax to denote that a template input is an image: ``. In this case, `test_image` will be substituted with base64 or source_url (if set) before sending to LLM model.
## Serialization/Deserialization
Promptflow uses a special dict to represent image.
`{"data:image/<mime-type>;<representation>": "<value>"}`
- `<mime-type>` can be html standard [mime](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types/Common_types) image types. Setting it to specific type can help previewing the image correctly, or it can be `*` for unknown type.
- `<representation>` is the image serialized representation, there are 3 supported types:
- url
It can point to a publicly accessible web URL. E.g.
{"data:image/png;url": "https://developer.microsoft.com/_devcom/images/logo-ms-social.png"}
- base64
It can be the base64 encoding of the image. E.g.
{"data:image/png;base64": "iVBORw0KGgoAAAANSUhEUgAAAGQAAABLAQMAAAC81rD0AAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAABlBMVEUAAP7////DYP5JAAAAAWJLR0QB/wIt3gAAAAlwSFlzAAALEgAACxIB0t1+/AAAAAd0SU1FB+QIGBcKN7/nP/UAAAASSURBVDjLY2AYBaNgFIwCdAAABBoAAaNglfsAAAAZdEVYdGNvbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVDnr0DLAAAAJXRFWHRkYXRlOmNyZWF0ZQAyMDIwLTA4LTI0VDIzOjEwOjU1KzAzOjAwkHdeuQAAACV0RVh0ZGF0ZTptb2RpZnkAMjAyMC0wOC0yNFQyMzoxMDo1NSswMzowMOEq5gUAAAAASUVORK5CYII="}
- path
It can reference an image file on local disk. Both absolute path and relative path are supported, but in the cases where the serialized image representation is stored in a file, relative to the containing folder of that file is recommended, as in the case of flow IO data. E.g.
{"data:image/png;path": "./my-image.png"}
Please note that `path` representation is not supported in Deployment scenario.
## Batch Input data
Batch input data containing image can be of 2 formats:
1. The same jsonl format of regular batch input, except that some columns may be serialized image data or composite data types (dict/list) containing images. The serialized images can only be Url or Base64. E.g.
```json
{"question": "How many colors are there in the image?", "input_image": {"data:image/png;url": "https://developer.microsoft.com/_devcom/images/logo-ms-social.png"}}
{"question": "What's this image about?", "input_image": {"data:image/png;url": "https://developer.microsoft.com/_devcom/images/404.png"}}
```
2. A folder containing a jsonl file under root path, which contains serialized image in File Reference format. The referenced file are stored in the folder and their relative path to the root path is used as path in the file reference. Here is a sample batch input, note that the name of `input.jsonl` is arbitrary as long as it's a jsonl file:
```
BatchInputFolder
|----input.jsonl
|----image1.png
|----image2.png
```
Content of `input.jsonl`
```json
{"question": "How many colors are there in the image?", "input_image": {"data:image/png;path": "image1.png"}}
{"question": "What's this image about?", "input_image": {"data:image/png;path": "image2.png"}}
```
| promptflow/docs/how-to-guides/process-image-in-flow.md/0 | {
"file_path": "promptflow/docs/how-to-guides/process-image-in-flow.md",
"repo_id": "promptflow",
"token_count": 1332
} | 2 |
# Azure OpenAI GPT-4 Turbo with Vision
## Introduction
Azure OpenAI GPT-4 Turbo with Vision tool enables you to leverage your AzureOpenAI GPT-4 Turbo with Vision model deployment to analyze images and provide textual responses to questions about them.
## Prerequisites
- Create AzureOpenAI resources
Create Azure OpenAI resources with [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal)
- Create a GPT-4 Turbo with Vision deployment
Browse to [Azure OpenAI Studio](https://oai.azure.com/) and sign in with the credentials associated with your Azure OpenAI resource. During or after the sign-in workflow, select the appropriate directory, Azure subscription, and Azure OpenAI resource.
Under Management select Deployments and Create a GPT-4 Turbo with Vision deployment by selecting model name: `gpt-4` and model version `vision-preview`.
## Connection
Setup connections to provisioned resources in prompt flow.
| Type | Name | API KEY | API Type | API Version |
|-------------|----------|----------|----------|-------------|
| AzureOpenAI | Required | Required | Required | Required |
## Inputs
| Name | Type | Description | Required |
|------------------------|-------------|------------------------------------------------------------------------------------------------|----------|
| connection | AzureOpenAI | the AzureOpenAI connection to be used in the tool | Yes |
| deployment\_name | string | the language model to use | Yes |
| prompt                 | string      | The text prompt that the language model will use to generate its response.                     | Yes      |
| max\_tokens | integer | the maximum number of tokens to generate in the response. Default is 512. | No |
| temperature | float | the randomness of the generated text. Default is 1. | No |
| stop | list | the stopping sequence for the generated text. Default is null. | No |
| top_p | float | the probability of using the top choice from the generated tokens. Default is 1. | No |
| presence\_penalty | float | value that controls the model's behavior with regards to repeating phrases. Default is 0. | No |
| frequency\_penalty | float | value that controls the model's behavior with regards to generating rare phrases. Default is 0. | No |
## Outputs
| Return Type | Description |
|-------------|------------------------------------------|
| string | The text of one response of conversation |
| promptflow/docs/reference/tools-reference/aoai-gpt4-turbo-vision.md/0 | {
"file_path": "promptflow/docs/reference/tools-reference/aoai-gpt4-turbo-vision.md",
"repo_id": "promptflow",
"token_count": 1178
} | 3 |
# Working with Connection
This folder contains example `YAML` files for creating `connection` using `pf` cli. Learn more on all the [connections types](https://microsoft.github.io/promptflow/concepts/concept-connections.html).
## Prerequisites
- Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
## Get started
- To create a connection using any of the sample `YAML` files provided in this directory, execute following command:
```bash
# Override keys with --set to avoid yaml file changes
pf connection create -f custom.yml --set configs.key1='<your_api_key>'
pf connection create -f azure_openai.yml --set api_key='<your_api_key>'
```
- To create a custom connection using an `.env` file, execute following command:
```bash
pf connection create -f .env --name custom_connection
```
- To list the created connection, execute following command:
```bash
pf connection list
```
- To show one connection details, execute following command:
```bash
pf connection show --name custom_connection
```
- To update a connection that is in the workspace, execute the following command. Currently only a few fields (description, display_name) support update:
```bash
# Update an existing connection with --set to override values
# Update an azure open ai connection with a new api base
pf connection update -n open_ai_connection --set api_base='<your_api_base>'
# Update a custom connection
pf connection update -n custom_connection --set configs.key1='<your_new_key>' secrets.key2='<your_another_key>'
```
- To delete a connection:
```bash
pf connection delete -n custom_connection
```
| promptflow/examples/connections/README.md/0 | {
"file_path": "promptflow/examples/connections/README.md",
"repo_id": "promptflow",
"token_count": 458
} | 4 |
{
"package": {},
"code": {
"chat.jinja2": {
"type": "llm",
"inputs": {
"chat_history": {
"type": [
"string"
]
},
"question": {
"type": [
"string"
]
}
},
"source": "chat.jinja2"
},
"chat_variant_1.jinja2": {
"type": "llm",
"inputs": {
"chat_history": {
"type": [
"string"
]
},
"question": {
"type": [
"string"
]
}
},
"source": "chat_variant_1.jinja2"
},
"chat_variant_2.jinja2": {
"type": "llm",
"inputs": {
"chat_history": {
"type": [
"string"
]
},
"question": {
"type": [
"string"
]
}
},
"source": "chat_variant_2.jinja2"
},
"extract_result.py": {
"type": "python",
"inputs": {
"input1": {
"type": [
"string"
]
}
},
"source": "extract_result.py",
"function": "my_python_tool"
}
}
} | promptflow/examples/flows/chat/chat-math-variant/.promptflow/flow.tools.json/0 | {
"file_path": "promptflow/examples/flows/chat/chat-math-variant/.promptflow/flow.tools.json",
"repo_id": "promptflow",
"token_count": 746
} | 5 |
<jupyter_start><jupyter_code>from main import chat_with_pdf, print_stream_and_return_full_answer
from dotenv import load_dotenv
# Load configuration (e.g. API keys) from a local .env file into env vars.
load_dotenv()
# PDF to chat about: the BERT paper (arXiv:1810.04805).
bert_paper_url = "https://arxiv.org/pdf/1810.04805.pdf"
# Scripted multi-turn conversation; later questions are follow-ups that can
# only be resolved from the accumulated chat history ("it", "so about same time?").
questions = [
    "what is BERT?",
    "what NLP tasks does it perform well?",
    "is BERT suitable for NER?",
    "is it better than GPT",
    "when was GPT come up?",
    "when was BERT come up?",
    "so about same time?",
]
# Chat history as a list of {"role": ..., "content": ...} messages, extended
# after every turn and fed back into the next chat_with_pdf call.
history = []
for q in questions:
    # chat_with_pdf returns a token stream plus retrieved context -- presumably
    # the PDF passages used to answer; see main.py for the exact contract.
    stream, context = chat_with_pdf(q, bert_paper_url, history)
    print("User: " + q, flush=True)
    print("Bot: ", end="", flush=True)
    # Print tokens as they arrive and collect the complete answer text.
    answer = print_stream_and_return_full_answer(stream)
    history = history + [
        {"role": "user", "content": q},
        {"role": "assistant", "content": answer},
    ]<jupyter_output><empty_output>
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/test.ipynb",
"repo_id": "promptflow",
"token_count": 321
} | 6 |
inputs:
chat_history:
type: list
default: []
pdf_url:
type: string
default: https://arxiv.org/pdf/1810.04805.pdf
question:
type: string
is_chat_input: true
default: what NLP tasks does it perform well?
outputs:
answer:
type: string
is_chat_output: true
reference: ${qna_tool.output.answer}
context:
type: string
reference: ${qna_tool.output.context}
nodes:
- name: setup_env
type: python
source:
type: code
path: setup_env.py
inputs:
conn: my_custom_connection
- name: download_tool
type: python
source:
type: code
path: download_tool.py
inputs:
url: ${inputs.pdf_url}
env_ready_signal: ${setup_env.output}
- name: build_index_tool
type: python
source:
type: code
path: build_index_tool.py
inputs:
pdf_path: ${download_tool.output}
- name: qna_tool
type: python
source:
type: code
path: qna_tool.py
inputs:
question: ${rewrite_question_tool.output}
index_path: ${build_index_tool.output}
history: ${inputs.chat_history}
- name: rewrite_question_tool
type: python
source:
type: code
path: rewrite_question_tool.py
inputs:
question: ${inputs.question}
history: ${inputs.chat_history}
| promptflow/examples/flows/chat/chat-with-pdf/flow.dag.yaml.multi-node/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/flow.dag.yaml.multi-node",
"repo_id": "promptflow",
"token_count": 501
} | 7 |
from promptflow import tool
@tool
def process_search_result(search_result):
    """Flatten (url, content) search hits into one context string.

    Each hit is rendered as a "Content: ..." line followed by a
    "Source: ..." line, and consecutive hits are separated by a blank
    line. Any failure while iterating the results (e.g. a malformed
    entry) is logged and an empty string is returned instead of raising.
    """
    try:
        sections = [
            f"Content: {content}\nSource: {url}"
            for url, content in search_result
        ]
        return "\n\n".join(sections)
    except Exception as e:
        print(f"Error: {e}")
        return ""
| promptflow/examples/flows/chat/chat-with-wikipedia/process_search_result.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-wikipedia/process_search_result.py",
"repo_id": "promptflow",
"token_count": 198
} | 8 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
groundtruth:
type: string
default: groundtruth
prediction:
type: string
default: prediction
outputs:
results:
type: string
reference: ${line_process.output}
nodes:
- name: line_process
type: python
source:
type: code
path: line_process.py
inputs:
groundtruth: ${inputs.groundtruth}
prediction: ${inputs.prediction}
- name: aggregate
type: python
source:
type: code
path: aggregate.py
inputs:
processed_results: ${line_process.output}
aggregation: true
environment:
python_requirements_txt: requirements.txt
| promptflow/examples/flows/evaluation/eval-basic/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-basic/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 242
} | 9 |
system:
You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric.
user:
Equivalence, as a metric, measures the similarity between the predicted answer and the correct answer. If the information and content in the predicted answer is similar or equivalent to the correct answer, then the value of the Equivalence metric should be high, else it should be low. Given the question, correct answer, and predicted answer, determine the value of Equivalence metric using the following rating scale:
One star: the predicted answer is not at all similar to the correct answer
Two stars: the predicted answer is mostly not similar to the correct answer
Three stars: the predicted answer is somewhat similar to the correct answer
Four stars: the predicted answer is mostly similar to the correct answer
Five stars: the predicted answer is completely similar to the correct answer
This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5.
The examples below show the Equivalence score for a question, a correct answer, and a predicted answer.
question: What is the role of ribosomes?
correct answer: Ribosomes are cellular structures responsible for protein synthesis. They interpret the genetic information carried by messenger RNA (mRNA) and use it to assemble amino acids into proteins.
predicted answer: Ribosomes participate in carbohydrate breakdown by removing nutrients from complex sugar molecules.
stars: 1
question: Why did the Titanic sink?
correct answer: The Titanic sank after it struck an iceberg during its maiden voyage in 1912. The impact caused the ship's hull to breach, allowing water to flood into the vessel. The ship's design, lifeboat shortage, and lack of timely rescue efforts contributed to the tragic loss of life.
predicted answer: The sinking of the Titanic was a result of a large iceberg collision. This caused the ship to take on water and eventually sink, leading to the death of many passengers due to a shortage of lifeboats and insufficient rescue attempts.
stars: 2
question: What causes seasons on Earth?
correct answer: Seasons on Earth are caused by the tilt of the Earth's axis and its revolution around the Sun. As the Earth orbits the Sun, the tilt causes different parts of the planet to receive varying amounts of sunlight, resulting in changes in temperature and weather patterns.
predicted answer: Seasons occur because of the Earth's rotation and its elliptical orbit around the Sun. The tilt of the Earth's axis causes regions to be subjected to different sunlight intensities, which leads to temperature fluctuations and alternating weather conditions.
stars: 3
question: How does photosynthesis work?
correct answer: Photosynthesis is a process by which green plants and some other organisms convert light energy into chemical energy. This occurs as light is absorbed by chlorophyll molecules, and then carbon dioxide and water are converted into glucose and oxygen through a series of reactions.
predicted answer: In photosynthesis, sunlight is transformed into nutrients by plants and certain microorganisms. Light is captured by chlorophyll molecules, followed by the conversion of carbon dioxide and water into sugar and oxygen through multiple reactions.
stars: 4
question: What are the health benefits of regular exercise?
correct answer: Regular exercise can help maintain a healthy weight, increase muscle and bone strength, and reduce the risk of chronic diseases. It also promotes mental well-being by reducing stress and improving overall mood.
predicted answer: Routine physical activity can contribute to maintaining ideal body weight, enhancing muscle and bone strength, and preventing chronic illnesses. In addition, it supports mental health by alleviating stress and augmenting general mood.
stars: 5
question: {{question}}
correct answer:{{ground_truth}}
predicted answer: {{answer}}
stars: | promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_similarity_prompt.jinja2/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_similarity_prompt.jinja2",
"repo_id": "promptflow",
"token_count": 828
} | 10 |
from promptflow import tool
@tool
def select_metrics(metrics: str) -> dict:
    """Parse a comma-separated metric selection string.

    Args:
        metrics: user input such as "gpt_relevance, gpt_groundedness".

    Returns:
        A dict mapping every supported metric name to True when the user
        selected it and False otherwise. Unknown metric names are ignored.
        (The return annotation was previously ``str``, which was wrong.)
    """
    supported_metrics = ('gpt_relevance', 'gpt_groundedness', 'gpt_retrieval_score')
    # Split on commas and trim whitespace; "if metric" drops empty fragments
    # produced by leading/trailing/duplicate commas before stripping.
    user_selected_metrics = [metric.strip() for metric in metrics.split(',') if metric]
    return {metric: metric in user_selected_metrics for metric in supported_metrics}
| promptflow/examples/flows/evaluation/eval-qna-rag-metrics/select_metrics.py/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-qna-rag-metrics/select_metrics.py",
"repo_id": "promptflow",
"token_count": 196
} | 11 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
python_requirements_txt: requirements.txt
inputs:
chat_history:
type: list
is_chat_history: true
utterance:
type: string
is_chat_input: true
default: Play BB King and increase the volume.
outputs:
intents:
type: string
reference: ${Conversational_Language_Understanding.output}
is_chat_output: true
nodes:
- name: LLM_Rewrite
type: llm
source:
type: code
path: chat.jinja2
inputs:
deployment_name: cluGPTTurbo
max_tokens: 256
temperature: 0.7
question: ${inputs.utterance}
connection: CLUGPTModel
api: chat
- name: Conversational_Language_Understanding
type: python
source:
type: package
tool: language_tools.tools.conversational_language_understanding.get_conversational_language_understanding
inputs:
connection: azure_ai_language_connection
language: en-us
utterances: ${LLM_Rewrite.output}
project_name: MediaPlayer
deployment_name: adv
parse_response: false
| promptflow/examples/flows/integrations/azure-ai-language/multi_intent_conversational_language_understanding/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/integrations/azure-ai-language/multi_intent_conversational_language_understanding/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 389
} | 12 |
# Basic flow with builtin llm tool
A basic standard flow that calls Azure OpenAI with builtin llm tool.
Tools used in this flow:
- `prompt` tool
- built-in `llm` tool
Connections used in this flow:
- `azure_open_ai` connection
## Prerequisites
Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
## Setup connection
Prepare your Azure Open AI resource follow this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.
Note in this example, we are using [chat api](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt?pivots=programming-language-chat-completions), please use `gpt-35-turbo` or `gpt-4` model deployment.
Ensure you have created `open_ai_connection` connection before.
```bash
pf connection show -n open_ai_connection
```
Create connection if you haven't done that. Ensure you have put your azure open ai endpoint key in [azure_openai.yml](../../../connections/azure_openai.yml) file.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create -f ../../../connections/azure_openai.yml --name open_ai_connection --set api_key=<your_api_key> api_base=<your_api_base>
```
## Run flow
### Run with single line input
```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
# test with inputs
pf flow test --flow . --inputs text="Python Hello World!"
```
### run with multiple lines data
- create run
```bash
pf run create --flow . --data ./data.jsonl --column-mapping text='${data.text}' --stream
```
You can also skip providing `column-mapping` if provided data has same column name as the flow.
Reference [here](https://aka.ms/pf/column-mapping) for default behavior when `column-mapping` not provided in CLI.
- list and show run meta
```bash
# list created run
pf run list
# get a sample run name
name=$(pf run list -r 10 | jq '.[] | select(.name | contains("basic_with_builtin_llm")) | .name'| head -n 1 | tr -d '"')
# show specific run detail
pf run show --name $name
# show output
pf run show-details --name $name
# visualize run in browser
pf run visualize --name $name
```
| promptflow/examples/flows/standard/basic-with-builtin-llm/README.md/0 | {
"file_path": "promptflow/examples/flows/standard/basic-with-builtin-llm/README.md",
"repo_id": "promptflow",
"token_count": 730
} | 13 |
system:
There is a search bar in the mall APP and users can enter any query in the search bar.
The user may want to search for orders, view product information, or seek recommended products.
Therefore, please classify user intentions into the following three types according to the query: product_recommendation, order_search, product_info
Please note that only the above three situations can be returned, and try not to include other return values.
user:
The user's query is {{query}} | promptflow/examples/flows/standard/conditional-flow-for-switch/classify_with_llm.jinja2/0 | {
"file_path": "promptflow/examples/flows/standard/conditional-flow-for-switch/classify_with_llm.jinja2",
"repo_id": "promptflow",
"token_count": 110
} | 14 |
import logging
import os
from urllib.parse import urlparse
import requests
class File:
    """A source file identified by a local path or an HTTP(S) URL.

    Lazily reads and caches the file's text content, and exposes naming
    metadata (directory, base name, extension) used to derive the name of
    the generated "<filename>_doc.<language>" output file.
    """

    def __init__(self, source: str):
        self._source = source
        self._is_url = source.startswith("http://") or source.startswith("https://")
        if self._is_url:
            # For URLs, derive naming metadata from the URL's path component.
            parsed_url = urlparse(source)
            path = parsed_url.path
        else:
            path = source
        self._path = os.path.normpath(os.path.abspath(path))
        self._dirname = os.path.dirname(self._path)
        # NOTE(review): assumes the base name contains at least one "." -- a
        # name without an extension raises IndexError, and "a.tar.gz" yields
        # language "tar". Confirm callers only pass simple source-file names.
        basename = os.path.basename(self._path)
        self._filename = basename.split(".")[0]
        self._language = basename.split(".")[1]

    def _read_content(self):
        """Return the file's text content, or None if it cannot be retrieved."""
        if self._is_url:
            response = requests.get(self.source)
            if response.status_code == 200:
                content = response.text
                return content
            else:
                print(f"Failed to retrieve content from URL: {self.source}")
                return None
        else:
            try:
                # NOTE(review): reads with the platform default encoding;
                # consider encoding="utf-8" -- left unchanged to preserve
                # existing behavior.
                with open(self._path, "r") as file:
                    content = file.read()
                return content
            except FileNotFoundError:
                print(f"File not found: {self.source}")
                return None

    @property
    def content(self) -> str:
        """Lazily loaded file content; read once and then cached.

        Bug fix: the cache guard previously tested the never-set attribute
        ``_text`` while storing ``_content``, so every access re-read the
        file (or re-fetched the URL).
        """
        if not hasattr(self, "_content"):
            self._content = self._read_content()
        return self._content

    @property
    def language(self) -> str:
        """File extension without the dot, e.g. "py"."""
        return self._language

    @property
    def filename(self) -> str:
        """Base file name without its extension."""
        return self._filename

    @property
    def dirname(self) -> str:
        """Absolute directory containing the (normalized) file path."""
        return self._dirname

    @property
    def source(self) -> str:
        """The original path or URL this File was created from."""
        return self._source

    def override_origin_file(self, content: str) -> None:
        """Overwrite the original local file with *content*.

        URL sources cannot be written back, so a warning is logged and a new
        local file is created instead.
        """
        if not self._is_url:
            with open(self._path, "w") as f:
                f.write(content)
        else:
            logging.warning("Cannot override origin file from URL, create a new file instead.")
            self.create_new_file(content)

    def create_new_file(self, content: str) -> None:
        """Write *content* to "<filename>_doc.<language>".

        The new file is placed next to the original for local sources, or in
        the current working directory for URL sources.
        """
        if self._is_url:
            path = os.path.join(
                './',
                self.filename + f"_doc.{self.language}",
            )
        else:
            path = os.path.join(
                self.dirname,
                self.filename + f"_doc.{self.language}",
            )
        with open(path, "w") as f:
            f.write(content)
| promptflow/examples/flows/standard/gen-docstring/file.py/0 | {
"file_path": "promptflow/examples/flows/standard/gen-docstring/file.py",
"repo_id": "promptflow",
"token_count": 1206
} | 15 |
my_tool_package.tools.tool_with_generated_by_input.my_tool:
function: my_tool
inputs:
index_json:
type:
- string
generated_by:
func_path: my_tool_package.tools.tool_with_generated_by_input.generate_index_json
func_kwargs:
- name: index_type
type:
- string
reference: ${inputs.index_type}
- name: index
type:
- string
optional: true
reference: ${inputs.index}
- name: index_connection
type: [CognitiveSearchConnection]
optional: true
reference: ${inputs.index_connection}
- name: index_name
type:
- string
optional: true
reference: ${inputs.index_name}
- name: content_field
type:
- string
optional: true
reference: ${inputs.content_field}
- name: embedding_field
type:
- string
optional: true
reference: ${inputs.embedding_field}
- name: metadata_field
type:
- string
optional: true
reference: ${inputs.metadata_field}
- name: semantic_configuration
type:
- string
optional: true
reference: ${inputs.semantic_configuration}
- name: embedding_connection
type: [AzureOpenAIConnection, OpenAIConnection]
optional: true
reference: ${inputs.embedding_connection}
- name: embedding_deployment
type:
- string
optional: true
reference: ${inputs.embedding_deployment}
reverse_func_path: my_tool_package.tools.tool_with_generated_by_input.reverse_generate_index_json
queries:
type:
- string
top_k:
type:
- int
index_type:
type:
- string
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_index_types
input_type: uionly_hidden
index:
type:
- string
enabled_by: index_type
enabled_by_value: ["Workspace MLIndex"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_indexes
input_type: uionly_hidden
index_connection:
type: [CognitiveSearchConnection]
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
input_type: uionly_hidden
index_name:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
input_type: uionly_hidden
content_field:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_fields
input_type: uionly_hidden
embedding_field:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_fields
input_type: uionly_hidden
metadata_field:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_fields
input_type: uionly_hidden
semantic_configuration:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_semantic_configuration
input_type: uionly_hidden
embedding_connection:
type: [AzureOpenAIConnection, OpenAIConnection]
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
input_type: uionly_hidden
embedding_deployment:
type:
- string
enabled_by: index_type
enabled_by_value: ["Azure Cognitive Search"]
dynamic_list:
func_path: my_tool_package.tools.tool_with_generated_by_input.list_embedding_deployment
func_kwargs:
- name: embedding_connection
type:
- string
reference: ${inputs.embedding_connection}
input_type: uionly_hidden
module: my_tool_package.tools.tool_with_generated_by_input
name: Tool with Generated By Input
description: This is a tool with generated by input
type: python
| promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_generated_by_input.yaml/0 | {
"file_path": "promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_generated_by_input.yaml",
"repo_id": "promptflow",
"token_count": 2072
} | 16 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs: {}
outputs:
output:
type: string
reference: ${My_Tool_with_Dynamic_List_Input_cywc.output}
nodes:
- name: My_Tool_with_Dynamic_List_Input_cywc
type: python
source:
type: package
tool: my_tool_package.tools.tool_with_dynamic_list_input.my_tool
inputs:
input_prefix: hi
input_text:
- grape3
- elderberry5
endpoint_name: my_endpoint
| promptflow/examples/tools/use-cases/dynamic-list-input-tool-showcase/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/tools/use-cases/dynamic-list-input-tool-showcase/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 192
} | 17 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
flow_input:
type: string
outputs:
output:
type: object
reference: ${echo_connection.output}
nodes:
- name: echo_connection
type: python
source:
type: code
path: echo_connection.py
inputs:
flow_input: ${inputs.flow_input}
node_input: "dummy_node_input"
connection: open_ai_connection
| promptflow/examples/tutorials/flow-deploy/create-service-with-flow/echo_connection_flow/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/tutorials/flow-deploy/create-service-with-flow/echo_connection_flow/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 162
} | 18 |
<jupyter_start><jupyter_text>Run prompt flow in Azure AI**Requirements** - In order to benefit from this tutorial, you will need:- An Azure account with an active subscription - [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F)- An Azure ML workspace - [Configure workspace](../../configuration.ipynb)- A python environment- Installed prompt flow SDK**Learning Objectives** - By the end of this tutorial, you should be able to:- Connect to your Azure AI workspace from the Python SDK- Create and develop a new promptflow run- Evaluate the run with a evaluation flow**Motivations** - This guide will walk you through the main user journey of prompt flow code-first experience. You will learn how to create and develop your first prompt flow, test and evaluate it. 0. Install dependent packages<jupyter_code>%pip install -r ../../requirements.txt<jupyter_output><empty_output><jupyter_text>1. Connect to Azure Machine Learning WorkspaceThe [workspace](https://docs.microsoft.com/en-us/azure/machine-learning/concept-workspace) is the top-level resource for Azure Machine Learning, providing a centralized place to work with all the artifacts you create when you use Azure Machine Learning. In this section we will connect to the workspace in which the job will be run. 1.1 Import the required libraries<jupyter_code>import json
# Import required libraries
from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
# azure version promptflow apis
from promptflow.azure import PFClient<jupyter_output><empty_output><jupyter_text>1.2 Configure credentialWe are using `DefaultAzureCredential` to get access to workspace. `DefaultAzureCredential` should be capable of handling most Azure SDK authentication scenarios. Reference for more available credentials if it does not work for you: [configure credential example](../../configuration.ipynb), [azure-identity reference doc](https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python).<jupyter_code>try:
credential = DefaultAzureCredential()
# Check if given credential can get token successfully.
credential.get_token("https://management.azure.com/.default")
except Exception as ex:
# Fall back to InteractiveBrowserCredential in case DefaultAzureCredential not work
    credential = InteractiveBrowserCredential()<jupyter_output><empty_output><jupyter_text>1.3 Get a handle to the workspaceWe use a config file to connect to a workspace. The Azure ML workspace should be configured with a compute cluster. [Check this notebook for configuring a workspace](../../configuration.ipynb)<jupyter_code># Get a handle to workspace
pf = PFClient.from_config(credential=credential)<jupyter_output><empty_output><jupyter_text>1.4 Create necessary connectionsConnection helps securely store and manage secret keys or other sensitive credentials required for interacting with LLM and other external tools for example Azure Content Safety.In this notebook, we will use flow `web-classification` which uses connection `azure_open_ai_connection` inside, we need to set up the connection if we haven't added it before.Prepare your Azure Open AI resource follow this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.Please go to [workspace portal](https://ml.azure.com/), click `Prompt flow` -> `Connections` -> `Create`, then follow the instruction to create your own connections. Learn more on [connections](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/concept-connections?view=azureml-api-2). 2. Create a new run`web-classification` is a flow demonstrating multi-class classification with LLM. Given an url, it will classify the url into one web category with just a few shots, simple summarization and classification prompts. Set flow path and input data<jupyter_code># load flow
flow = "../../flows/standard/web-classification"
data = "../../flows/standard/web-classification/data.jsonl"<jupyter_output><empty_output><jupyter_text>Submit run<jupyter_code># create run
base_run = pf.run(
flow=flow,
data=data,
)
print(base_run)
pf.stream(base_run)
details = pf.get_details(base_run)
details.head(10)
pf.visualize(base_run)<jupyter_output><empty_output><jupyter_text>3. Evaluate your flow run resultThen you can use an evaluation method to evaluate your flow. The evaluation methods are also flows which use Python or LLM etc., to calculate metrics like accuracy, relevance score.In this notebook, we use `eval-classification-accuracy` flow to evaluate. This is a flow illustrating how to evaluate the performance of a classification system. It involves comparing each prediction to the groundtruth and assigns a "Correct" or "Incorrect" grade, and aggregating the results to produce metrics such as accuracy, which reflects how good the system is at classifying the data.<jupyter_code>eval_run = pf.run(
flow="../../flows/evaluation/eval-classification-accuracy",
data=data,
run=base_run,
column_mapping={
"groundtruth": "${data.answer}",
"prediction": "${run.outputs.category}",
},
)
pf.stream(eval_run)
details = pf.get_details(eval_run)
details.head(10)
metrics = pf.get_metrics(eval_run)
print(json.dumps(metrics, indent=4))
pf.visualize([base_run, eval_run])<jupyter_output><empty_output><jupyter_text>Create another run with different variant nodeIn this example, `web-classification`'s node `summarize_text_content` has two variants: `variant_0` and `variant_1`. The difference between them is the inputs parameters:variant_0: - inputs: - deployment_name: gpt-35-turbo - max_tokens: '128' - temperature: '0.2' - text: ${fetch_text_content_from_url.output}variant_1: - inputs: - deployment_name: gpt-35-turbo - max_tokens: '256' - temperature: '0.3' - text: ${fetch_text_content_from_url.output}You can check the whole flow definition at [flow.dag.yaml](../../flows/standard/web-classification/flow.dag.yaml)<jupyter_code># use the variant1 of the summarize_text_content node.
variant_run = pf.run(
flow=flow,
data=data,
variant="${summarize_text_content.variant_1}", # here we specify node "summarize_text_content" to use variant 1 version.
)
pf.stream(variant_run)
details = pf.get_details(variant_run)
details.head(10)<jupyter_output><empty_output><jupyter_text>Run evaluation against variant run<jupyter_code>eval_flow = "../../flows/evaluation/eval-classification-accuracy"
eval_run_variant = pf.run(
flow=eval_flow,
data="../../flows/standard/web-classification/data.jsonl", # path to the data file
run=variant_run, # use run as the variant
column_mapping={
# reference data
"groundtruth": "${data.answer}",
# reference the run's output
"prediction": "${run.outputs.category}",
},
)
pf.stream(eval_run_variant)
details = pf.get_details(eval_run_variant)
details.head(10)
metrics = pf.get_metrics(eval_run_variant)
print(json.dumps(metrics, indent=4))
pf.visualize([eval_run, eval_run_variant])<jupyter_output><empty_output> | promptflow/examples/tutorials/get-started/quickstart-azure.ipynb/0 | {
"file_path": "promptflow/examples/tutorials/get-started/quickstart-azure.ipynb",
"repo_id": "promptflow",
"token_count": 2231
} | 19 |
.title {
font-weight:700;
}
.sd-card-header {
font-weight:700;
font-size: 16px;
}
.bd-page-width {
max-width: 100rem;
}
.bd-sidebar-primary {
flex: 0 0 20%;
}
.bd-main .bd-content .bd-article-container {
max-width: 70em;
}
html[data-theme="light"] {
--header-announcement-color: #fff070;
}
html[data-theme="dark"] {
--header-announcement-color: #4d4d00;
}
.bd-header-announcement {
background: var(--header-announcement-color);
}
/* (A) LIGHTBOX BACKGROUND */
#lightbox {
/* (A1) COVERS FULLSCREEN */
position: fixed; z-index: 1060;
top: 0; left: 0;
width: 100%; height: 100%;
/* (A2) BACKGROUND */
background: rgba(0, 0, 0, 0.5);
/* (A3) CENTER IMAGE ON SCREEN */
display: flex;
align-items: center;
align-items: center;
/* (A4) HIDDEN BY DEFAULT */
visibility: hidden;
opacity: 0;
/* (A5) SHOW/HIDE ANIMATION */
transition: opacity ease 0.4s;
}
/* (A6) TOGGLE VISIBILITY */
#lightbox.show {
visibility: visible;
opacity: 1;
}
/* (B) LIGHTBOX IMAGE */
#lightbox img {
/* (B1) DIMENSIONS */
width: 100%;
height: 100%;
/* (B2) IMAGE FIT */
/* contain | cover | fill | scale-down */
object-fit: contain;
}
| promptflow/scripts/docs/_static/custom.css/0 | {
"file_path": "promptflow/scripts/docs/_static/custom.css",
"repo_id": "promptflow",
"token_count": 554
} | 20 |
- name: {{ step_name }}
working-directory: examples
run: |
python -m pip install --upgrade pip
pip install -r dev_requirements.txt | promptflow/scripts/readme/ghactions_driver/workflow_steps/step_install_dev_deps.yml.jinja2/0 | {
"file_path": "promptflow/scripts/readme/ghactions_driver/workflow_steps/step_install_dev_deps.yml.jinja2",
"repo_id": "promptflow",
"token_count": 47
} | 21 |
$schema: https://azuremlschemas.azureedge.net/latest/environment.schema.json
name: chat-with-pdf
build:
path: context
inference_config:
liveness_route:
port: 8080
path: /health
readiness_route:
port: 8080
path: /health
scoring_route:
port: 8080
path: /score
| promptflow/scripts/runtime_mgmt/runtime-env/env.yaml/0 | {
"file_path": "promptflow/scripts/runtime_mgmt/runtime-env/env.yaml",
"repo_id": "promptflow",
"token_count": 117
} | 22 |
{{ package_name }}.tools.{{ tool_name }}.{{ class_name }}.{{ function_name }}:
class_name: {{ class_name }}
function: {{ function_name }}
inputs:
url:
type:
- string
query:
type:
- string
module: {{ package_name }}.tools.{{ tool_name }}
name: Hello World Tool
description: This is hello world tool
type: python
| promptflow/scripts/tool/templates/tool2.yaml.j2/0 | {
"file_path": "promptflow/scripts/tool/templates/tool2.yaml.j2",
"repo_id": "promptflow",
"token_count": 133
} | 23 |
promptflow.tools.azure_content_safety.analyze_text:
module: promptflow.tools.azure_content_safety
function: analyze_text
inputs:
connection:
type:
- AzureContentSafetyConnection
hate_category:
default: medium_sensitivity
enum:
- disable
- low_sensitivity
- medium_sensitivity
- high_sensitivity
type:
- string
self_harm_category:
default: medium_sensitivity
enum:
- disable
- low_sensitivity
- medium_sensitivity
- high_sensitivity
type:
- string
sexual_category:
default: medium_sensitivity
enum:
- disable
- low_sensitivity
- medium_sensitivity
- high_sensitivity
type:
- string
text:
type:
- string
violence_category:
default: medium_sensitivity
enum:
- disable
- low_sensitivity
- medium_sensitivity
- high_sensitivity
type:
- string
name: Content Safety (Text Analyze)
description: Use Azure Content Safety to detect harmful content.
type: python
deprecated_tools:
- content_safety_text.tools.content_safety_text_tool.analyze_text
| promptflow/src/promptflow-tools/promptflow/tools/yamls/azure_content_safety.yaml/0 | {
"file_path": "promptflow/src/promptflow-tools/promptflow/tools/yamls/azure_content_safety.yaml",
"repo_id": "promptflow",
"token_count": 492
} | 24 |
# Prompt flow
[](https://pypi.org/project/promptflow/)
[](https://pypi.python.org/pypi/promptflow/)
[](https://pypi.org/project/promptflow/)
[](https://microsoft.github.io/promptflow/reference/pf-command-reference.html)
[](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow)
[](https://microsoft.github.io/promptflow/index.html)
[](https://github.com/microsoft/promptflow/issues/new/choose)
[](https://github.com/microsoft/promptflow/issues/new/choose)
[](https://github.com/microsoft/promptflow/blob/main/CONTRIBUTING.md)
[](https://github.com/microsoft/promptflow/blob/main/LICENSE)
> Welcome to join us to make prompt flow better by
> participating [discussions](https://github.com/microsoft/promptflow/discussions),
> opening [issues](https://github.com/microsoft/promptflow/issues/new/choose),
> submitting [PRs](https://github.com/microsoft/promptflow/pulls).
**Prompt flow** is a suite of development tools designed to streamline the end-to-end development cycle of LLM-based AI applications, from ideation, prototyping, testing, evaluation to production deployment and monitoring. It makes prompt engineering much easier and enables you to build LLM apps with production quality.
With prompt flow, you will be able to:
- **Create and iteratively develop flow**
- Create executable [flows](https://microsoft.github.io/promptflow/concepts/concept-flows.html) that link LLMs, prompts, Python code and other [tools](https://microsoft.github.io/promptflow/concepts/concept-tools.html) together.
- Debug and iterate your flows, especially the [interaction with LLMs](https://microsoft.github.io/promptflow/concepts/concept-connections.html) with ease.
- **Evaluate flow quality and performance**
- Evaluate your flow's quality and performance with larger datasets.
- Integrate the testing and evaluation into your CI/CD system to ensure quality of your flow.
- **Streamlined development cycle for production**
- Deploy your flow to the serving platform you choose or integrate into your app's code base easily.
- (Optional but highly recommended) Collaborate with your team by leveraging the cloud version of [prompt flow in Azure AI](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/overview-what-is-prompt-flow?view=azureml-api-2).
------
## Installation
Ensure you have a Python environment; `python=3.9` is recommended.
```sh
pip install promptflow promptflow-tools
```
## Quick Start ⚡
**Create a chatbot with prompt flow**
Run the command to initiate a prompt flow from a chat template, it creates folder named `my_chatbot` and generates required files within it:
```sh
pf flow init --flow ./my_chatbot --type chat
```
**Setup a connection for your API key**
For OpenAI key, establish a connection by running the command, using the `openai.yaml` file in the `my_chatbot` folder, which stores your OpenAI key:
```sh
# Override keys with --set to avoid yaml file changes
pf connection create --file ./my_chatbot/openai.yaml --set api_key=<your_api_key> --name open_ai_connection
```
For Azure OpenAI key, establish the connection by running the command, using the `azure_openai.yaml` file:
```sh
pf connection create --file ./my_chatbot/azure_openai.yaml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
**Chat with your flow**
In the `my_chatbot` folder, there's a `flow.dag.yaml` file that outlines the flow, including inputs/outputs, nodes, connection, and the LLM model, etc
> Note that in the `chat` node, we're using a connection named `open_ai_connection` (specified in `connection` field) and the `gpt-35-turbo` model (specified in `deployment_name` field). The deployment_name filed is to specify the OpenAI model, or the Azure OpenAI deployment resource.
Interact with your chatbot by running: (press `Ctrl + C` to end the session)
```sh
pf flow test --flow ./my_chatbot --interactive
```
#### Continue to delve deeper into [prompt flow](https://github.com/microsoft/promptflow).
| promptflow/src/promptflow/README.md/0 | {
"file_path": "promptflow/src/promptflow/README.md",
"repo_id": "promptflow",
"token_count": 1489
} | 25 |
import os
from promptflow._cli._params import (
add_param_yes,
base_params,
)
from promptflow._cli._utils import activate_action, get_cli_sdk_logger
from promptflow._utils.utils import prompt_y_n
from promptflow.exceptions import UserErrorException
logger = get_cli_sdk_logger()
UPGRADE_MSG = 'Not able to upgrade automatically'
def add_upgrade_parser(subparsers):
    """Register the ``upgrade`` sub-command on the pf subparsers."""
    epilog = """
Examples:
# Upgrade prompt flow without prompt and run non-interactively:
pf upgrade --yes
"""  # noqa: E501
    # The yes/assume-yes flag plus the shared base parameters (verbosity etc.).
    upgrade_params = [add_param_yes, *base_params]
    activate_action(
        name="upgrade",
        description="Upgrade prompt flow CLI.",
        epilog=epilog,
        add_params=upgrade_params,
        subparsers=subparsers,
        help_message="pf upgrade",
        action_param_name="action",
    )
def upgrade_version(args):
    """Upgrade the prompt flow CLI to the latest released version.

    Queries PyPI for the newest version, asks for confirmation unless
    ``--yes`` was passed, then dispatches to the upgrade mechanism matching
    the recorded installer (MSI on Windows, pip, or the install script).
    Exits the process with a non-zero code when the upgrade fails.
    """
    # Deferred imports keep `pf --help` startup fast.
    import platform
    import sys
    import subprocess

    from promptflow._constants import _ENV_PF_INSTALLER, CLI_PACKAGE_NAME
    from promptflow._version import VERSION as local_version

    from packaging.version import parse

    from promptflow._utils.version_hint_utils import get_latest_version_from_pypi

    latest_version = get_latest_version_from_pypi(CLI_PACKAGE_NAME)
    if not latest_version:
        logger.warning("Failed to get the latest prompt flow version.")
        return
    elif parse(latest_version) <= parse(local_version):
        logger.warning("You already have the latest prompt flow version: %s", local_version)
        return

    yes = args.yes
    exit_code = 0
    # The installer type was recorded at install time via this env var.
    installer = os.getenv(_ENV_PF_INSTALLER) or ''
    installer = installer.upper()
    # Was a stray `print(f"installer: {installer}")` to stdout; route through
    # the logger like the rest of this function.
    logger.debug("installer: %s", installer)
    latest_version_msg = 'Upgrading prompt flow CLI version to {}.'.format(latest_version) if yes \
        else 'Latest version available is {}.'.format(latest_version)
    logger.warning("Your current prompt flow CLI version is %s. %s", local_version, latest_version_msg)
    if not yes:
        logger.warning("Please check the release notes first")
        if not sys.stdin.isatty():
            logger.debug('No tty available.')
            raise UserErrorException("No tty available. Please run command with --yes.")
        confirmation = prompt_y_n("Do you want to continue?", default='y')
        if not confirmation:
            logger.debug("Upgrade stopped by user")
            return

    if installer == 'MSI':
        _upgrade_on_windows(yes)
    elif installer == 'PIP':
        pip_args = [sys.executable, '-m', 'pip', 'install', '--upgrade',
                    'promptflow[azure,executable,pfs,azureml-serving]', '-vv',
                    '--disable-pip-version-check', '--no-cache-dir']
        logger.debug("Update prompt flow with '%s'", " ".join(pip_args))
        exit_code = subprocess.call(pip_args, shell=platform.system() == 'Windows')
    elif installer == 'SCRIPT':
        command = "curl https://promptflowartifact.blob.core.windows.net/linux-install-scripts/install | bash"
        logger.warning(f"{UPGRADE_MSG}, you can try to run {command} in your terminal directly to upgrade package.")
        return
    else:
        logger.warning(UPGRADE_MSG)
        return

    if exit_code:
        err_msg = "CLI upgrade failed."
        logger.warning(err_msg)
        sys.exit(exit_code)

    # pip may have replaced these modules on disk; reload before using them to
    # verify the freshly installed version.
    import importlib
    import json
    importlib.reload(subprocess)
    importlib.reload(json)

    version_result = subprocess.check_output(['pf', 'version'], shell=platform.system() == 'Windows')
    version_json = json.loads(version_result)
    new_version = version_json['promptflow']
    if new_version == local_version:
        err_msg = f"CLI upgrade to version {latest_version} failed or aborted."
        logger.warning(err_msg)
        sys.exit(1)
    logger.warning("Upgrade finished.")
def _upgrade_on_windows(yes):
    """Fetch the MSI installer into a temp folder and launch msiexec.exe.

    Installing straight from the URL may be blocked by policy:
    https://github.com/Azure/azure-cli/issues/19171
    Keeping a local copy also lets the user run the MSI manually should
    msiexec.exe fail.
    """
    import shutil
    import subprocess
    import sys
    import tempfile

    msi_url = 'https://aka.ms/installpromptflowwindowsx64'
    logger.warning("Updating prompt flow CLI with MSI from %s", msi_url)

    # Download into ~\AppData\Local\Temp\promptflow-msi, wiping any stale copy first.
    download_dir = os.path.join(tempfile.gettempdir(), 'promptflow-msi')
    try:
        shutil.rmtree(download_dir)
    except FileNotFoundError:
        # Nothing to clean up; the directory was already gone.
        # errno: 2, winerror: 3, strerror: 'The system cannot find the path specified'
        pass
    except OSError as err:
        logger.warning("Failed to delete '%s': %s. You may try to delete it manually.", download_dir, err)
    os.makedirs(download_dir, exist_ok=True)

    installer_path = _download_from_url(msi_url, download_dir)
    if yes:
        # Quiet install, detached; no window for the user to interact with.
        subprocess.Popen(['msiexec.exe', '/i', installer_path, '/qn'])
    else:
        subprocess.call(['msiexec.exe', '/i', installer_path])
    logger.warning("Installation started. Please complete the upgrade in the opened window.")
    sys.exit(0)
def _download_from_url(url, target_dir):
    """Stream *url* into *target_dir* and return the path of the saved file.

    Raises UserErrorException when the server responds with a non-200 status.
    """
    import requests

    # A timeout keeps the CLI from hanging forever on an unresponsive server.
    r = requests.get(url, stream=True, timeout=60)
    if r.status_code != 200:
        raise UserErrorException("Request to {} failed with {}".format(url, r.status_code))

    # r.url is the real path of the msi after redirects, like
    # 'https://promptflowartifact.blob.core.windows.net/msi-installer/promptflow.msi'
    file_name = r.url.rsplit('/')[-1]
    msi_path = os.path.join(target_dir, file_name)
    logger.warning("Downloading MSI to %s", msi_path)
    with open(msi_path, 'wb') as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # skip keep-alive chunks, which arrive as empty bytes
                f.write(chunk)
    return msi_path
| promptflow/src/promptflow/promptflow/_cli/_pf/_upgrade.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/_pf/_upgrade.py",
"repo_id": "promptflow",
"token_count": 2343
} | 26 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
name: {{ connection }}
type: open_ai
api_key: "<user-input>"
| promptflow/src/promptflow/promptflow/_cli/data/chat_flow/template/openai.yaml.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/chat_flow/template/openai.yaml.jinja2",
"repo_id": "promptflow",
"token_count": 59
} | 27 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from contextvars import ContextVar
from typing import Dict, Mapping
from promptflow._version import VERSION
class OperationContext(Dict):
    """Context-local store for metadata about the current operation.

    A dictionary-like class used to store primitive context information for the
    current operation, with attribute-style access on top of the dict. The
    object lives in a ``ContextVar`` so it can be reached from anywhere in the
    current execution context; the information is used to enrich logging and
    telemetry.
    """

    _CONTEXT_KEY = "operation_context"
    # One OperationContext per execution context (thread / async task chain).
    _current_context = ContextVar(_CONTEXT_KEY, default=None)
    USER_AGENT_KEY = "user_agent"

    @classmethod
    def get_instance(cls):
        """Return the OperationContext bound to the current context.

        Creates a new instance and binds it on first use.

        Returns:
            OperationContext: The OperationContext instance.
        """
        instance = cls._current_context.get()
        if instance is None:
            instance = OperationContext()
            cls._current_context.set(instance)
        return instance

    def __setattr__(self, name, value):
        """Store ``name``/``value`` as a dict entry.

        Args:
            name (str): The name of the attribute.
            value (int, float, str, bool, or None): The value of the attribute.

        Raises:
            TypeError: If name is not a string or value is not a primitive.
        """
        if not isinstance(name, str):
            raise TypeError("Name must be a string")
        # Only primitives are allowed so the context stays trivially serializable.
        if value is not None and not isinstance(value, (int, float, str, bool)):
            raise TypeError("Value must be a primitive")
        self[name] = value

    def __getattr__(self, name):
        """Look up ``name`` as a dict entry, falling back to normal attributes.

        Only invoked when regular attribute lookup fails.

        Args:
            name (str): The name of the attribute.

        Returns:
            int, float, str, bool, or None: The value of the attribute.

        Raises:
            AttributeError: If the name is neither a dict entry nor an attribute.
        """
        if name in self:
            return self[name]
        # Fixed: the original called __getattribute__ but dropped its result,
        # so a successful fallback lookup would have yielded None.
        return super().__getattribute__(name)

    def __delattr__(self, name):
        """Delete the dict entry ``name``, falling back to normal attribute deletion.

        Args:
            name (str): The name of the attribute.
        """
        if name in self:
            del self[name]
        else:
            super().__delattr__(name)

    def get_user_agent(self):
        """Return the user agent string for this context.

        Combines any stored ``user_agent`` value with the promptflow SDK
        version.

        Returns:
            str: The user agent string.
        """

        def parts():
            if OperationContext.USER_AGENT_KEY in self:
                yield self.get(OperationContext.USER_AGENT_KEY)
            yield f"promptflow/{VERSION}"

        # strip to avoid leading or trailing spaces, which may cause error when sending request
        ua = " ".join(parts()).strip()
        return ua

    def append_user_agent(self, user_agent: str):
        """Append ``user_agent`` to the stored user agent string (idempotent).

        Args:
            user_agent (str): The user agent information to append.
        """
        if OperationContext.USER_AGENT_KEY in self:
            # Skip values that are already present to avoid duplicates.
            if user_agent not in self.user_agent:
                self.user_agent = f"{self.user_agent.strip()} {user_agent.strip()}"
        else:
            self.user_agent = user_agent

    def set_batch_input_source_from_inputs_mapping(self, inputs_mapping: Mapping[str, str]):
        """Infer the batch input source from ``inputs_mapping`` and record it.

        The mapping is dictated entirely by the external caller (see
        https://aka.ms/pf/column-mapping). Values that start with
        ``"${run.outputs"`` reference a previous run's outputs; if any such
        value is present, ``batch_input_source`` is set to ``"Run"``,
        otherwise to ``"Data"``.

        Examples:
            - {'input1': '${run.outputs.some_output}', 'input2': 'direct_data'}
              -> batch_input_source == "Run"
            - {'input1': 'data_source1', 'input2': 'data_source2'}
              -> batch_input_source == "Data"

        Args:
            inputs_mapping (Mapping[str, str]): A dictionary mapping input
                names to their sources (direct data or previous-run outputs).

        Returns:
            None
        """
        if inputs_mapping and any(
            isinstance(value, str) and value.startswith("${run.outputs") for value in inputs_mapping.values()
        ):
            self.batch_input_source = "Run"
        else:
            self.batch_input_source = "Data"

    def get_context_dict(self):
        """Return a plain ``dict`` copy of all stored context information.

        Returns:
            dict: The context dictionary.
        """
        return dict(self)
| promptflow/src/promptflow/promptflow/_core/operation_context.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_core/operation_context.py",
"repo_id": "promptflow",
"token_count": 2764
} | 28 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from typing import List, Optional
from sqlalchemy import TEXT, Column
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import declarative_base
from promptflow._sdk._constants import CONNECTION_TABLE_NAME
from promptflow._sdk._orm.retry import sqlite_retry
from .._errors import ConnectionNotFoundError
from .session import mgmt_db_session
Base = declarative_base()
class Connection(Base):
    """ORM model for a connection record persisted in the local SQLite database."""

    __tablename__ = CONNECTION_TABLE_NAME
    # Primary key: the user-facing connection name.
    connectionName = Column(TEXT, primary_key=True)
    connectionType = Column(TEXT, nullable=False)
    configs = Column(TEXT, nullable=False)  # For custom connection, configs can be
    customConfigs = Column(TEXT, nullable=False)  # For strong type connection, custom configs is an empty dict
    createdDate = Column(TEXT, nullable=False)  # ISO8601("YYYY-MM-DD HH:MM:SS.SSS"), string
    lastModifiedDate = Column(TEXT, nullable=False)  # ISO8601("YYYY-MM-DD HH:MM:SS.SSS"), string
    expiryTime = Column(TEXT)  # ISO8601("YYYY-MM-DD HH:MM:SS.SSS"), string
    @staticmethod
    @sqlite_retry
    def create_or_update(connection: "Connection") -> None:
        """Insert the connection; on primary-key conflict, update the existing row in place."""
        session = mgmt_db_session()
        name = connection.connectionName
        try:
            session.add(connection)
            session.commit()
        except IntegrityError:
            # Row already exists — fall back to an UPDATE on a fresh session.
            session = mgmt_db_session()
            # Remove the _sa_instance_state
            update_dict = {k: v for k, v in connection.__dict__.items() if not k.startswith("_")}
            # Keep the original creation timestamp when updating.
            update_dict.pop("createdDate")
            session.query(Connection).filter(Connection.connectionName == name).update(update_dict)
            session.commit()
    @staticmethod
    @sqlite_retry
    def get(name: str, raise_error=True) -> "Connection":
        """Fetch a connection by name; raise ConnectionNotFoundError (unless raise_error is False) when missing."""
        with mgmt_db_session() as session:
            connection = session.query(Connection).filter(Connection.connectionName == name).first()
            if connection is None and raise_error:
                raise ConnectionNotFoundError(f"Connection {name!r} is not found.")
            return connection
    @staticmethod
    @sqlite_retry
    def list(max_results: Optional[int] = None, all_results: bool = False) -> List["Connection"]:
        """List connections, capped at max_results unless all_results is True."""
        with mgmt_db_session() as session:
            if all_results:
                return [run_info for run_info in session.query(Connection).all()]
            else:
                return [run_info for run_info in session.query(Connection).limit(max_results)]
    @staticmethod
    @sqlite_retry
    def delete(name: str) -> None:
        """Delete the named connection row; a no-op if it does not exist."""
        with mgmt_db_session() as session:
            session.query(Connection).filter(Connection.connectionName == name).delete()
            session.commit()
| promptflow/src/promptflow/promptflow/_sdk/_orm/connection.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_orm/connection.py",
"repo_id": "promptflow",
"token_count": 1061
} | 29 |
packageName: Promptflow.Core.PfsClient
packageVersion: 0.0.1
targetFramework: netstandard2.0
optionalProjectFile: false
| promptflow/src/promptflow/promptflow/_sdk/_service/generator_configs/csharp.yaml/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_service/generator_configs/csharp.yaml",
"repo_id": "promptflow",
"token_count": 37
} | 30 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import dataclass
from typing import Mapping, Any
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
@dataclass
class FlowResult:
    """The result of a flow call."""

    # Flow outputs keyed by output name.
    output: Mapping[str, Any]
    # trace info of the flow run.
    run_info: FlowRunInfo
    # Trace info of each executed node, keyed by node name.
    node_run_infos: Mapping[str, NodeRunInfo]
| promptflow/src/promptflow/promptflow/_sdk/_serving/flow_result.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_serving/flow_result.py",
"repo_id": "promptflow",
"token_count": 154
} | 31 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from .activity import ( # noqa: F401
ActivityCompletionStatus,
ActivityType,
log_activity,
monitor_operation,
request_id_context,
)
from .logging_handler import PromptFlowSDKLogHandler, get_appinsights_log_handler # noqa: F401
from .telemetry import TelemetryMixin, WorkspaceTelemetryMixin, get_telemetry_logger, is_telemetry_enabled # noqa: F401
| promptflow/src/promptflow/promptflow/_sdk/_telemetry/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_telemetry/__init__.py",
"repo_id": "promptflow",
"token_count": 150
} | 32 |
# syntax=docker/dockerfile:1
# Build stage: collect the flow definition, connections and startup script
# on top of the .NET 6 SDK image.
FROM mcr.microsoft.com/dotnet/sdk:6.0 AS build
WORKDIR /
COPY ./flow /flow
COPY ./connections /connections
COPY ./start.sh /start.sh
# Runtime stage: copy everything onto the smaller ASP.NET runtime image.
FROM mcr.microsoft.com/dotnet/aspnet:6.0 AS runtime
COPY --from=build / /
# Lets the serving app detect that it is running inside a container.
ENV IS_IN_DOCKER="true"
EXPOSE 8080
# runit supervises the service processes defined under /var/runit.
RUN apt-get update && apt-get install -y runit
# reset runsvdir
RUN rm -rf /var/runit
COPY ./runit /var/runit
# grant permission
RUN chmod -R +x /var/runit
CMD ["bash", "./start.sh"]
| promptflow/src/promptflow/promptflow/_sdk/data/docker_csharp/Dockerfile.jinja2/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/data/docker_csharp/Dockerfile.jinja2",
"repo_id": "promptflow",
"token_count": 194
} | 33 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from os import PathLike
from pathlib import Path
from typing import Union
from promptflow._constants import LANGUAGE_KEY, FlowLanguage
from promptflow._sdk._constants import BASE_PATH_CONTEXT_KEY
from promptflow._sdk.entities._flow import FlowBase
from promptflow.exceptions import UserErrorException
class EagerFlow(FlowBase):
    """Entity representing an eager (entry-function based) flow."""

    def __init__(
        self,
        path: Union[str, PathLike],
        entry: str,
        data: dict,
        **kwargs,
    ):
        self.path = Path(path)
        # The code directory is the folder containing the flow file.
        self.code = self.path.parent
        self.entry = entry
        self._data = data
        super().__init__(**kwargs)

    @property
    def language(self) -> str:
        """Flow language; defaults to Python when not declared in the flow data."""
        return self._data.get(LANGUAGE_KEY, FlowLanguage.Python)

    @classmethod
    def _create_schema_for_validation(cls, context):
        # Imported lazily to avoid a circular import with the schema module.
        from ..schemas._flow import EagerFlowSchema

        return EagerFlowSchema(context=context)

    @classmethod
    def _load(cls, path: Path, entry: str = None, data: dict = None, **kwargs):
        """Build an EagerFlow from a YAML file or from an explicit entry function."""
        flow_data = data or {}
        if path.suffix in [".yaml", ".yml"]:
            # Schema validation rejects unknown fields before the data is consumed.
            schema = cls._create_schema_for_validation(context={BASE_PATH_CONTEXT_KEY: path.parent})
            flow_data = schema.load(flow_data)
            path = flow_data["path"]
            if entry:
                raise UserErrorException("Specifying entry function is not allowed when YAML file is provided.")
            entry = flow_data["entry"]
        if entry is None:
            raise UserErrorException(f"Entry function is not specified for flow {path}")
        return cls(path=path, entry=entry, data=flow_data, **kwargs)
| promptflow/src/promptflow/promptflow/_sdk/entities/_eager_flow.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/entities/_eager_flow.py",
"repo_id": "promptflow",
"token_count": 724
} | 34 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import os.path
import sys
import time
from dataclasses import asdict
from typing import Any, Dict, List, Optional, Union
from promptflow._constants import LANGUAGE_KEY, AvailableIDE, FlowLanguage
from promptflow._sdk._constants import (
MAX_RUN_LIST_RESULTS,
MAX_SHOW_DETAILS_RESULTS,
FlowRunProperties,
ListViewType,
RunInfoSources,
RunStatus,
)
from promptflow._sdk._errors import InvalidRunStatusError, RunExistsError, RunNotFoundError, RunOperationParameterError
from promptflow._sdk._orm import RunInfo as ORMRun
from promptflow._sdk._telemetry import ActivityType, TelemetryMixin, monitor_operation
from promptflow._sdk._utils import incremental_print, print_red_error, safe_parse_object_list
from promptflow._sdk._visualize_functions import dump_html, generate_html_string
from promptflow._sdk.entities import Run
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow._utils.yaml_utils import load_yaml_string
from promptflow.contracts._run_management import RunDetail, RunMetadata, RunVisualization, VisualizationConfig
from promptflow.exceptions import UserErrorException
RUNNING_STATUSES = RunStatus.get_running_statuses()
logger = get_cli_sdk_logger()
class RunOperations(TelemetryMixin):
    """Operations for managing local runs: CRUD, streaming, metrics and visualization."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @monitor_operation(activity_name="pf.runs.list", activity_type=ActivityType.PUBLICAPI)
    def list(
        self,
        max_results: Optional[int] = MAX_RUN_LIST_RESULTS,
        *,
        list_view_type: ListViewType = ListViewType.ACTIVE_ONLY,
    ) -> List[Run]:
        """List runs.

        :param max_results: Max number of results to return. Default: MAX_RUN_LIST_RESULTS.
        :type max_results: Optional[int]
        :param list_view_type: View type for including/excluding (for example) archived runs. Default: ACTIVE_ONLY.
        :type list_view_type: Optional[ListViewType]
        :return: List of run objects.
        :rtype: List[~promptflow.entities.Run]
        """
        orm_runs = ORMRun.list(max_results=max_results, list_view_type=list_view_type)
        # Skip (rather than fail on) runs whose ORM records cannot be parsed.
        return safe_parse_object_list(
            obj_list=orm_runs,
            parser=Run._from_orm_object,
            message_generator=lambda x: f"Error parsing run {x.name!r}, skipped.",
        )

    @monitor_operation(activity_name="pf.runs.get", activity_type=ActivityType.PUBLICAPI)
    def get(self, name: str) -> Run:
        """Get a run entity.

        :param name: Name of the run.
        :type name: str
        :return: run object retrieved from the database.
        :rtype: ~promptflow.entities.Run
        """
        return self._get(name)

    def _get(self, name: str) -> Run:
        """Load a run from the local database; raises RunNotFoundError when absent."""
        name = Run._validate_and_return_run_name(name)
        try:
            return Run._from_orm_object(ORMRun.get(name))
        except RunNotFoundError as e:
            raise e

    @monitor_operation(activity_name="pf.runs.create_or_update", activity_type=ActivityType.PUBLICAPI)
    def create_or_update(self, run: Run, **kwargs) -> Run:
        """Create or update a run.

        :param run: Run object to create or update.
        :type run: ~promptflow.entities.Run
        :return: Run object created or updated.
        :rtype: ~promptflow.entities.Run
        """
        # create run from an existing run folder
        if run._run_source == RunInfoSources.EXISTING_RUN:
            return self._create_run_from_existing_run_folder(run=run, **kwargs)
        # TODO: change to async
        stream = kwargs.pop("stream", False)
        try:
            from promptflow._sdk._submitter import RunSubmitter

            created_run = RunSubmitter(run_operations=self).submit(run=run, **kwargs)
            if stream:
                self.stream(created_run)
            return created_run
        except RunExistsError:
            raise RunExistsError(f"Run {run.name!r} already exists.")

    def _create_run_from_existing_run_folder(self, run: Run, **kwargs) -> Run:
        """Create run from existing run folder."""
        # Fail fast if a run with the same name is already registered.
        try:
            self.get(run.name)
        except RunNotFoundError:
            pass
        else:
            raise RunExistsError(f"Run {run.name!r} already exists.")
        try:
            run._dump()
            return run
        except Exception as e:
            raise UserErrorException(
                f"Failed to create run {run.name!r} from existing run folder {run.source!r}: {str(e)}"
            ) from e

    def _print_run_summary(self, run: Run) -> None:
        """Print a short human-readable summary of a finished run to stdout."""
        print("======= Run Summary =======\n")
        duration = str(run._end_time - run._created_on)
        print(
            f'Run name: "{run.name}"\n'
            f'Run status: "{run.status}"\n'
            f'Start time: "{run._created_on}"\n'
            f'Duration: "{duration}"\n'
            f'Output path: "{run._output_path}"\n'
        )

    @monitor_operation(activity_name="pf.runs.stream", activity_type=ActivityType.PUBLICAPI)
    def stream(self, name: Union[str, Run], raise_on_error: bool = True) -> Run:
        """Stream run logs to the console.

        :param name: Name of the run, or run object.
        :type name: Union[str, ~promptflow.sdk.entities.Run]
        :param raise_on_error: Raises an exception if a run fails or canceled.
        :type raise_on_error: bool
        :return: Run object.
        :rtype: ~promptflow.entities.Run
        """
        name = Run._validate_and_return_run_name(name)
        run = self.get(name=name)
        local_storage = LocalStorageOperations(run=run)
        file_handler = sys.stdout
        try:
            printed = 0
            run = self.get(run.name)
            # Poll the run status and incrementally print any new log lines
            # until the run leaves the running/finalizing states.
            while run.status in RUNNING_STATUSES or run.status == RunStatus.FINALIZING:
                file_handler.flush()
                available_logs = local_storage.logger.get_logs()
                printed = incremental_print(available_logs, printed, file_handler)
                time.sleep(10)
                run = self.get(run.name)
            # ensure all logs are printed
            file_handler.flush()
            available_logs = local_storage.logger.get_logs()
            incremental_print(available_logs, printed, file_handler)
            self._print_run_summary(run)
        except KeyboardInterrupt:
            # Ctrl+C only stops streaming; the run itself keeps executing.
            error_message = "The output streaming for the run was interrupted, but the run is still executing."
            print(error_message)
        if run.status == RunStatus.FAILED or run.status == RunStatus.CANCELED:
            if run.status == RunStatus.FAILED:
                error_message = local_storage.load_exception().get("message", "Run fails with unknown error.")
            else:
                error_message = "Run is canceled."
            if raise_on_error:
                raise InvalidRunStatusError(error_message)
            else:
                print_red_error(error_message)
        return run

    @monitor_operation(activity_name="pf.runs.archive", activity_type=ActivityType.PUBLICAPI)
    def archive(self, name: Union[str, Run]) -> Run:
        """Archive a run.

        :param name: Name of the run.
        :type name: str
        :return: archived run object.
        :rtype: ~promptflow._sdk.entities._run.Run
        """
        name = Run._validate_and_return_run_name(name)
        ORMRun.get(name).archive()
        return self.get(name)

    @monitor_operation(activity_name="pf.runs.restore", activity_type=ActivityType.PUBLICAPI)
    def restore(self, name: Union[str, Run]) -> Run:
        """Restore a run.

        :param name: Name of the run.
        :type name: str
        :return: restored run object.
        :rtype: ~promptflow._sdk.entities._run.Run
        """
        name = Run._validate_and_return_run_name(name)
        ORMRun.get(name).restore()
        return self.get(name)

    @monitor_operation(activity_name="pf.runs.update", activity_type=ActivityType.PUBLICAPI)
    def update(
        self,
        name: Union[str, Run],
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        tags: Optional[Dict[str, str]] = None,
        **kwargs,
    ) -> Run:
        """Update run status.

        :param name: run name
        :param display_name: display name to update
        :param description: description to update
        :param tags: tags to update
        :param kwargs: other fields to update, fields not supported will be directly dropped.
        :return: updated run object
        :rtype: ~promptflow._sdk.entities._run.Run
        """
        name = Run._validate_and_return_run_name(name)
        # the kwargs is to support update run status scenario but keep it private
        ORMRun.get(name).update(display_name=display_name, description=description, tags=tags, **kwargs)
        return self.get(name)

    @monitor_operation(activity_name="pf.runs.delete", activity_type=ActivityType.PUBLICAPI)
    def delete(
        self,
        name: str,
    ) -> None:
        """Delete run permanently.

        Caution: This operation will delete the run permanently from your local disk.
        Both run entity and output data will be deleted.

        :param name: run name to delete
        :return: None
        """
        valid_run = self.get(name)
        LocalStorageOperations(valid_run).delete()
        ORMRun.delete(name)

    @monitor_operation(activity_name="pf.runs.get_details", activity_type=ActivityType.PUBLICAPI)
    def get_details(
        self, name: Union[str, Run], max_results: int = MAX_SHOW_DETAILS_RESULTS, all_results: bool = False
    ) -> "DataFrame":
        """Get the details from the run.

        .. note::

            If `all_results` is set to True, `max_results` will be overwritten to sys.maxsize.

        :param name: The run name or run object
        :type name: Union[str, ~promptflow.sdk.entities.Run]
        :param max_results: The max number of runs to return, defaults to 100
        :type max_results: int
        :param all_results: Whether to return all results, defaults to False
        :type all_results: bool
        :raises RunOperationParameterError: If `max_results` is not a positive integer.
        :return: The details data frame.
        :rtype: pandas.DataFrame
        """
        from pandas import DataFrame

        # if all_results is True, set max_results to sys.maxsize
        if all_results:
            max_results = sys.maxsize
        if not isinstance(max_results, int) or max_results < 1:
            raise RunOperationParameterError(f"'max_results' must be a positive integer, got {max_results!r}")
        name = Run._validate_and_return_run_name(name)
        run = self.get(name=name)
        local_storage = LocalStorageOperations(run=run)
        inputs, outputs = local_storage.load_inputs_and_outputs()
        inputs = inputs.to_dict("list")
        outputs = outputs.to_dict("list")
        # Prefix columns so inputs and outputs cannot collide in the result frame.
        data = {}
        columns = []
        for k in inputs:
            new_k = f"inputs.{k}"
            data[new_k] = copy.deepcopy(inputs[k])
            columns.append(new_k)
        for k in outputs:
            new_k = f"outputs.{k}"
            data[new_k] = copy.deepcopy(outputs[k])
            columns.append(new_k)
        df = DataFrame(data).head(max_results).reindex(columns=columns)
        return df

    @monitor_operation(activity_name="pf.runs.get_metrics", activity_type=ActivityType.PUBLICAPI)
    def get_metrics(self, name: Union[str, Run]) -> Dict[str, Any]:
        """Get run metrics.

        :param name: name of the run.
        :type name: str
        :return: Run metrics.
        :rtype: Dict[str, Any]
        """
        name = Run._validate_and_return_run_name(name)
        run = self.get(name=name)
        run._check_run_status_is_completed()
        local_storage = LocalStorageOperations(run=run)
        return local_storage.load_metrics()

    def _visualize(self, runs: List[Run], html_path: Optional[str] = None) -> None:
        """Collect details/metadata for the given runs and render them to an HTML page."""
        details: List[RunDetail] = []
        metadatas: List[RunMetadata] = []
        configs: List[VisualizationConfig] = []
        for run in runs:
            # check run status first
            # if run status is not completed, there might be unexpected error during parse data
            # so we directly raise error if there is any incomplete run
            run._check_run_status_is_completed()
            local_storage = LocalStorageOperations(run)
            # nan, inf and -inf are not JSON serializable, which will lead to JavaScript parse error
            # so specify `parse_const_as_str` as True to parse them as string
            detail = local_storage.load_detail(parse_const_as_str=True)
            # ad-hoc step: make logs field empty to avoid too big HTML file
            # we don't provide logs view in visualization page for now
            # when we enable, we will save those big data (e.g. logs) in separate file(s)
            # JS load can be faster than static HTML
            for i in range(len(detail["node_runs"])):
                detail["node_runs"][i]["logs"] = {"stdout": "", "stderr": ""}
            metadata = RunMetadata(
                name=run.name,
                display_name=run.display_name,
                create_time=run.created_on,
                flow_path=run.properties.get(FlowRunProperties.FLOW_PATH, None),
                output_path=run.properties[FlowRunProperties.OUTPUT_PATH],
                tags=run.tags,
                lineage=run.run,
                metrics=self.get_metrics(name=run.name),
                dag=local_storage.load_dag_as_string(),
                flow_tools_json=local_storage.load_flow_tools_json(),
                mode="eager" if local_storage.eager_mode else "",
            )
            details.append(copy.deepcopy(detail))
            metadatas.append(asdict(metadata))
            # TODO: add language to run metadata
            flow_dag = load_yaml_string(metadata.dag) or {}
            configs.append(
                VisualizationConfig(
                    [AvailableIDE.VS_CODE]
                    if flow_dag.get(LANGUAGE_KEY, FlowLanguage.Python) == FlowLanguage.Python
                    else [AvailableIDE.VS]
                )
            )
        data_for_visualize = RunVisualization(
            detail=details,
            metadata=metadatas,
            config=configs,
        )
        html_string = generate_html_string(asdict(data_for_visualize))
        # if html_path is specified, not open it in webbrowser(as it comes from VSC)
        dump_html(html_string, html_path=html_path, open_html=html_path is None)

    @monitor_operation(activity_name="pf.runs.visualize", activity_type=ActivityType.PUBLICAPI)
    def visualize(self, runs: Union[str, Run, List[str], List[Run]], **kwargs) -> None:
        """Visualize run(s).

        :param runs: List of run objects, or names of the runs.
        :type runs: Union[str, ~promptflow.sdk.entities.Run, List[str], List[~promptflow.sdk.entities.Run]]
        """
        if not isinstance(runs, list):
            runs = [runs]
        validated_runs = []
        for run in runs:
            run_name = Run._validate_and_return_run_name(run)
            validated_runs.append(self.get(name=run_name))
        html_path = kwargs.pop("html_path", None)
        try:
            self._visualize(validated_runs, html_path=html_path)
        except InvalidRunStatusError as e:
            error_message = f"Cannot visualize non-completed run. {str(e)}"
            logger.error(error_message)

    def _get_outputs(self, run: Union[str, Run]) -> List[Dict[str, Any]]:
        """Get the outputs of the run, load from local storage."""
        local_storage = self._get_local_storage(run)
        return local_storage.load_outputs()

    def _get_inputs(self, run: Union[str, Run]) -> List[Dict[str, Any]]:
        """Get the inputs of the run, load from local storage."""
        local_storage = self._get_local_storage(run)
        return local_storage.load_inputs()

    def _get_outputs_path(self, run: Union[str, Run]) -> str:
        """Get the outputs file path of the run, or None when the run has no outputs."""
        local_storage = self._get_local_storage(run)
        return local_storage._outputs_path if local_storage.load_outputs() else None

    def _get_data_path(self, run: Union[str, Run]) -> str:
        """Get the input data file path of the run; raises if the recorded path no longer exists."""
        local_storage = self._get_local_storage(run)
        # TODO: what if the data is deleted?
        if local_storage._data_path and not os.path.exists(local_storage._data_path):
            raise UserErrorException(
                f"Data path {local_storage._data_path} for run {run.name} does not exist. "
                "Please make sure it exists and not deleted."
            )
        return local_storage._data_path

    def _get_local_storage(self, run: Union[str, Run]) -> LocalStorageOperations:
        """Get the local storage of the run."""
        if isinstance(run, str):
            run = self.get(name=run)
        return LocalStorageOperations(run)
| promptflow/src/promptflow/promptflow/_sdk/operations/_run_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/operations/_run_operations.py",
"repo_id": "promptflow",
"token_count": 7452
} | 35 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import os
from datetime import datetime
from enum import Enum
from traceback import TracebackException, format_tb
from types import TracebackType, FrameType
from promptflow.exceptions import PromptflowException, SystemErrorException, UserErrorException, ValidationException
ADDITIONAL_INFO_USER_EXECUTION_ERROR = "ToolExecutionErrorDetails"
ADDITIONAL_INFO_USER_CODE_STACKTRACE = "UserCodeStackTrace"
CAUSE_MESSAGE = "\nThe above exception was the direct cause of the following exception:\n\n"
CONTEXT_MESSAGE = "\nDuring handling of the above exception, another exception occurred:\n\n"
TRACEBACK_MESSAGE = "Traceback (most recent call last):\n"
class RootErrorCode:
    """Top-level error codes forming the root of the error code hierarchy."""

    USER_ERROR = "UserError"
    SYSTEM_ERROR = "SystemError"
class ResponseCode(str, Enum):
    """HTTP status codes (string-valued) used when mapping errors to responses."""

    SUCCESS = "200"
    ACCEPTED = "202"
    REDIRECTION = "300"
    CLIENT_ERROR = "400"
    SERVICE_ERROR = "500"
    UNKNOWN = "0"
class ErrorResponse:
    """Represents the response body produced when an error occurs.

    The layout follows the Microsoft REST API Guidelines:
    https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses
    """

    def __init__(self, error_dict):
        self._error_dict = error_dict

    @staticmethod
    def from_error_dict(error_dict):
        """Create an ErrorResponse from an error dict.

        The error dict which usually is generated by ExceptionPresenter.create(exception).to_dict()
        """
        return ErrorResponse(error_dict)

    @staticmethod
    def from_exception(ex: Exception, *, include_debug_info=False):
        """Build an ErrorResponse directly from an exception instance."""
        error_dict = ExceptionPresenter.create(ex).to_dict(include_debug_info=include_debug_info)
        return ErrorResponse(error_dict)

    @property
    def message(self):
        """Top-level error message; empty string when absent."""
        return self._error_dict.get("message", "")

    @property
    def response_code(self):
        """Given the error code, return the corresponding http response code."""
        if self._error_dict.get("code") == RootErrorCode.USER_ERROR:
            return ResponseCode.CLIENT_ERROR
        return ResponseCode.SERVICE_ERROR

    @property
    def additional_info(self):
        """Additional info of the error, re-keyed for convenient access.

        The error response stores additional info as a list of dicts, each
        with a "type" and an "info" field; this property flattens that list
        into a dict mapping each type to its info. Malformed entries are
        silently skipped.
        """
        entries = self._error_dict.get("additionalInfo")
        if not entries or not isinstance(entries, list):
            return {}
        flattened = {}
        for entry in entries:
            # Ignore entries that are not dicts or lack the required fields.
            if not isinstance(entry, dict):
                continue
            entry_type = entry.get("type")
            entry_info = entry.get("info")
            if entry_type and entry_info:
                flattened[entry_type] = entry_info
        return flattened

    def get_additional_info(self, name):
        """Get the additional info by name."""
        return self.additional_info.get(name)

    def get_user_execution_error_info(self):
        """Get user tool execution error info from additional info."""
        info = self.get_additional_info(ADDITIONAL_INFO_USER_EXECUTION_ERROR)
        if not info or not isinstance(info, dict):
            return {}
        return info

    def to_dict(self):
        from promptflow._core.operation_context import OperationContext

        return {
            "error": self._error_dict,
            "correlation": None,  # TODO: to be implemented
            "environment": None,  # TODO: to be implemented
            "location": None,  # TODO: to be implemented
            "componentName": OperationContext.get_instance().get_user_agent(),
            "time": datetime.utcnow().isoformat(),
        }

    def to_simplified_dict(self):
        """Minimal representation: only the top-level code and message."""
        return {
            "error": {
                "code": self._error_dict.get("code"),
                "message": self._error_dict.get("message"),
            }
        }

    @property
    def error_codes(self):
        """Error codes collected by walking the nested innerError chain, outermost first."""
        codes = []
        node = self._error_dict
        while node is not None:
            code = node.get("code")
            if code is None:
                break
            codes.append(code)
            node = node.get("innerError")
        return codes

    @property
    def error_code_hierarchy(self):
        """Get the code hierarchy from error dict."""
        return "/".join(self.error_codes)

    @property
    def innermost_error_code(self):
        """The deepest error code in the chain, or None when there is none."""
        codes = self.error_codes
        return codes[-1] if codes else None
class ExceptionPresenter:
    """Extracts structured information from an exception instance.

    Designed to work for both PromptflowException and arbitrary exceptions.
    """

    def __init__(self, ex: Exception):
        self._ex = ex

    @staticmethod
    def create(ex: Exception):
        """Return the presenter matching the exception's type."""
        if isinstance(ex, PromptflowException):
            return PromptflowExceptionPresenter(ex)
        return ExceptionPresenter(ex)

    @property
    def formatted_traceback(self):
        """The full formatted traceback text of the wrapped exception."""
        return "".join(TracebackException.from_exception(self._ex).format())

    @property
    def debug_info(self):
        """Recursive dict describing the exception and its chained exceptions."""
        return self.build_debug_info(self._ex)

    def build_debug_info(self, ex: Exception):
        """Build the debug-info dict for *ex*, recursing into __cause__/__context__."""
        stack_trace = TRACEBACK_MESSAGE + "".join(format_tb(ex.__traceback__))
        inner_exception: dict = None
        if ex.__cause__ is not None:
            inner_exception = self.build_debug_info(ex.__cause__)
            stack_trace = CAUSE_MESSAGE + stack_trace
        elif ex.__context__ is not None and not ex.__suppress_context__:
            inner_exception = self.build_debug_info(ex.__context__)
            stack_trace = CONTEXT_MESSAGE + stack_trace
        return {
            "type": ex.__class__.__qualname__,
            "message": str(ex),
            "stackTrace": stack_trace,
            "innerException": inner_exception,
        }

    @property
    def error_codes(self):
        """The hierarchy of the error codes, outermost first.

        Follows the "Microsoft REST API Guidelines" hierarchical error code
        style:
        https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses

        Non-promptflow exceptions are classified as SystemError with the
        exception class name as the inner code. The list is converted into a
        nested json structure by :attr:`error_code_recursed`.
        """
        return [infer_error_code_from_class(SystemErrorException), self._ex.__class__.__name__]

    @property
    def error_code_recursed(self):
        """Nested dict form of :attr:`error_codes`.

        e.g. ["SystemError", "ValueError"] becomes:

        {
            "code": "SystemError",
            "innerError": {
                "code": "ValueError",
                "innerError": None,
            },
        }
        """
        recursed = None
        for code in reversed(self.error_codes or []):
            recursed = {"code": code, "innerError": recursed}
        return recursed

    def to_dict(self, *, include_debug_info=False):
        """Return a dict representation of the exception.

        Corresponds to the "error" field specified by the Microsoft API
        Guidelines:
        https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses
        The full error response is populated elsewhere.
        """
        # A JsonSerializedPromptflowException already carries its serialized form.
        if isinstance(self._ex, JsonSerializedPromptflowException):
            return self._ex.to_dict(include_debug_info=include_debug_info)
        result = {"message": str(self._ex), "messageFormat": "", "messageParameters": {}}
        result.update(self.error_code_recursed)
        if include_debug_info:
            result["debugInfo"] = self.debug_info
        return result
class PromptflowExceptionPresenter(ExceptionPresenter):
    """Presenter specialized for PromptflowException instances."""

    @property
    def error_codes(self):
        """The hierarchy of the error codes, outermost first.

        Follows the "Microsoft REST API Guidelines" hierarchical style:
        https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses

        Subclasses of PromptflowException carry their own error_codes, which
        are used directly. A bare PromptflowException has none, so fall back
        to ["SystemError", {inner exception type name, if any}].
        """
        codes = self._ex.error_codes
        if codes:
            return codes
        fallback = [infer_error_code_from_class(SystemErrorException)]
        inner = self._ex.inner_exception
        if inner:
            fallback.append(infer_error_code_from_class(inner.__class__))
        return fallback

    def to_dict(self, *, include_debug_info=False):
        """Dict representation including message format, parameters and reference code."""
        result = {
            "message": self._ex.message,
            "messageFormat": self._ex.message_format,
            "messageParameters": self._ex.serializable_message_parameters,
            "referenceCode": self._ex.reference_code,
        }
        result.update(self.error_code_recursed)
        additional_info = self._ex.additional_info
        if additional_info:
            result["additionalInfo"] = [{"type": name, "info": info} for name, info in additional_info.items()]
        if include_debug_info:
            result["debugInfo"] = self.debug_info
        return result
class JsonSerializedPromptflowException(Exception):
    """Json serialized PromptflowException.

    This exception deliberately carries a single ``message`` argument so that
    it survives pickle round-trips in multiprocessing without the
    "argument missing" failure described in https://bugs.python.org/issue32696.

    :param message: A Json serialized message describing the error.
    :type message: str
    """

    def __init__(self, message):
        self.message = message
        super().__init__(message)

    def __str__(self):
        return self.message

    def to_dict(self, *, include_debug_info=False):
        """Deserialize the stored Json message into the inner exception's dict form."""
        payload = json.loads(self.message)
        # The serialized error may already carry debugInfo; strip it unless the
        # caller explicitly asked for it.
        if not include_debug_info:
            payload.pop("debugInfo", None)
        return payload
def get_tb_next(tb: TracebackType, next_cnt: int):
    """Walk ``next_cnt`` steps along the ``tb_next`` chain of *tb*.

    If the chain is shorter than ``next_cnt``, the last traceback that has a
    value is returned instead.
    """
    for _ in range(next_cnt):
        if tb.tb_next is None:
            break
        tb = tb.tb_next
    return tb
def last_frame_info(ex: Exception):
    """Return the file name, line number and frame name where *ex* was raised.

    Returns an empty dict when *ex* is falsy or no stack frames are available.
    """
    if not ex:
        return {}
    stack = TracebackException.from_exception(ex).stack
    if not stack:
        return {}
    frame = stack[-1]
    return {
        "filename": frame.filename,
        "lineno": frame.lineno,
        "name": frame.name,
    }
def infer_error_code_from_class(cls):
    """Map an exception class to its serialized error-code string."""
    # Python has a built-in SystemError, so the promptflow system/user error
    # classes map to dedicated root codes instead of their literal class names.
    known = (
        (SystemErrorException, RootErrorCode.SYSTEM_ERROR),
        (UserErrorException, RootErrorCode.USER_ERROR),
        (ValidationException, "ValidationError"),
    )
    for candidate, code in known:
        if cls == candidate:
            return code
    return cls.__name__
def is_pf_core_frame(frame: FrameType):
    """Check if the frame is from promptflow core code."""
    from promptflow import _core

    filename = frame.f_code.co_filename
    return os.path.dirname(_core.__file__) in filename
def remove_suffix(text: str, suffix: str = None):
    """
    Given a string, removes the specified suffix, if present.

    >>> remove_suffix('hello world', 'world')
    'hello '
    >>> remove_suffix('hello world', 'hello ')
    'hello world'
    >>> remove_suffix('NoColumnFoundError', 'Error')
    'NoColumnFound'

    :param text: string from which the suffix will be removed.
    :param suffix: suffix to be removed.
    :return: string with the suffix removed.
    """
    # Empty/None text or suffix, or a non-matching suffix, leaves text untouched.
    if text and suffix and text.endswith(suffix):
        return text[: len(text) - len(suffix)]
    return text
| promptflow/src/promptflow/promptflow/_utils/exception_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/exception_utils.py",
"repo_id": "promptflow",
"token_count": 5410
} | 36 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# Declare this package as a pkgutil-style namespace package so other
# distributions can contribute modules under the same package path.
__path__ = __import__("pkgutil").extend_path(__path__, __name__)  # type: ignore
from ._pf_client import PFClient

# Public API of the promptflow.azure package.
__all__ = ["PFClient"]
| promptflow/src/promptflow/promptflow/azure/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/__init__.py",
"repo_id": "promptflow",
"token_count": 75
} | 37 |
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.pipeline.transport import HttpRequest
def _convert_request(request, files=None):
    """Bridge an ``azure.core.rest`` request to a pipeline-transport ``HttpRequest``.

    When *files* is supplied the body is sent as form data instead of raw content.
    """
    body = None if files else request.content
    converted = HttpRequest(method=request.method, url=request.url, headers=request.headers, data=body)
    if files:
        converted.set_formdata_body(files)
    return converted
def _format_url_section(template, **kwargs):
components = template.split("/")
while components:
try:
return template.format(**kwargs)
except KeyError as key:
formatted_components = template.split("/")
components = [
c for c in formatted_components if "{}".format(key.args[0]) not in c
]
template = "/".join(components)
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/_vendor.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/_vendor.py",
"repo_id": "promptflow",
"token_count": 365
} | 38 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._tools_operations import build_get_dynamic_list_request, build_get_package_tools_request, build_get_samples_request, build_get_tool_meta_request, build_get_tool_meta_v2_request, build_get_tool_setting_request, build_retrieve_tool_func_result_request
T = TypeVar('T')
# Optional response hook: called as cls(pipeline_response, deserialized, response_headers).
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ToolsOperations:
    """ToolsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~flow.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    # NOTE: auto-generated by AutoRest. Every operation follows the same shape:
    # build the HTTP request, run it through the client pipeline (stream=False),
    # map non-200 statuses to typed errors, then deserialize the body.

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def get_tool_setting(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        **kwargs: Any
    ) -> "_models.ToolSetting":
        """get_tool_setting.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ToolSetting, or the result of cls(response)
        :rtype: ~flow.models.ToolSetting
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ToolSetting"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Build and send the GET request for the workspace-scoped tool setting.
        request = build_get_tool_setting_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            template_url=self.get_tool_setting.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ToolSetting', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_tool_setting.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/setting'}  # type: ignore

    @distributed_trace_async
    async def get_samples(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        **kwargs: Any
    ) -> Dict[str, "_models.Tool"]:
        """get_samples.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: dict mapping str to Tool, or the result of cls(response)
        :rtype: dict[str, ~flow.models.Tool]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Dict[str, "_models.Tool"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Build and send the GET request for the sample tools dictionary.
        request = build_get_samples_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            template_url=self.get_samples.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('{Tool}', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_samples.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/samples'}  # type: ignore

    @distributed_trace_async
    async def get_tool_meta(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        tool_name: str,
        tool_type: str,
        endpoint_name: Optional[str] = None,
        flow_runtime_name: Optional[str] = None,
        flow_id: Optional[str] = None,
        data: Optional[str] = None,
        **kwargs: Any
    ) -> str:
        """get_tool_meta.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param tool_name:
        :type tool_name: str
        :param tool_type:
        :type tool_type: str
        :param endpoint_name:
        :type endpoint_name: str
        :param flow_runtime_name:
        :type flow_runtime_name: str
        :param flow_id:
        :type flow_id: str
        :param data:
        :type data: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: str, or the result of cls(response)
        :rtype: str
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[str]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # v1 endpoint posts the raw tool source as text/plain.
        content_type = kwargs.pop('content_type', "text/plain")  # type: Optional[str]

        _content = data

        request = build_get_tool_meta_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            content_type=content_type,
            tool_name=tool_name,
            tool_type=tool_type,
            content=_content,
            endpoint_name=endpoint_name,
            flow_runtime_name=flow_runtime_name,
            flow_id=flow_id,
            template_url=self.get_tool_meta.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('str', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_tool_meta.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/meta'}  # type: ignore

    @distributed_trace_async
    async def get_tool_meta_v2(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        flow_runtime_name: Optional[str] = None,
        flow_id: Optional[str] = None,
        body: Optional["_models.GenerateToolMetaRequest"] = None,
        **kwargs: Any
    ) -> "_models.ToolMetaDto":
        """get_tool_meta_v2.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param flow_runtime_name:
        :type flow_runtime_name: str
        :param flow_id:
        :type flow_id: str
        :param body:
        :type body: ~flow.models.GenerateToolMetaRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ToolMetaDto, or the result of cls(response)
        :rtype: ~flow.models.ToolMetaDto
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ToolMetaDto"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # v2 endpoint posts a structured JSON request body (body is optional).
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        if body is not None:
            _json = self._serialize.body(body, 'GenerateToolMetaRequest')
        else:
            _json = None

        request = build_get_tool_meta_v2_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            content_type=content_type,
            json=_json,
            flow_runtime_name=flow_runtime_name,
            flow_id=flow_id,
            template_url=self.get_tool_meta_v2.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ToolMetaDto', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_tool_meta_v2.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/meta-v2'}  # type: ignore

    @distributed_trace_async
    async def get_package_tools(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        flow_runtime_name: Optional[str] = None,
        flow_id: Optional[str] = None,
        **kwargs: Any
    ) -> Dict[str, "_models.Tool"]:
        """get_package_tools.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param flow_runtime_name:
        :type flow_runtime_name: str
        :param flow_id:
        :type flow_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: dict mapping str to Tool, or the result of cls(response)
        :rtype: dict[str, ~flow.models.Tool]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Dict[str, "_models.Tool"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Build and send the GET request listing package tools for the runtime.
        request = build_get_package_tools_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            flow_runtime_name=flow_runtime_name,
            flow_id=flow_id,
            template_url=self.get_package_tools.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('{Tool}', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_package_tools.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/packageTools'}  # type: ignore

    @distributed_trace_async
    async def get_dynamic_list(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        flow_runtime_name: Optional[str] = None,
        flow_id: Optional[str] = None,
        body: Optional["_models.GetDynamicListRequest"] = None,
        **kwargs: Any
    ) -> List[Any]:
        """get_dynamic_list.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param flow_runtime_name:
        :type flow_runtime_name: str
        :param flow_id:
        :type flow_id: str
        :param body:
        :type body: ~flow.models.GetDynamicListRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of any, or the result of cls(response)
        :rtype: list[any]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List[Any]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        if body is not None:
            _json = self._serialize.body(body, 'GetDynamicListRequest')
        else:
            _json = None

        request = build_get_dynamic_list_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            content_type=content_type,
            json=_json,
            flow_runtime_name=flow_runtime_name,
            flow_id=flow_id,
            template_url=self.get_dynamic_list.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        # Response body is an untyped JSON array.
        deserialized = self._deserialize('[object]', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get_dynamic_list.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/dynamicList'}  # type: ignore

    @distributed_trace_async
    async def retrieve_tool_func_result(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        flow_runtime_name: Optional[str] = None,
        flow_id: Optional[str] = None,
        body: Optional["_models.RetrieveToolFuncResultRequest"] = None,
        **kwargs: Any
    ) -> "_models.ToolFuncResponse":
        """retrieve_tool_func_result.

        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param flow_runtime_name:
        :type flow_runtime_name: str
        :param flow_id:
        :type flow_id: str
        :param body:
        :type body: ~flow.models.RetrieveToolFuncResultRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ToolFuncResponse, or the result of cls(response)
        :rtype: ~flow.models.ToolFuncResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ToolFuncResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        if body is not None:
            _json = self._serialize.body(body, 'RetrieveToolFuncResultRequest')
        else:
            _json = None

        request = build_retrieve_tool_func_result_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            content_type=content_type,
            json=_json,
            flow_runtime_name=flow_runtime_name,
            flow_id=flow_id,
            template_url=self.retrieve_tool_func_result.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)

        deserialized = self._deserialize('ToolFuncResponse', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    retrieve_tool_func_result.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/RetrieveToolFuncResult'}  # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_tools_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_tools_operations.py",
"repo_id": "promptflow",
"token_count": 8893
} | 39 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# The service performs authoritative validation; skip msrest client-side checks.
_SERIALIZER.client_side_validation = False
# fmt: off
def build_get_tool_setting_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    # GET .../Tools/setting — no query parameters or body.
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/setting')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        headers=header_parameters,
        **kwargs
    )
def build_get_samples_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    # GET .../Tools/samples — no query parameters or body.
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/samples')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        headers=header_parameters,
        **kwargs
    )
def build_get_tool_meta_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    # POST .../Tools/meta — required toolName/toolType query params, optional
    # endpointName/flowRuntimeName/flowId, raw tool source in the body.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    tool_name = kwargs.pop('tool_name')  # type: str
    tool_type = kwargs.pop('tool_type')  # type: str
    endpoint_name = kwargs.pop('endpoint_name', None)  # type: Optional[str]
    flow_runtime_name = kwargs.pop('flow_runtime_name', None)  # type: Optional[str]
    flow_id = kwargs.pop('flow_id', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/meta')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['toolName'] = _SERIALIZER.query("tool_name", tool_name, 'str')
    query_parameters['toolType'] = _SERIALIZER.query("tool_type", tool_type, 'str')
    if endpoint_name is not None:
        query_parameters['endpointName'] = _SERIALIZER.query("endpoint_name", endpoint_name, 'str')
    if flow_runtime_name is not None:
        query_parameters['flowRuntimeName'] = _SERIALIZER.query("flow_runtime_name", flow_runtime_name, 'str')
    if flow_id is not None:
        query_parameters['flowId'] = _SERIALIZER.query("flow_id", flow_id, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_tool_meta_v2_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    # POST .../Tools/meta-v2 — optional flowRuntimeName/flowId query params,
    # JSON request body passed through kwargs.
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    flow_runtime_name = kwargs.pop('flow_runtime_name', None)  # type: Optional[str]
    flow_id = kwargs.pop('flow_id', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/meta-v2')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if flow_runtime_name is not None:
        query_parameters['flowRuntimeName'] = _SERIALIZER.query("flow_runtime_name", flow_runtime_name, 'str')
    if flow_id is not None:
        query_parameters['flowId'] = _SERIALIZER.query("flow_id", flow_id, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_package_tools_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    # GET .../Tools/packageTools — optional flowRuntimeName/flowId query params.
    flow_runtime_name = kwargs.pop('flow_runtime_name', None)  # type: Optional[str]
    flow_id = kwargs.pop('flow_id', None)  # type: Optional[str]

    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/packageTools')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }

    url = _format_url_section(url, **path_format_arguments)

    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if flow_runtime_name is not None:
        query_parameters['flowRuntimeName'] = _SERIALIZER.query("flow_runtime_name", flow_runtime_name, 'str')
    if flow_id is not None:
        query_parameters['flowId'] = _SERIALIZER.query("flow_id", flow_id, 'str')

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_dynamic_list_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build a POST :class:`HttpRequest` for the ``Tools/dynamicList`` endpoint.

    ``content_type``, ``flow_runtime_name`` and ``flow_id`` are popped from *kwargs*;
    the latter two become query parameters when provided. The JSON body (if any)
    travels through the remaining *kwargs* forwarded to :class:`HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None) # type: Optional[str]
    flow_runtime_name = kwargs.pop('flow_runtime_name', None) # type: Optional[str]
    flow_id = kwargs.pop('flow_id', None) # type: Optional[str]
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/dynamicList')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters (only emit the optional query params when provided)
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    if flow_runtime_name is not None:
        query_parameters['flowRuntimeName'] = _SERIALIZER.query("flow_runtime_name", flow_runtime_name, 'str')
    if flow_id is not None:
        query_parameters['flowId'] = _SERIALIZER.query("flow_id", flow_id, 'str')
    # Construct headers (Content-Type only set when a body content type was given)
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_retrieve_tool_func_result_request(
    subscription_id, # type: str
    resource_group_name, # type: str
    workspace_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> HttpRequest
    """Build a POST :class:`HttpRequest` for the ``Tools/RetrieveToolFuncResult`` endpoint.

    ``content_type``, ``flow_runtime_name`` and ``flow_id`` are popped from *kwargs*;
    the latter two become query parameters when provided. The JSON body (if any)
    travels through the remaining *kwargs* forwarded to :class:`HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None) # type: Optional[str]
    flow_runtime_name = kwargs.pop('flow_runtime_name', None) # type: Optional[str]
    flow_id = kwargs.pop('flow_id', None) # type: Optional[str]
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/RetrieveToolFuncResult')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters (only emit the optional query params when provided)
    query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
    if flow_runtime_name is not None:
        query_parameters['flowRuntimeName'] = _SERIALIZER.query("flow_runtime_name", flow_runtime_name, 'str')
    if flow_id is not None:
        query_parameters['flowId'] = _SERIALIZER.query("flow_id", flow_id, 'str')
    # Construct headers (Content-Type only set when a body content type was given)
    header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
# fmt: on
class ToolsOperations(object):
    """ToolsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~flow.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): this operation group looks AutoRest-generated (build_*_request
    # helpers, `# fmt:` markers) — prefer regenerating over hand-editing; confirm.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send the prepared HttpRequests.
        self._client = client
        # msrest-style serializer/deserializer pair for request/response bodies.
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def get_tool_setting(
        self,
        subscription_id, # type: str
        resource_group_name, # type: str
        workspace_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ToolSetting"
        """get_tool_setting.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ToolSetting, or the result of cls(response)
        :rtype: ~flow.models.ToolSetting
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ToolSetting"]
        # Map auth/not-found/conflict statuses to the dedicated azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_tool_setting_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            template_url=self.get_tool_setting.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        # Send through the client pipeline; the full body is buffered (stream=False).
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('ToolSetting', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_tool_setting.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/setting'}  # type: ignore
    @distributed_trace
    def get_samples(
        self,
        subscription_id, # type: str
        resource_group_name, # type: str
        workspace_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Dict[str, "_models.Tool"]
        """get_samples.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: dict mapping str to Tool, or the result of cls(response)
        :rtype: dict[str, ~flow.models.Tool]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Same request/response handling pattern as get_tool_setting above.
        cls = kwargs.pop('cls', None)  # type: ClsType[Dict[str, "_models.Tool"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_samples_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            template_url=self.get_samples.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('{Tool}', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_samples.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/samples'}  # type: ignore
    @distributed_trace
    def get_tool_meta(
        self,
        subscription_id, # type: str
        resource_group_name, # type: str
        workspace_name, # type: str
        tool_name, # type: str
        tool_type, # type: str
        endpoint_name=None, # type: Optional[str]
        flow_runtime_name=None, # type: Optional[str]
        flow_id=None, # type: Optional[str]
        data=None, # type: Optional[str]
        **kwargs # type: Any
    ):
        # type: (...) -> str
        """get_tool_meta.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param tool_name:
        :type tool_name: str
        :param tool_type:
        :type tool_type: str
        :param endpoint_name:
        :type endpoint_name: str
        :param flow_runtime_name:
        :type flow_runtime_name: str
        :param flow_id:
        :type flow_id: str
        :param data:
        :type data: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: str, or the result of cls(response)
        :rtype: str
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[str]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # Body is sent verbatim as text ("text/plain" unless overridden).
        content_type = kwargs.pop('content_type', "text/plain")  # type: Optional[str]
        _content = data
        request = build_get_tool_meta_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            content_type=content_type,
            tool_name=tool_name,
            tool_type=tool_type,
            content=_content,
            endpoint_name=endpoint_name,
            flow_runtime_name=flow_runtime_name,
            flow_id=flow_id,
            template_url=self.get_tool_meta.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_tool_meta.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/meta'}  # type: ignore
    @distributed_trace
    def get_tool_meta_v2(
        self,
        subscription_id, # type: str
        resource_group_name, # type: str
        workspace_name, # type: str
        flow_runtime_name=None, # type: Optional[str]
        flow_id=None, # type: Optional[str]
        body=None, # type: Optional["_models.GenerateToolMetaRequest"]
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ToolMetaDto"
        """get_tool_meta_v2.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param flow_runtime_name:
        :type flow_runtime_name: str
        :param flow_id:
        :type flow_id: str
        :param body:
        :type body: ~flow.models.GenerateToolMetaRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ToolMetaDto, or the result of cls(response)
        :rtype: ~flow.models.ToolMetaDto
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ToolMetaDto"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the optional model body to JSON; send no body when it is None.
        if body is not None:
            _json = self._serialize.body(body, 'GenerateToolMetaRequest')
        else:
            _json = None
        request = build_get_tool_meta_v2_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            content_type=content_type,
            json=_json,
            flow_runtime_name=flow_runtime_name,
            flow_id=flow_id,
            template_url=self.get_tool_meta_v2.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('ToolMetaDto', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_tool_meta_v2.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/meta-v2'}  # type: ignore
    @distributed_trace
    def get_package_tools(
        self,
        subscription_id, # type: str
        resource_group_name, # type: str
        workspace_name, # type: str
        flow_runtime_name=None, # type: Optional[str]
        flow_id=None, # type: Optional[str]
        **kwargs # type: Any
    ):
        # type: (...) -> Dict[str, "_models.Tool"]
        """get_package_tools.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param flow_runtime_name:
        :type flow_runtime_name: str
        :param flow_id:
        :type flow_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: dict mapping str to Tool, or the result of cls(response)
        :rtype: dict[str, ~flow.models.Tool]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Dict[str, "_models.Tool"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_get_package_tools_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            flow_runtime_name=flow_runtime_name,
            flow_id=flow_id,
            template_url=self.get_package_tools.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('{Tool}', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_package_tools.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/packageTools'}  # type: ignore
    @distributed_trace
    def get_dynamic_list(
        self,
        subscription_id, # type: str
        resource_group_name, # type: str
        workspace_name, # type: str
        flow_runtime_name=None, # type: Optional[str]
        flow_id=None, # type: Optional[str]
        body=None, # type: Optional["_models.GetDynamicListRequest"]
        **kwargs # type: Any
    ):
        # type: (...) -> List[Any]
        """get_dynamic_list.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param flow_runtime_name:
        :type flow_runtime_name: str
        :param flow_id:
        :type flow_id: str
        :param body:
        :type body: ~flow.models.GetDynamicListRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: list of any, or the result of cls(response)
        :rtype: list[any]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[List[Any]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the optional model body to JSON; send no body when it is None.
        if body is not None:
            _json = self._serialize.body(body, 'GetDynamicListRequest')
        else:
            _json = None
        request = build_get_dynamic_list_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            content_type=content_type,
            json=_json,
            flow_runtime_name=flow_runtime_name,
            flow_id=flow_id,
            template_url=self.get_dynamic_list.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('[object]', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_dynamic_list.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/dynamicList'}  # type: ignore
    @distributed_trace
    def retrieve_tool_func_result(
        self,
        subscription_id, # type: str
        resource_group_name, # type: str
        workspace_name, # type: str
        flow_runtime_name=None, # type: Optional[str]
        flow_id=None, # type: Optional[str]
        body=None, # type: Optional["_models.RetrieveToolFuncResultRequest"]
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.ToolFuncResponse"
        """retrieve_tool_func_result.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param flow_runtime_name:
        :type flow_runtime_name: str
        :param flow_id:
        :type flow_id: str
        :param body:
        :type body: ~flow.models.RetrieveToolFuncResultRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ToolFuncResponse, or the result of cls(response)
        :rtype: ~flow.models.ToolFuncResponse
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ToolFuncResponse"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # Serialize the optional model body to JSON; send no body when it is None.
        if body is not None:
            _json = self._serialize.body(body, 'RetrieveToolFuncResultRequest')
        else:
            _json = None
        request = build_retrieve_tool_func_result_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            content_type=content_type,
            json=_json,
            flow_runtime_name=flow_runtime_name,
            flow_id=flow_id,
            template_url=self.retrieve_tool_func_result.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('ToolFuncResponse', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    retrieve_tool_func_result.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Tools/RetrieveToolFuncResult'}  # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_tools_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_tools_operations.py",
"repo_id": "promptflow",
"token_count": 13696
} | 40 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=protected-access
import copy
import json
import os
import re
from datetime import datetime
from functools import cached_property
from pathlib import Path
from typing import Dict, List, Optional, Union
import requests
from azure.ai.ml._artifacts._artifact_utilities import _check_and_upload_path
from azure.ai.ml._scope_dependent_operations import (
OperationConfig,
OperationsContainer,
OperationScope,
_ScopeDependentOperations,
)
from azure.ai.ml.constants._common import SHORT_URI_FORMAT
from azure.ai.ml.entities import Workspace
from azure.ai.ml.operations._operation_orchestrator import OperationOrchestrator
from azure.core.exceptions import HttpResponseError
from promptflow._sdk._constants import (
CLIENT_FLOW_TYPE_2_SERVICE_FLOW_TYPE,
DAG_FILE_NAME,
MAX_LIST_CLI_RESULTS,
WORKSPACE_LINKED_DATASTORE_NAME,
FlowType,
ListViewType,
)
from promptflow._sdk._errors import FlowOperationError
from promptflow._sdk._telemetry import ActivityType, WorkspaceTelemetryMixin, monitor_operation
from promptflow._sdk._utils import PromptflowIgnoreFile
from promptflow._sdk._vendor._asset_utils import traverse_directory
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.azure._constants._flow import DEFAULT_STORAGE
from promptflow.azure._entities._flow import Flow
from promptflow.azure._load_functions import load_flow
from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller
from promptflow.azure.operations._artifact_utilities import _get_datastore_name, get_datastore_info
from promptflow.azure.operations._fileshare_storeage_helper import FlowFileStorageClient
from promptflow.exceptions import SystemErrorException, UserErrorException
logger = get_cli_sdk_logger()
class FlowOperations(WorkspaceTelemetryMixin, _ScopeDependentOperations):
"""FlowOperations that can manage flows.
You should not instantiate this class directly. Instead, you should
create a :class:`~promptflow.azure.PFClient` instance and this operation is available as the instance's attribute.
"""
_FLOW_RESOURCE_PATTERN = re.compile(r"azureml:.*?/workspaces/(?P<experiment_id>.*?)/flows/(?P<flow_id>.*?)$")
    def __init__(
        self,
        operation_scope: OperationScope,
        operation_config: OperationConfig,
        all_operations: OperationsContainer,
        credential,
        service_caller: FlowServiceCaller,
        workspace: Workspace,
        **kwargs: Dict,
    ):
        """Initialize flow operations with scope/config plus the collaborators used by the methods below."""
        # Base classes receive the workspace coordinates for telemetry and scoping.
        super().__init__(
            operation_scope=operation_scope,
            operation_config=operation_config,
            workspace_name=operation_scope.workspace_name,
            subscription_id=operation_scope.subscription_id,
            resource_group_name=operation_scope.resource_group_name,
        )
        # Container of sibling ML operations (used to reach datastore operations).
        self._all_operations = all_operations
        # Service caller that talks to the flow service REST API.
        self._service_caller = service_caller
        self._credential = credential
        self._workspace = workspace
    @cached_property
    def _workspace_id(self):
        # Workspace id doubles as the experiment id for flow operations (see `get`).
        return self._workspace._workspace_id
@cached_property
def _index_service_endpoint_url(self):
"""Get the endpoint url for the workspace."""
endpoint = self._service_caller._service_endpoint
return endpoint + "index/v1.0" + self._service_caller._common_azure_url_pattern
@monitor_operation(activity_name="pfazure.flows.create_or_update", activity_type=ActivityType.PUBLICAPI)
def create_or_update(self, flow: Union[str, Path], display_name=None, type=None, **kwargs) -> Flow:
"""Create a flow to remote from local source, or update the metadata of an existing flow.
.. note::
Functionality of updating flow metadata is yet to be supported.
:param flow: The source of the flow to create.
:type flow: Union[str, Path]
:param display_name: The display name of the flow to create. Default to be flow folder name + timestamp
if not specified. e.g. "web-classification-10-27-2023-14-19-10"
:type display_name: str
:param type: The type of the flow to create. One of ["standard", evaluation", "chat"].
Default to be "standard" if not specified.
:type type: str
:param description: The description of the flow to create. Default to be the description in flow yaml file.
:type description: str
:param tags: The tags of the flow to create. Default to be the tags in flow yaml file.
:type tags: Dict[str, str]
"""
# validate the parameters
azure_flow, flow_display_name, flow_type, kwargs = FlowOperations._validate_flow_creation_parameters(
flow, display_name, type, **kwargs
)
# upload to file share
file_share_flow_path = self._resolve_flow_code_and_upload_to_file_share(flow=azure_flow)
if not file_share_flow_path:
raise FlowOperationError(f"File share path should not be empty, got {file_share_flow_path!r}.")
# create flow to remote
flow_definition_file_path = f"{file_share_flow_path}/{DAG_FILE_NAME}"
rest_flow = self._create_remote_flow_via_file_share_path(
flow_display_name=flow_display_name,
flow_type=flow_type,
flow_definition_file_path=flow_definition_file_path,
**kwargs,
)
result_flow = Flow._from_pf_service(rest_flow)
flow_dict = result_flow._to_dict()
print(f"Flow created successfully:\n{json.dumps(flow_dict, indent=4)}")
return result_flow
@staticmethod
def _validate_flow_creation_parameters(source, flow_display_name=None, flow_type=None, **kwargs):
"""Validate the parameters for flow creation operation."""
# validate the source folder
logger.info("Validating flow source.")
if not Path(source, DAG_FILE_NAME).exists():
raise UserErrorException(
f"Flow source must be a directory with flow definition yaml '{DAG_FILE_NAME}'. "
f"Got {Path(source).resolve().as_posix()!r}."
)
# validate flow source with flow schema
logger.info("Validating flow schema.")
flow_dict = FlowOperations._validate_flow_schema(source, flow_display_name, flow_type, **kwargs)
logger.info("Validating flow creation parameters.")
flow = load_flow(source)
# if no flow name specified, use "flow name + timestamp"
flow_display_name = flow_dict.get("display_name", None)
if not flow_display_name:
flow_display_name = f"{Path(source).name}-{datetime.now().strftime('%m-%d-%Y-%H-%M-%S')}"
# if no flow type specified, use default flow type "standard"
flow_type = flow_dict.get("type", None)
if not flow_type:
flow_type = FlowType.STANDARD
# update description and tags to be the final value
description = flow_dict.get("description", None)
if isinstance(description, str):
kwargs["description"] = description
tags = flow_dict.get("tags", None)
if tags:
kwargs["tags"] = tags
return flow, flow_display_name, flow_type, kwargs
@staticmethod
def _validate_flow_schema(source, display_name=None, type=None, **kwargs):
"""Validate the flow schema."""
from promptflow._sdk.entities._flow import ProtectedFlow
params_override = copy.deepcopy(kwargs)
if display_name is not None:
params_override["display_name"] = display_name
if type is not None:
params_override["type"] = type
flow_entity = ProtectedFlow.load(source=source, params_override=params_override)
flow_entity._validate(raise_error=True) # raise error if validation failed
flow_dict = flow_entity._dump_for_validation()
return flow_dict
def _resolve_flow_code_and_upload_to_file_share(self, flow: Flow, ignore_tools_json=False) -> str:
remote_file_share_folder_name = f"{Path(flow.code).name}-{datetime.now().strftime('%m-%d-%Y-%H-%M-%S')}"
ops = OperationOrchestrator(self._all_operations, self._operation_scope, self._operation_config)
file_share_flow_path = ""
logger.info("Building flow code.")
with flow._build_code() as code:
if code is None:
raise FlowOperationError("Failed to build flow code.")
# ignore flow.tools.json if needed (e.g. for flow run scenario)
if ignore_tools_json:
ignore_file = code._ignore_file
if isinstance(ignore_file, PromptflowIgnoreFile):
ignore_file._ignore_tools_json = ignore_tools_json
else:
raise FlowOperationError(
message=f"Flow code should have PromptflowIgnoreFile, got {type(ignore_file)}"
)
code.datastore = DEFAULT_STORAGE
datastore_name = _get_datastore_name(datastore_name=DEFAULT_STORAGE)
datastore_operation = ops._code_assets._datastore_operation
datastore_info = get_datastore_info(datastore_operation, datastore_name)
logger.debug("Creating storage client for uploading flow to file share.")
storage_client = FlowFileStorageClient(
credential=datastore_info["credential"],
file_share_name=datastore_info["container_name"],
account_url=datastore_info["account_url"],
azure_cred=datastore_operation._credential,
)
# set storage client to flow operation, can be used in test case
self._storage_client = storage_client
# check if the file share directory exists
logger.debug("Checking if the file share directory exists.")
if storage_client._check_file_share_directory_exist(remote_file_share_folder_name):
raise FlowOperationError(
f"Remote flow folder {remote_file_share_folder_name!r} already exists under "
f"'{storage_client.file_share_prefix}'. Please change the flow folder name and try again."
)
try:
logger.info("Uploading flow directory to file share.")
storage_client.upload_dir(
source=code.path,
dest=remote_file_share_folder_name,
msg="test",
ignore_file=code._ignore_file,
show_progress=False,
)
except Exception as e:
raise FlowOperationError(f"Failed to upload flow to file share due to: {str(e)}.") from e
file_share_flow_path = f"{storage_client.file_share_prefix}/{remote_file_share_folder_name}"
logger.info(f"Successfully uploaded flow to file share path {file_share_flow_path!r}.")
return file_share_flow_path
def _create_remote_flow_via_file_share_path(
self, flow_display_name, flow_type, flow_definition_file_path, **kwargs
):
"""Create a flow to remote from file share path."""
service_flow_type = CLIENT_FLOW_TYPE_2_SERVICE_FLOW_TYPE[flow_type]
description = kwargs.get("description", None)
tags = kwargs.get("tags", None)
body = {
"flow_name": flow_display_name,
"flow_definition_file_path": flow_definition_file_path,
"flow_type": service_flow_type,
"description": description,
"tags": tags,
}
rest_flow_result = self._service_caller.create_flow(
subscription_id=self._operation_scope.subscription_id,
resource_group_name=self._operation_scope.resource_group_name,
workspace_name=self._operation_scope.workspace_name,
body=body,
)
return rest_flow_result
def get(self, name: str) -> Flow:
"""Get a flow from azure.
:param name: The name of the flow to get.
:type name: str
:return: The flow.
:rtype: ~promptflow.azure.entities.Flow
"""
try:
rest_flow = self._service_caller.get_flow(
subscription_id=self._operation_scope.subscription_id,
resource_group_name=self._operation_scope.resource_group_name,
workspace_name=self._operation_scope.workspace_name,
flow_id=name,
experiment_id=self._workspace_id, # for flow operations, current experiment id is workspace id
)
except HttpResponseError as e:
if e.status_code == 404:
raise FlowOperationError(f"Flow {name!r} not found.") from e
else:
raise FlowOperationError(f"Failed to get flow {name!r} due to: {str(e)}.") from e
flow = Flow._from_pf_service(rest_flow)
return flow
    @monitor_operation(activity_name="pfazure.flows.list", activity_type=ActivityType.PUBLICAPI)
    def list(
        self,
        max_results: int = MAX_LIST_CLI_RESULTS,
        flow_type: Optional[FlowType] = None,
        list_view_type: ListViewType = ListViewType.ACTIVE_ONLY,
        include_others: bool = False,
        **kwargs,
    ) -> List[Flow]:
        """List flows from azure.
        :param max_results: The max number of runs to return, defaults to 50, max is 100
        :type max_results: int
        :param flow_type: The flow type, defaults to None, which means all flow types. Other supported flow types are
        ["standard", "evaluation", "chat"].
        :type flow_type: Optional[FlowType]
        :param list_view_type: The list view type, defaults to ListViewType.ACTIVE_ONLY
        :type list_view_type: ListViewType
        :param include_others: Whether to list flows owned by other users in the remote workspace, defaults to False
        :type include_others: bool
        :return: The list of flows.
        :rtype: List[~promptflow.azure.entities.Flow]
        """
        # Validate inputs up front so a bad call never reaches the index service.
        if not isinstance(max_results, int) or max_results < 1:
            raise FlowOperationError(f"'max_results' must be a positive integer, got {max_results!r}")
        # str(None).lower() is harmless here: the check below also requires flow_type is not None.
        normalized_flow_type = str(flow_type).lower()
        if flow_type is not None and normalized_flow_type not in FlowType.get_all_values():
            raise FlowOperationError(f"'flow_type' must be one of {FlowType.get_all_values()}, got {flow_type!r}.")
        headers = self._service_caller._get_headers()
        # Translate the list view into the index-service "isArchived" filter values.
        if list_view_type == ListViewType.ACTIVE_ONLY:
            filter_archived = ["false"]
        elif list_view_type == ListViewType.ARCHIVED_ONLY:
            filter_archived = ["true"]
        elif list_view_type == ListViewType.ALL:
            filter_archived = ["true", "false"]
        else:
            raise FlowOperationError(
                f"Invalid list view type: {list_view_type!r}, expecting one of ['ActiveOnly', 'ArchivedOnly', 'All']"
            )
        user_object_id, user_tenant_id = self._service_caller._get_user_identity_info()
        # Index-service query: always scoped to flows in the caller's tenant, newest first.
        payload = {
            "filters": [
                {"field": "type", "operator": "eq", "values": ["flows"]},
                {"field": "annotations/isArchived", "operator": "eq", "values": filter_archived},
                {
                    "field": "properties/creationContext/createdBy/userTenantId",
                    "operator": "eq",
                    "values": [user_tenant_id],
                },
            ],
            "freeTextSearch": "",
            "order": [{"direction": "Desc", "field": "properties/creationContext/createdTime"}],
            # index service can return 100 results at most
            "pageSize": min(max_results, 100),
            "skip": 0,
            "includeTotalResultCount": True,
            "searchBuilder": "AppendPrefix",
        }
        # add flow filter to only list flows from current user
        if not include_others:
            payload["filters"].append(
                {
                    "field": "properties/creationContext/createdBy/userObjectId",
                    "operator": "eq",
                    "values": [user_object_id],
                }
            )
        endpoint = self._index_service_endpoint_url
        url = endpoint + "/entities"
        # NOTE(review): no timeout is set on this request; a hung index service would block
        # indefinitely — consider adding one. Verify against other service calls in this module.
        response = requests.post(url, headers=headers, json=payload)
        if response.status_code == 200:
            entities = json.loads(response.text)
            flow_entities = entities["value"]
        else:
            raise FlowOperationError(
                f"Failed to get flows from index service. Code: {response.status_code}, text: {response.text}"
            )
        # transform to flow instances
        flow_instances = []
        for entity in flow_entities:
            flow = Flow._from_index_service(entity)
            flow_instances.append(flow)
        return flow_instances
def _download(self, source, dest):
# TODO: support download flow
raise NotImplementedError("Not implemented yet")
def _resolve_arm_id_or_upload_dependencies(self, flow: Flow, ignore_tools_json=False) -> None:
ops = OperationOrchestrator(self._all_operations, self._operation_scope, self._operation_config)
# resolve flow's code
self._try_resolve_code_for_flow(flow=flow, ops=ops, ignore_tools_json=ignore_tools_json)
    @classmethod
    def _try_resolve_code_for_flow(cls, flow: Flow, ops: OperationOrchestrator, ignore_tools_json=False) -> None:
        """Upload the flow's code folder to the workspace-linked datastore and rewrite
        ``flow.code``/``flow.path`` to the uploaded location.

        A flow whose path already points at ``azureml://datastores`` is treated as
        already uploaded. Raises ``ValueError`` when the flow has no path at all.
        """
        if flow.path:
            # remote path
            if flow.path.startswith("azureml://datastores"):
                flow._code_uploaded = True
                return
        else:
            raise ValueError("Path is required for flow.")
        with flow._build_code() as code:
            if code is None:
                return
            if flow._code_uploaded:
                return
            # TODO(2917889): generate flow meta for eager flow
            if ignore_tools_json:
                ignore_file = code._ignore_file
                if isinstance(ignore_file, PromptflowIgnoreFile):
                    ignore_file._ignore_tools_json = ignore_tools_json
                else:
                    raise SystemErrorException(
                        message=f"Flow code should have PromptflowIgnoreFile, got {type(ignore_file)}"
                    )
            # flow directory per file upload summary
            # as the upload logic locates in azure-ai-ml, we cannot touch during the upload
            # copy the logic here to print per file upload summary
            ignore_file = code._ignore_file
            upload_paths = []
            source_path = Path(code.path).resolve()
            prefix = os.path.basename(source_path) + "/"
            for root, _, files in os.walk(source_path, followlinks=True):
                upload_paths += list(
                    traverse_directory(
                        root,
                        files,
                        prefix=prefix,
                        ignore_file=ignore_file,
                    )
                )
            # Debug-log what will be skipped vs. uploaded, mirroring azure-ai-ml's walk.
            ignore_files = code._ignore_file._get_ignore_list()
            for file_path in ignore_files:
                logger.debug(f"will ignore file: {file_path}...")
            for file_path, _ in upload_paths:
                logger.debug(f"will upload file: {file_path}...")
            code.datastore = WORKSPACE_LINKED_DATASTORE_NAME
            # NOTE: For flow directory upload, we prefer to upload it to the workspace linked datastore,
            # therefore we will directly use _check_and_upload_path, instead of v2 SDK public API
            # CodeOperations.create_or_update, as later one will upload the code asset to another
            # container in the storage account, which may fail with vnet for MT.
            # However, we might run into list secret permission error(especially in Heron workspace),
            # in this case, we will leverage v2 SDK public API, which has solution for Heron,
            # and request MT with the blob url;
            # refer to except block for more details.
            try:
                uploaded_code_asset, _ = _check_and_upload_path(
                    artifact=code,
                    asset_operations=ops._code_assets,
                    artifact_type="Code",
                    datastore_name=WORKSPACE_LINKED_DATASTORE_NAME,  # actually not work at all
                    show_progress=True,
                )
                path = uploaded_code_asset.path
                path = path[path.find("LocalUpload") :]  # path on container
                flow.code = path
                # azureml://datastores/workspaceblobstore/paths/<path-to-flow-dag-yaml>
                flow.path = SHORT_URI_FORMAT.format(
                    WORKSPACE_LINKED_DATASTORE_NAME, (Path(path) / flow.path).as_posix()
                )
            except HttpResponseError as e:
                # catch authorization error for list secret on datastore
                if "AuthorizationFailed" in str(e) and "datastores/listSecrets/action" in str(e):
                    # Fallback: v2 SDK public API works in Heron workspaces; MT gets the blob url.
                    uploaded_code_asset = ops._code_assets.create_or_update(code)
                    path = uploaded_code_asset.path
                    path = path.replace(".blob.core.windows.net:443/", ".blob.core.windows.net/")  # remove :443 port
                    flow.code = path
                    # https://<storage-account-name>.blob.core.windows.net/<container-name>/<path-to-flow-dag-yaml>
                    flow.path = f"{path}/{flow.path}"
                else:
                    raise
            flow._code_uploaded = True
# region deprecated but keep for runtime test dependencies
def _resolve_arm_id_or_upload_dependencies_to_file_share(self, flow: Flow) -> None:
ops = OperationOrchestrator(self._all_operations, self._operation_scope, self._operation_config)
# resolve flow's code
self._try_resolve_code_for_flow_to_file_share(flow=flow, ops=ops)
@classmethod
def _try_resolve_code_for_flow_to_file_share(cls, flow: Flow, ops: OperationOrchestrator) -> None:
from azure.ai.ml._utils._storage_utils import AzureMLDatastorePathUri
from ._artifact_utilities import _check_and_upload_path
if flow.path:
if flow.path.startswith("azureml://datastores"):
# remote path
path_uri = AzureMLDatastorePathUri(flow.path)
if path_uri.datastore != DEFAULT_STORAGE:
raise ValueError(f"Only {DEFAULT_STORAGE} is supported as remote storage for now.")
flow.path = path_uri.path
flow._code_uploaded = True
return
else:
raise ValueError("Path is required for flow.")
with flow._build_code() as code:
if code is None:
return
if flow._code_uploaded:
return
code.datastore = DEFAULT_STORAGE
uploaded_code_asset = _check_and_upload_path(
artifact=code,
asset_operations=ops._code_assets,
artifact_type="Code",
show_progress=False,
)
if "remote_path" in uploaded_code_asset:
path = uploaded_code_asset["remote_path"]
elif "remote path" in uploaded_code_asset:
path = uploaded_code_asset["remote path"]
flow.code = path
flow.path = (Path(path) / flow.path).as_posix()
flow._code_uploaded = True
# endregion
| promptflow/src/promptflow/promptflow/azure/operations/_flow_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/operations/_flow_operations.py",
"repo_id": "promptflow",
"token_count": 10428
} | 41 |
import base64
import filetype
import hashlib
from typing import Callable, Optional
class PFBytes(bytes):
    """Bytes subclass used by PromptFlow to carry multimedia payloads.

    Behaves exactly like ``bytes`` while additionally tracking a MIME type,
    an optional source URL, and a short content hash, plus helpers for
    base64 serialization/deserialization.
    """

    def __new__(cls, value: bytes, *args, **kwargs):
        # Here we must only pass the value to the bytes constructor,
        # otherwise we will get a type error that the constructor doesn't take such args.
        # See https://docs.python.org/3/reference/datamodel.html#object.__new__
        return super().__new__(cls, value)

    def __init__(self, value: bytes, mime_type: str, source_url: Optional[str] = None):
        # The first parameter must also be named "value" to mirror __new__,
        # otherwise instantiation fails.
        super().__init__()
        # Short sha1 digest used to identify this payload in string forms.
        self._hash = hashlib.sha1(value).hexdigest()[:8]
        self._mime_type = mime_type.lower()
        self._source_url = source_url

    @property
    def source_url(self):
        """The URL this payload originated from, if any."""
        return self._source_url

    def to_base64(self, with_type: bool = False, dict_type: bool = False):
        """Return the base64 representation of the PFBytes.

        ``with_type`` prefixes the data-URL style MIME type; ``dict_type``
        additionally wraps the result in a single-entry dict keyed by that prefix.
        """
        encoded = base64.b64encode(self).decode("utf-8")
        if not with_type:
            return encoded
        prefix = f"data:{self._mime_type};base64"
        return {prefix: encoded} if dict_type else prefix + "," + encoded
class Image(PFBytes):
    """This class is used to represent an image in PromptFlow. It is a subclass of
    ~promptflow.contracts.multimedia.PFBytes.
    """

    def __init__(self, value: bytes, mime_type: str = None, source_url: Optional[str] = None):
        # Sniff the MIME type when the caller did not supply one.
        if mime_type is None:
            mime_type = filetype.guess_mime(value)
        # Anything missing or non-image falls back to the wildcard image type.
        if mime_type is None or not mime_type.startswith("image/"):
            mime_type = "image/*"
        super().__init__(value, mime_type, source_url)

    def __str__(self):
        return f"Image({self._hash})"

    def __repr__(self) -> str:
        return self.__str__()

    def serialize(self, encoder: Callable = None):
        """Serialize the image to a dictionary (or to its short string form when no encoder is given)."""
        return str(self) if encoder is None else encoder(self)
| promptflow/src/promptflow/promptflow/contracts/multimedia.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/contracts/multimedia.py",
"repo_id": "promptflow",
"token_count": 977
} | 42 |
import contextvars
import multiprocessing
import os
import queue
import signal
import sys
import threading
import time
from datetime import datetime
from functools import partial
from logging import INFO
from multiprocessing import Manager, Queue
from multiprocessing.pool import ThreadPool
from typing import List, Optional, Union
import psutil
from promptflow._constants import LINE_NUMBER_KEY, LINE_TIMEOUT_SEC
from promptflow._core._errors import ProcessPoolError, UnexpectedError
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunTracker
from promptflow._utils.dataclass_serializer import convert_eager_flow_output_to_dict
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow._utils.logger_utils import bulk_logger
from promptflow._utils.multimedia_utils import _process_recursively, persist_multimedia_data
from promptflow._utils.thread_utils import RepeatLogTimer
from promptflow._utils.utils import get_int_env_var, log_progress, set_context
from promptflow.contracts.multimedia import Image
from promptflow.contracts.run_info import FlowRunInfo
from promptflow.contracts.run_info import RunInfo as NodeRunInfo
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, PromptflowException
from promptflow.executor._errors import (
BatchExecutionTimeoutError,
LineExecutionTimeoutError,
ProcessCrashError,
ProcessInfoObtainedTimeout,
ProcessTerminatedTimeout,
)
from promptflow.executor._process_manager import ForkProcessManager, SpawnProcessManager
from promptflow.executor._result import LineResult
from promptflow.executor._script_executor import ScriptExecutor
from promptflow.executor.flow_executor import DEFAULT_CONCURRENCY_BULK, FlowExecutor
from promptflow.storage import AbstractRunStorage
def signal_handler(signum, frame):
    """Handle a stop signal by terminating the current process and exiting with code 1.

    :param signum: Signal number delivered by the OS.
    :param frame: Current stack frame (unused, required by the signal API).
    """
    signame = signal.Signals(signum).name
    bulk_logger.info("Execution stopping. Handling signal %s (%s)", signame, signum)
    try:
        process = psutil.Process(os.getpid())
        process.terminate()
        # Fix: log success only after terminate() actually returned; previously the
        # "Successfully terminated" line was emitted before terminate() was called,
        # so a failing terminate still logged success.
        bulk_logger.info("Successfully terminated process with pid %s", process.pid)
    except Exception:
        bulk_logger.warning("Error when handling execution stop signal", exc_info=True)
    finally:
        # Always exit the interpreter, even if termination failed.
        sys.exit(1)
class QueueRunStorage(AbstractRunStorage):
    """Run storage that persists run info by enqueueing it for a consumer process."""

    def __init__(self, queue: Queue):
        self.queue = queue

    def persist_node_run(self, run_info: NodeRunInfo):
        """Forward node-level run info to the consumer via the queue."""
        self.queue.put(run_info)

    def persist_flow_run(self, run_info: FlowRunInfo):
        """Forward flow-level run info to the consumer via the queue."""
        self.queue.put(run_info)
def format_current_process_info(process_name, pid, line_number: int) -> str:
    """Build the human-readable identifier used in per-line process status logs."""
    return "Process name({})-Process id({})-Line number({})".format(process_name, pid, line_number)
def log_process_status(process_name, pid, line_number: int, is_completed=False, is_failed=False):
    """Log the lifecycle status (start / completed / failed) of one line execution."""
    process_info = format_current_process_info(process_name, pid, line_number)
    if is_completed:
        suffix = "completed."
    elif is_failed:
        suffix = "failed."
    else:
        suffix = "start execution."
    bulk_logger.info(f"{process_info} {suffix}")
class LineExecutionProcessPool:
_DEFAULT_WORKER_COUNT = 4
_PROCESS_TERMINATED_TIMEOUT = 60
_PROCESS_INFO_OBTAINED_TIMEOUT = 60
def __init__(
self,
flow_executor: FlowExecutor,
nlines,
run_id,
output_dir,
batch_timeout_sec: Optional[int] = None,
line_timeout_sec: Optional[int] = None,
):
self._nlines = nlines
self._run_id = run_id
multiprocessing_start_method = os.environ.get("PF_BATCH_METHOD", multiprocessing.get_start_method())
sys_start_methods = multiprocessing.get_all_start_methods()
if multiprocessing_start_method not in sys_start_methods:
bulk_logger.warning(
f"Failed to set start method to '{multiprocessing_start_method}', "
f"start method {multiprocessing_start_method} is not in: {sys_start_methods}."
)
bulk_logger.info(f"Set start method to default {multiprocessing.get_start_method()}.")
multiprocessing_start_method = multiprocessing.get_start_method()
use_fork = multiprocessing_start_method in ["fork", "forkserver"]
self._flow_file = flow_executor._flow_file
self._connections = flow_executor._connections
self._working_dir = flow_executor._working_dir
self._use_fork = use_fork
if isinstance(flow_executor, ScriptExecutor):
self._storage = flow_executor._storage
else:
self._storage = flow_executor._run_tracker._storage
self._flow_id = flow_executor._flow_id
self._log_interval = flow_executor._log_interval
self._line_timeout_sec = line_timeout_sec or LINE_TIMEOUT_SEC
self._batch_timeout_sec = batch_timeout_sec
self._output_dir = output_dir
self._flow_create_kwargs = {
"flow_file": flow_executor._flow_file,
"connections": flow_executor._connections,
"working_dir": flow_executor._working_dir,
"entry": flow_executor._entry,
"line_timeout_sec": self._line_timeout_sec,
"raise_ex": False,
}
def __enter__(self):
manager = Manager()
self._processing_idx = manager.dict()
self._completed_idx = manager.dict()
self._task_queue = Queue()
self._n_process = self._determine_worker_count()
# When using fork, we first spawn a sub process, the SemLock created in fork context (multiprocessing.Queue())
# can't used in a spawn context. Since spawn does not share memory, synchronization primitives created by
# fork cannot be used directly. It will cause an error: "A SemLock created in a fork context is being
# shared with a process in a spawn context. This is not supported".
# So use multiprocessing.Manager().Queue() instead of multiprocessing.Queue().
# Manager().Queue() operates through a manager server process, which passes messages between different
# processes without directly sharing memory state, which makes it safe to use in a spawn context.
self._input_queues = [manager.Queue() for _ in range(self._n_process)]
self._output_queues = [manager.Queue() for _ in range(self._n_process)]
self._control_signal_queue = manager.Queue()
self._process_info = manager.dict()
# when using fork, we first create a process with spawn method to establish a clean environment
# Then fork the subprocess in this environment to avoid some deadlock problems
common_kwargs = {
"input_queues": self._input_queues,
"output_queues": self._output_queues,
"process_info": self._process_info,
"process_target_func": _process_wrapper,
}
if self._use_fork:
# 1. Create input_queue, output_queue, control_signal_queue and _process_info in the main process.
# 2. Pass the above queue/dict as parameters to spawn and fork processes to transfer information
# between processes.
self._processes_manager = ForkProcessManager(
self._control_signal_queue,
self._flow_create_kwargs,
**common_kwargs,
)
else:
executor_creation_func = partial(FlowExecutor.create, **self._flow_create_kwargs)
# 1. Create input_queue, output_queue, and _process_info in the main process.
# 2. Spawn _n_process sub-process and pass the above queue/dict to these sub-process to transfer information
# between main process and sub process.
self._processes_manager = SpawnProcessManager(executor_creation_func, **common_kwargs)
self._processes_manager.start_processes()
self._processes_manager.ensure_healthy()
monitor_pool = ThreadPool(self._n_process, initializer=set_context, initargs=(contextvars.copy_context(),))
self._monitor_pool = monitor_pool
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self._monitor_pool is not None:
self._monitor_pool.close()
self._monitor_pool.join()
def _get_process_info(self, index):
start_time = time.time()
while True:
self._processes_manager.ensure_healthy()
try:
if time.time() - start_time > self._PROCESS_INFO_OBTAINED_TIMEOUT:
raise ProcessInfoObtainedTimeout(self._PROCESS_INFO_OBTAINED_TIMEOUT)
# Try to get process id and name from the process_info
process_id = self._process_info[index].process_id
process_name = self._process_info[index].process_name
return (index, process_id, process_name)
except KeyError:
# If the process_info does not exist for the given index, it means the process have not ready yet,
# try again.
time.sleep(1)
continue
except Exception as e:
raise Exception(f"Unexpected error occurred while get process info. Exception: {e}")
def _ensure_process_terminated_within_timeout(self, process_id):
start_time = time.time()
while psutil.pid_exists(process_id):
if time.time() - start_time > self._PROCESS_TERMINATED_TIMEOUT:
raise ProcessTerminatedTimeout(self._PROCESS_TERMINATED_TIMEOUT)
time.sleep(1)
def _is_process_alive(self, process_id):
return psutil.pid_exists(process_id)
def _handle_output_queue_messages(self, output_queue: Queue, result_list):
try:
message = output_queue.get(timeout=1)
if isinstance(message, LineResult):
message = self._process_multimedia(message)
result_list.append(message)
return message
elif isinstance(message, FlowRunInfo):
self._storage.persist_flow_run(message)
return message
elif isinstance(message, NodeRunInfo):
self._storage.persist_node_run(message)
return message
except queue.Empty:
pass
return None
def _monitor_workers_and_process_tasks_in_thread(
self,
task_queue: Queue,
result_list: List[LineResult],
index: int,
input_queue: Queue,
output_queue: Queue,
batch_start_time: datetime,
):
index, process_id, process_name = self._get_process_info(index)
# Entering the while loop requires two conditions:
# 1. The task queue is not empty, meaning there are lines yet to be executed.
# 2. The batch run has not reached the batch timeout limit.
while not self._batch_timeout_expired(batch_start_time):
self._processes_manager.ensure_healthy()
try:
# Get task from task_queue
inputs, line_number, run_id = task_queue.get(timeout=1)
except queue.Empty:
break
# Calculate the line timeout for the current line.
line_timeout_sec = self._line_timeout_sec
if self._batch_timeout_sec:
remaining_execution_time = (
self._batch_timeout_sec - (datetime.utcnow() - batch_start_time).total_seconds()
)
if remaining_execution_time <= 0:
break
line_timeout_sec = min(line_timeout_sec, remaining_execution_time)
# Put task into input_queue
args = (inputs, line_number, run_id, line_timeout_sec)
input_queue.put(args)
self._processing_idx[line_number] = format_current_process_info(process_name, process_id, line_number)
log_process_status(process_name, process_id, line_number)
start_time = datetime.utcnow()
completed = False
crashed = False
returned_node_run_infos = {}
# Responsible for checking the output queue messages and processing them within a specified timeout period.
while not self._batch_timeout_expired(batch_start_time) and not self._line_timeout_expired(start_time):
# Monitor process aliveness.
crashed = not self._is_process_alive(process_id)
if crashed:
break
# Handle output queue message.
message = self._handle_output_queue_messages(output_queue, result_list)
if isinstance(message, LineResult):
completed = True
break
if isinstance(message, NodeRunInfo):
returned_node_run_infos[message.node] = message
# Handle line execution completed.
if completed:
self._completed_idx[line_number] = format_current_process_info(process_name, process_id, line_number)
log_process_status(process_name, process_id, line_number, is_completed=True)
# Handle line execution is not completed.
else:
ex = None
# Handle process crashed.
if crashed:
bulk_logger.warning(f"Process crashed while executing line {line_number}.")
ex = ProcessCrashError(line_number)
# Handle line execution timeout.
elif self._line_timeout_expired(start_time):
bulk_logger.warning(f"Line {line_number} timeout after {self._line_timeout_sec} seconds.")
ex = LineExecutionTimeoutError(line_number, self._line_timeout_sec)
# Handle batch execution timeout.
elif self._batch_timeout_expired(batch_start_time):
bulk_logger.warning(
f"Line {line_number} execution terminated due to the total "
f"batch run exceeding the batch timeout ({self._batch_timeout_sec}s)."
)
ex = BatchExecutionTimeoutError(line_number, self._batch_timeout_sec)
else:
# This branch should not be reached, add this warning for the case.
msg = f"Unexpected error occurred while monitoring line execution at line {line_number}."
bulk_logger.warning(msg)
ex = UnexpectedError(msg)
result = self._generate_line_result_for_exception(
inputs,
run_id,
line_number,
self._flow_id,
start_time,
ex,
returned_node_run_infos,
)
result_list.append(result)
self._completed_idx[line_number] = format_current_process_info(process_name, process_id, line_number)
log_process_status(process_name, process_id, line_number, is_failed=True)
# If there are still tasks in the task_queue and the batch run does not exceed the batch timeout,
# restart a new process to execute the task.
run_finished = task_queue.empty() or self._batch_timeout_expired(batch_start_time)
if not run_finished:
self._processes_manager.restart_process(index)
# We need to ensure the process has been killed before continuing to execute.
# Otherwise the process will receive new task, and during the execution, the process
# is killed, which will result in the 'ProcessCrashError'.
self._ensure_process_terminated_within_timeout(process_id)
index, process_id, process_name = self._get_process_info(index)
self._processing_idx.pop(line_number)
# End the process when the batch timeout is exceeded or when all lines have been executed.
self._processes_manager.end_process(index)
# In fork mode, the main process and the sub spawn process communicate through _process_info.
# We need to ensure the process has been killed before returning. Otherwise, it may cause
# the main process have exited but the spawn process is still alive.
# At this time, a connection error will be reported.
self._ensure_process_terminated_within_timeout(process_id)
def _batch_timeout_expired(self, start_time: datetime) -> bool:
if self._batch_timeout_sec is None:
return False
return (datetime.utcnow() - start_time).total_seconds() > self._batch_timeout_sec + 10
def _line_timeout_expired(self, start_time: datetime) -> bool:
# Here we add more seconds because of the following reasons:
# 1. At the last second, there would be several timeout message from exec_line.
# 2. It may take time to create worker so actual timeout time may be longer.
return (datetime.utcnow() - start_time).total_seconds() > self._line_timeout_sec + 10
def _process_multimedia(self, result: LineResult) -> LineResult:
"""Replace multimedia data in line result with string place holder to prevent OOM
and persist multimedia data in output when batch running."""
if not self._output_dir:
return result
self._process_multimedia_in_flow_run(result.run_info)
for node_name, node_run_info in result.node_run_infos.items():
result.node_run_infos[node_name] = self._process_multimedia_in_node_run(node_run_info)
result.output = persist_multimedia_data(result.output, self._output_dir)
return result
def _process_multimedia_in_run_info(self, run_info: Union[FlowRunInfo, NodeRunInfo]):
# Persist and convert images in inputs to path dictionaries.
# This replaces any image objects with their corresponding file path dictionaries.
if run_info.inputs:
run_info.inputs = self._persist_and_convert_images_to_path_dicts(run_info.inputs)
# Persist and convert images in output to path dictionaries.
# This replaces any image objects with their corresponding file path dictionaries.
if run_info.output:
serialized_output = self._persist_and_convert_images_to_path_dicts(run_info.output)
run_info.output = serialized_output
run_info.result = None
# Persist and convert images in api_calls to path dictionaries.
# The `inplace=True` parameter is used here to ensure that the original list structure holding generator outputs
# is maintained. This allows us to keep tracking the list as it dynamically changes when the generator is
# consumed. It is crucial to process the api_calls list in place to avoid losing the reference to the list that
# holds the generator items, which is essential for tracing generator execution.
if run_info.api_calls:
run_info.api_calls = self._persist_and_convert_images_to_path_dicts(run_info.api_calls, inplace=True)
return run_info
def _process_multimedia_in_flow_run(self, run_info: FlowRunInfo):
self._process_multimedia_in_run_info(run_info)
def _process_multimedia_in_node_run(self, run_info: NodeRunInfo):
run_info = self._process_multimedia_in_run_info(run_info)
return run_info
def _persist_and_convert_images_to_path_dicts(self, value, inplace=False):
serialization_funcs = {Image: partial(Image.serialize, **{"encoder": None})}
return _process_recursively(value, process_funcs=serialization_funcs, inplace=inplace)
def _generate_line_result_for_exception(
self,
inputs,
run_id,
line_number,
flow_id,
start_time,
ex,
node_run_infos={},
) -> LineResult:
bulk_logger.error(f"Line {line_number}, Process {os.getpid()} failed with exception: {ex}")
run_info = FlowRunInfo(
run_id=f"{run_id}_{line_number}",
status=Status.Failed,
error=ExceptionPresenter.create(ex).to_dict(include_debug_info=True),
inputs=inputs,
output=None,
metrics=None,
request=None,
parent_run_id=run_id,
root_run_id=run_id,
source_run_id=None,
flow_id=flow_id,
start_time=start_time,
end_time=datetime.utcnow(),
index=line_number,
)
result = LineResult(
output={},
aggregation_inputs={},
run_info=run_info,
node_run_infos=node_run_infos,
)
# TODO: There is a corner case that the run info is persisted in the subprocess when timeouted,
# while we also persist the run info here. This may cause duplicate run info in the storage.
# We need to find a way to avoid this.
self._storage.persist_flow_run(result.run_info)
return result
def run(self, batch_inputs):
for index, inputs in batch_inputs:
self._task_queue.put(
(
inputs,
index,
self._run_id,
)
)
result_list = []
run_start_time = datetime.utcnow()
with RepeatLogTimer(
interval_seconds=self._log_interval,
logger=bulk_logger,
level=INFO,
log_message_function=self._generate_thread_status_messages,
args=(
self._monitor_pool,
self._nlines,
),
):
try:
batch_start_time = datetime.utcnow()
args_list = [
(
self._task_queue, # Shared task queue for all sub processes to read the input data.
result_list, # Line result list of the batch run.
i, # Index of the sub process.
# Specific input queue for sub process, used to send input data to it.
self._input_queues[i],
# Specific output queue for the sub process, used to receive results from it.
self._output_queues[i],
batch_start_time,
)
for i in range(self._n_process)
]
# The variable 'async_result' here is not the actual result of the batch run
# but an AsyncResult object that can be used to check if the execution are finished
# The actual results of the batch run are stored in 'result_list'
# Create _n_process monitoring threads, mainly used to assign tasks and receive line result.
# When task_queue is empty, end the process.
# When line execution timeout or process crash, restart the process.
async_result = self._monitor_pool.starmap_async(
self._monitor_workers_and_process_tasks_in_thread, args_list
)
try:
# Only log when the number of results changes to avoid duplicate logging.
last_log_count = 0
# Wait for batch run to complete or KeyboardInterrupt
while not async_result.ready():
current_result_count = len(result_list)
if current_result_count != last_log_count:
log_progress(
run_start_time=run_start_time,
logger=bulk_logger,
count=len(result_list),
total_count=self._nlines,
)
last_log_count = current_result_count
# Check every 1 second
async_result.wait(1)
# To ensure exceptions in thread-pool calls are propagated to the main process for proper handling
# The exceptions raised will be re-raised by the get() method.
# Related link:
# https://docs.python.org/3/library/multiprocessing.html#multiprocessing.pool.AsyncResult
async_result.get()
except KeyboardInterrupt:
raise
except PromptflowException:
raise
except Exception as e:
bulk_logger.error(f"ProcessPool failed with exception: {e}")
raise ProcessPoolError(
message_format=f"ProcessPool failed with exception: {e}",
target=ErrorTarget.EXECUTOR,
) from e
return result_list
def _generate_thread_status_messages(self, pool: ThreadPool, total_count: int):
msgs = []
active_threads = sum(thread.is_alive() for thread in pool._pool)
msgs.append(f"[Process Pool] [Active processes: {active_threads} / {len(pool._pool)}]")
processing_lines_copy = self._processing_idx.copy()
completed_lines_copy = self._completed_idx.copy()
msgs.append(
f"[Lines] [Finished: {len(completed_lines_copy)}] [Processing: {len(processing_lines_copy)}] "
f"[Pending: {total_count - len(processing_lines_copy) - len(completed_lines_copy)}]"
)
lines = []
for idx, thread_name in sorted(processing_lines_copy.items()):
lines.append(f"line {idx} ({thread_name})")
if len(lines) > 0:
msgs.append("Processing Lines: " + ", ".join(lines) + ".")
return msgs
def _determine_worker_count(self):
worker_count = get_int_env_var("PF_WORKER_COUNT")
# Starting a new process in non-fork mode requires to allocate memory. Calculate the maximum number of processes
# based on available memory to avoid memory bursting.
estimated_available_worker_count = get_available_max_worker_count() if not self._use_fork else None
# If the environment variable PF_WORKER_COUNT exists and valid, use the value as the worker_count.
if worker_count is not None and worker_count > 0:
self._log_set_worker_count(worker_count, estimated_available_worker_count)
return worker_count
# If the environment variable PF_WORKER_COUNT is not set or invalid, take the minimum value among the
# factors: default_worker_count, row_count and estimated_worker_count_based_on_memory_usage
factors = {
"default_worker_count": self._DEFAULT_WORKER_COUNT,
"row_count": self._nlines,
"estimated_worker_count_based_on_memory_usage": estimated_available_worker_count,
}
valid_factors = {k: v for k, v in factors.items() if v is not None and v > 0}
# Take the minimum value as the result
worker_count = min(valid_factors.values())
bulk_logger.info(
f"Set process count to {worker_count} by taking the minimum value among the factors of {valid_factors}."
)
return worker_count
def _log_set_worker_count(self, worker_count, estimated_available_worker_count):
    """Log the PF_WORKER_COUNT-configured worker count, warning if it exceeds the memory-based estimate."""
    bulk_logger.info(f"Set process count to {worker_count} with the environment variable 'PF_WORKER_COUNT'.")
    exceeds_estimate = (
        estimated_available_worker_count is not None and estimated_available_worker_count < worker_count
    )
    if exceeds_estimate:
        # The user asked for more processes than the memory estimate recommends.
        bulk_logger.warning(
            f"The current process count ({worker_count}) is larger than recommended process count "
            f"({estimated_available_worker_count}) that estimated by system available memory. This may "
            f"cause memory exhaustion"
        )
def _exec_line(
    executor: FlowExecutor, output_queue: Queue, *, inputs: dict, run_id: str, index: int, line_timeout_sec: int
):
    """Execute a single line of inputs with the given executor.

    On success, returns the ``LineResult`` with the line-number key stripped from the
    output (and dataclass outputs converted to dicts so they stay picklable). On any
    exception, records the failure in a run tracker, pushes the failed run info to
    ``output_queue``, and returns a ``LineResult`` with empty output so the caller
    always receives a result.

    :param executor: flow executor used to run the line.
    :param output_queue: queue used to surface run info back to the parent process.
    :param inputs: resolved inputs for this line.
    :param run_id: id of the enclosing batch run.
    :param index: zero-based line index; may be None.
    :param line_timeout_sec: per-line execution timeout in seconds.
    """
    try:
        line_result = executor.exec_line(
            inputs=inputs,
            run_id=run_id,
            index=index,
            node_concurrency=DEFAULT_CONCURRENCY_BULK,
            line_timeout_sec=line_timeout_sec,
        )
        if line_result is not None:
            # For eager flow, the output may be a dataclass which is not picklable, we need to convert it to dict.
            if not isinstance(line_result.output, dict):
                line_result.output = convert_eager_flow_output_to_dict(line_result.output)
            line_result.output.pop(LINE_NUMBER_KEY, None)
        # TODO: Put serialized line result into queue to catch serialization error beforehand.
        # Otherwise it might cause the process to hang, e.g, line failed because output is not serializable.
        if line_result is not None and line_result.run_info.status == Status.Failed:
            line_result.output = {}
        return line_result
    except Exception as e:
        bulk_logger.error(f"Line {index}, Process {os.getpid()} failed with exception: {e}")
        flow_id = executor._flow_id
        line_run_id = run_id if index is None else f"{run_id}_{index}"
        # If line execution failed before start, there is no flow information in the run_tracker.
        # So we call start_flow_run before handling exception to make sure the run_tracker has flow info.
        if isinstance(executor, ScriptExecutor):
            run_tracker = RunTracker(executor._storage)
        else:
            run_tracker = executor._run_tracker
        run_tracker.start_flow_run(flow_id, run_id, line_run_id, run_id)
        # Fix: end the run with the same line_run_id used to start it. The previous
        # f"{run_id}_{index}" produced "<run_id>_None" when index is None, ending a
        # run id that was never started.
        run_info = run_tracker.end_run(line_run_id, ex=e)
        output_queue.put(run_info)
        result = LineResult(
            output={},
            aggregation_inputs={},
            run_info=run_info,
            node_run_infos={},
        )
        return result
def _process_wrapper(
    executor_creation_func,
    input_queue: Queue,
    output_queue: Queue,
    log_context_initialization_func,
    operation_contexts_dict: dict,
):
    """Entry point for each worker process of the batch execution pool.

    Registers the SIGINT handler (main thread only), propagates the parent's
    operation context, then enters the line-execution loop, optionally inside
    the provided log context.
    """
    # Signal handlers may only be installed from a process's main thread.
    if threading.current_thread() is threading.main_thread():
        signal.signal(signal.SIGINT, signal_handler)
    else:
        bulk_logger.info("Current thread is not main thread, skip signal handler registration in batch process pool.")
    # Update the operation context for the new process.
    OperationContext.get_instance().update(operation_contexts_dict)
    if not log_context_initialization_func:
        exec_line_for_queue(executor_creation_func, input_queue, output_queue)
    else:
        with log_context_initialization_func():
            exec_line_for_queue(executor_creation_func, input_queue, output_queue)
def create_executor_fork(*, flow_executor: FlowExecutor, storage: AbstractRunStorage):
    """Clone an executor for a forked child process, rebinding it to the child's storage.

    Script executors are rebuilt directly from their constructor arguments; flow
    executors additionally get a fresh run tracker bound to the child's storage.
    """
    if isinstance(flow_executor, ScriptExecutor):
        return ScriptExecutor(
            flow_file=flow_executor._flow_file,
            entry=flow_executor._entry,
            connections=flow_executor._connections,
            working_dir=flow_executor._working_dir,
            storage=storage,
        )
    child_run_tracker = RunTracker(run_storage=storage)
    return FlowExecutor(
        flow=flow_executor._flow,
        connections=flow_executor._connections,
        run_tracker=child_run_tracker,
        cache_manager=flow_executor._cache_manager,
        loaded_tools=flow_executor._loaded_tools,
        raise_ex=False,
        line_timeout_sec=flow_executor._line_timeout_sec,
    )
def exec_line_for_queue(executor_creation_func, input_queue: Queue, output_queue: Queue):
    """Worker-process loop: pull line tasks from ``input_queue``, execute them, and push results to ``output_queue``.

    Loops forever; per the TODO below, it currently relies on the parent killing
    the process rather than exiting gracefully.
    """
    # Run infos produced during execution are streamed back through the output queue.
    run_storage = QueueRunStorage(output_queue)
    executor: FlowExecutor = executor_creation_func(storage=run_storage)

    while True:
        try:
            # Each task is a (inputs, line_number, run_id, line_timeout_sec) tuple;
            # the 1-second timeout keeps the loop responsive instead of blocking forever.
            inputs, line_number, run_id, line_timeout_sec = input_queue.get(timeout=1)
            result = _exec_line(
                executor=executor,
                output_queue=output_queue,
                inputs=inputs,
                run_id=run_id,
                index=line_number,
                line_timeout_sec=line_timeout_sec,
            )
            output_queue.put(result)
        except queue.Empty:
            # Do nothing until the input_queue have content or process is killed
            # TODO: Exit the process more gracefully.
            pass
def get_available_max_worker_count():
    """Estimate how many worker processes fit into the currently available system memory.

    Divides the system's available memory by the resident memory of the current
    process; the result is clamped to at least 1.
    """
    current_process = psutil.Process(os.getpid())
    available_memory = psutil.virtual_memory().available / (1024 * 1024)  # in MB
    process_memory = current_process.memory_info().rss / (1024 * 1024)  # in MB
    estimated_available_worker_count = int(available_memory // process_memory)
    if estimated_available_worker_count < 1:
        # TODO: For the case of vector db, Optimize execution logic
        # 1. Let the main process not consume memory because it does not actually invoke
        # 2. When the degree of parallelism is 1, main process executes the task directly and not
        #    create the child process
        bulk_logger.warning(
            f"Current system's available memory is {available_memory}MB, less than the memory "
            f"{process_memory}MB required by the process. The maximum available worker count is 1."
        )
        estimated_available_worker_count = 1
    else:
        bulk_logger.info(
            f"Current system's available memory is {available_memory}MB, "
            f"memory consumption of current process is {process_memory}MB, "
            f"estimated available worker count is {available_memory}/{process_memory} "
            f"= {estimated_available_worker_count}"
        )
    return estimated_available_worker_count
| promptflow/src/promptflow/promptflow/executor/_line_execution_process_pool.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_line_execution_process_pool.py",
"repo_id": "promptflow",
"token_count": 14550
} | 43 |
# Formatting configuration for the black code formatter.
[tool.black]
line-length = 120

# Pytest configuration: custom markers used to categorize and select test suites.
[tool.pytest.ini_options]
markers = [
    "sdk_test",
    "cli_test",
    "unittest",
    "e2etest",
    "flaky",
    "endpointtest",
    "mt_endpointtest",
]

# Coverage measurement configuration.
[tool.coverage.run]
omit = [
    # omit anything in a _restclient directory anywhere
    "*/_restclient/*",
]
| promptflow/src/promptflow/pyproject.toml/0 | {
"file_path": "promptflow/src/promptflow/pyproject.toml",
"repo_id": "promptflow",
"token_count": 139
} | 44 |
import json
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._core._errors import FlowOutputUnserializable, InvalidSource
from promptflow._core.tools_manager import APINotFound
from promptflow._sdk._constants import DAG_FILE_NAME
from promptflow._utils.utils import dump_list_to_jsonl
from promptflow.batch import BatchEngine
from promptflow.contracts._errors import FailedToImportModule
from promptflow.executor import FlowExecutor
from promptflow.executor._errors import (
ConnectionNotFound,
DuplicateNodeName,
EmptyOutputReference,
InputNotFound,
InputReferenceNotFound,
InputTypeError,
InvalidConnectionType,
NodeCircularDependency,
NodeInputValidationError,
NodeReferenceNotFound,
OutputReferenceNotFound,
ResolveToolError,
SingleNodeValidationError,
)
from ..utils import FLOW_ROOT, WRONG_FLOW_ROOT, get_flow_folder, get_flow_inputs_file, get_yaml_file
@pytest.mark.usefixtures("use_secrets_config_file", "dev_connections")
@pytest.mark.e2etest
class TestValidation:
    """E2E tests asserting that invalid flows and invalid inputs fail with the expected error types and messages.

    Covers executor creation, single-line runs, batch runs, and single-node runs.
    The expected error messages below are asserted verbatim against the product code.
    """

    @pytest.mark.parametrize(
        "flow_folder, yml_file, error_class, inner_class, error_msg",
        [
            (
                "flow_llm_with_wrong_conn",
                "flow.dag.yaml",
                ResolveToolError,
                InvalidConnectionType,
                (
                    "Tool load failed in 'wrong_llm': "
                    "(InvalidConnectionType) Connection type CustomConnection is not supported for LLM."
                ),
            ),
            (
                "nodes_names_duplicated",
                "flow.dag.yaml",
                DuplicateNodeName,
                None,
                (
                    "Invalid node definitions found in the flow graph. Node with name 'stringify_num' appears more "
                    "than once in the node definitions in your flow, which is not allowed. To "
                    "address this issue, please review your flow and either rename or remove "
                    "nodes with identical names."
                ),
            ),
            (
                "source_file_missing",
                "flow.dag.jinja.yaml",
                ResolveToolError,
                InvalidSource,
                (
                    "Tool load failed in 'summarize_text_content': (InvalidSource) "
                    "Node source path 'summarize_text_content__variant_1.jinja2' is invalid on node "
                    "'summarize_text_content'."
                ),
            ),
            (
                "node_reference_not_found",
                "flow.dag.yaml",
                NodeReferenceNotFound,
                None,
                (
                    "Invalid node definitions found in the flow graph. Node 'divide_num_2' references a non-existent "
                    "node 'divide_num_3' in your flow. Please review your flow to ensure that the "
                    "node name is accurately specified."
                ),
            ),
            (
                "node_circular_dependency",
                "flow.dag.yaml",
                NodeCircularDependency,
                None,
                (
                    "Invalid node definitions found in the flow graph. Node circular dependency has been detected "
                    "among the nodes in your flow. Kindly review the reference relationships for "
                    "the nodes ['divide_num', 'divide_num_1', 'divide_num_2'] and resolve the "
                    "circular reference issue in the flow."
                ),
            ),
            (
                "flow_input_reference_invalid",
                "flow.dag.yaml",
                InputReferenceNotFound,
                None,
                (
                    "Invalid node definitions found in the flow graph. Node 'divide_num' references flow input 'num_1' "
                    "which is not defined in your flow. To resolve this issue, please review your "
                    "flow, ensuring that you either add the missing flow inputs or adjust node "
                    "reference to the correct flow input."
                ),
            ),
            (
                "flow_output_reference_invalid",
                "flow.dag.yaml",
                EmptyOutputReference,
                None,
                (
                    "The output 'content' for flow is incorrect. The reference is not specified for the output "
                    "'content' in the flow. To rectify this, ensure that you accurately specify "
                    "the reference in the flow."
                ),
            ),
            (
                "outputs_reference_not_valid",
                "flow.dag.yaml",
                OutputReferenceNotFound,
                None,
                (
                    "The output 'content' for flow is incorrect. The output 'content' references non-existent "
                    "node 'another_stringify_num' in your flow. To resolve this issue, please "
                    "carefully review your flow and correct the reference definition for the "
                    "output in question."
                ),
            ),
            (
                "outputs_with_invalid_flow_inputs_ref",
                "flow.dag.yaml",
                OutputReferenceNotFound,
                None,
                (
                    "The output 'num' for flow is incorrect. The output 'num' references non-existent flow "
                    "input 'num11' in your flow. Please carefully review your flow and correct "
                    "the reference definition for the output in question."
                ),
            ),
        ],
    )
    def test_executor_create_failure_type_and_message(
        self, flow_folder, yml_file, error_class, inner_class, error_msg, dev_connections
    ):
        """Creating an executor from an invalid flow raises the expected error type with the exact message."""
        with pytest.raises(error_class) as exc_info:
            FlowExecutor.create(get_yaml_file(flow_folder, WRONG_FLOW_ROOT, yml_file), dev_connections)
        # ResolveToolError wraps the real failure; check the wrapped type as well.
        if isinstance(exc_info.value, ResolveToolError):
            assert isinstance(exc_info.value.inner_exception, inner_class)
        assert error_msg == exc_info.value.message

    @pytest.mark.parametrize(
        "flow_folder, yml_file, error_class, inner_class",
        [
            ("source_file_missing", "flow.dag.python.yaml", ResolveToolError, InvalidSource),
        ],
    )
    def test_executor_create_failure_type(self, flow_folder, yml_file, error_class, inner_class, dev_connections):
        """Like the test above, but only the error types (not the messages) are asserted."""
        with pytest.raises(error_class) as e:
            FlowExecutor.create(get_yaml_file(flow_folder, WRONG_FLOW_ROOT, yml_file), dev_connections)
        if isinstance(e.value, ResolveToolError):
            assert isinstance(e.value.inner_exception, inner_class)

    @pytest.mark.parametrize(
        "ordered_flow_folder, unordered_flow_folder",
        [
            ("web_classification_no_variants", "web_classification_no_variants_unordered"),
        ],
    )
    def test_node_topology_in_order(self, ordered_flow_folder, unordered_flow_folder, dev_connections):
        """Nodes are topologically sorted on load, so node order matches regardless of YAML order."""
        ordered_executor = FlowExecutor.create(get_yaml_file(ordered_flow_folder), dev_connections)
        unordered_executor = FlowExecutor.create(get_yaml_file(unordered_flow_folder), dev_connections)
        for node1, node2 in zip(ordered_executor._flow.nodes, unordered_executor._flow.nodes):
            assert node1.name == node2.name

    @pytest.mark.parametrize(
        "flow_folder, error_class, inner_class",
        [
            ("invalid_connection", ResolveToolError, ConnectionNotFound),
            ("tool_type_missing", ResolveToolError, NotImplementedError),
            ("wrong_module", FailedToImportModule, None),
            ("wrong_api", ResolveToolError, APINotFound),
            ("wrong_provider", ResolveToolError, APINotFound),
        ],
    )
    def test_invalid_flow_dag(self, flow_folder, error_class, inner_class, dev_connections):
        """Various malformed flow DAGs fail executor creation with the expected error types."""
        with pytest.raises(error_class) as e:
            FlowExecutor.create(get_yaml_file(flow_folder, WRONG_FLOW_ROOT), dev_connections)
        if isinstance(e.value, ResolveToolError):
            assert isinstance(e.value.inner_exception, inner_class)

    @pytest.mark.parametrize(
        "flow_folder, line_input, error_class",
        [
            ("simple_flow_with_python_tool", {"num11": "22"}, InputNotFound),
            ("simple_flow_with_python_tool", {"num": "hello"}, InputTypeError),
            ("python_tool_with_simple_image_without_default", {}, InputNotFound),
        ],
    )
    def test_flow_run_input_type_invalid(self, flow_folder, line_input, error_class, dev_connections):
        """Missing or wrongly-typed line inputs raise during exec_line."""
        # Flow run - the input is from get_partial_line_inputs()
        executor = FlowExecutor.create(get_yaml_file(flow_folder, FLOW_ROOT), dev_connections)
        with pytest.raises(error_class):
            executor.exec_line(line_input)

    @pytest.mark.parametrize(
        "flow_folder, line_input, error_class, error_msg",
        [
            (
                "flow_output_unserializable",
                {"num": "22"},
                FlowOutputUnserializable,
                (
                    "The output 'content' for flow is incorrect. The output value is not JSON serializable. "
                    "JSON dump failed: (TypeError) Object of type UnserializableClass is not JSON serializable. "
                    "Please verify your flow output and make sure the value serializable."
                ),
            ),
        ],
    )
    def test_flow_run_execution_errors(self, flow_folder, line_input, error_class, error_msg, dev_connections):
        """Execution-time errors are swallowed by the executor but surfaced via run_info.error."""
        executor = FlowExecutor.create(get_yaml_file(flow_folder, WRONG_FLOW_ROOT), dev_connections)
        # For now, there exception is designed to be swallowed in executor. But Run Info would have the error details
        res = executor.exec_line(line_input)
        assert error_msg == res.run_info.error["message"]

    @pytest.mark.parametrize(
        "flow_folder, inputs_mapping, error_message, error_class",
        [
            (
                "simple_flow_with_python_tool",
                {"num": "${data.num}"},
                (
                    "The input for flow is incorrect. The value for flow input 'num' in line 0 of input data does not "
                    "match the expected type 'int'. Please change flow input type or adjust the input value in "
                    "your input data."
                ),
                "InputTypeError",
            ),
        ],
    )
    def test_batch_run_input_type_invalid(
        self, flow_folder, inputs_mapping, error_message, error_class, dev_connections
    ):
        """Batch runs report wrongly-typed input data in the error summary instead of raising."""
        # Bulk run - the input is from sample.json
        batch_engine = BatchEngine(
            get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections=dev_connections
        )
        input_dirs = {"data": get_flow_inputs_file(flow_folder)}
        output_dir = Path(mkdtemp())
        batch_results = batch_engine.run(input_dirs, inputs_mapping, output_dir)

        assert error_message in str(
            batch_results.error_summary.error_list[0].error
        ), f"Expected message {error_message} but got {str(batch_results.error_summary.error_list[0].error)}"
        assert error_class in str(
            batch_results.error_summary.error_list[0].error
        ), f"Expected message {error_class} but got {str(batch_results.error_summary.error_list[0].error)}"

    @pytest.mark.parametrize(
        "path_root, flow_folder, node_name, line_input, error_class, error_msg",
        [
            (
                FLOW_ROOT,
                "simple_flow_with_python_tool",
                "divide_num",
                {"num11": "22"},
                InputNotFound,
                (
                    "The input for node is incorrect. Node input 'num' is not found in input data "
                    "for node 'divide_num'. Please verify the inputs data for the node."
                ),
            ),
            (
                FLOW_ROOT,
                "simple_flow_with_python_tool",
                "divide_num",
                {"num": "hello"},
                InputTypeError,
                (
                    "The input for node is incorrect. Value for input 'num' of node 'divide_num' "
                    "is not type 'int'. Please review and rectify the input data."
                ),
            ),
            (
                WRONG_FLOW_ROOT,
                "flow_input_reference_invalid",
                "divide_num",
                {"num": "22"},
                InputNotFound,
                (
                    "The input for node is incorrect. Node input 'num_1' is not found from flow "
                    "inputs of node 'divide_num'. Please review the node definition in your flow."
                ),
            ),
            (
                FLOW_ROOT,
                "simple_flow_with_python_tool",
                "bad_node_name",
                {"num": "22"},
                SingleNodeValidationError,
                (
                    "Validation failed when attempting to execute the node. Node 'bad_node_name' is not found in flow "
                    "'flow.dag.yaml'. Please change node name or correct the flow file."
                ),
            ),
            (
                WRONG_FLOW_ROOT,
                "node_missing_type_or_source",
                "divide_num",
                {"num": "22"},
                SingleNodeValidationError,
                (
                    "Validation failed when attempting to execute the node. Properties 'source' or 'type' are not "
                    "specified for Node 'divide_num' in flow 'flow.dag.yaml'. Please make sure "
                    "these properties are in place and try again."
                ),
            ),
        ],
    )
    def test_single_node_input_type_invalid(
        self, path_root: str, flow_folder, node_name, line_input, error_class, error_msg, dev_connections
    ):
        """Single-node execution validates the node name, its definition, and its inputs."""
        # Single Node run - the inputs are from flow_inputs + dependency_nodes_outputs
        with pytest.raises(error_class) as exe_info:
            FlowExecutor.load_and_exec_node(
                flow_file=DAG_FILE_NAME,
                node_name=node_name,
                flow_inputs=line_input,
                dependency_nodes_outputs={},
                connections=dev_connections,
                working_dir=Path(path_root) / flow_folder,
                raise_ex=True,
            )
        assert error_msg == exe_info.value.message

    @pytest.mark.parametrize(
        "flow_folder, msg",
        [
            (
                "prompt_tool_with_duplicated_inputs",
                "Invalid inputs {'template'} in prompt template of node prompt_tool_with_duplicated_inputs. "
                "These inputs are duplicated with the reserved parameters of prompt tool.",
            ),
            (
                "llm_tool_with_duplicated_inputs",
                "Invalid inputs {'prompt'} in prompt template of node llm_tool_with_duplicated_inputs. "
                "These inputs are duplicated with the parameters of AzureOpenAI.completion.",
            ),
        ],
    )
    def test_flow_run_with_duplicated_inputs(self, flow_folder, msg, dev_connections):
        """Prompt-template inputs clashing with reserved/LLM parameters fail tool resolution."""
        with pytest.raises(ResolveToolError, match=msg) as e:
            FlowExecutor.create(get_yaml_file(flow_folder, FLOW_ROOT), dev_connections)
        assert isinstance(e.value.inner_exception, NodeInputValidationError)

    @pytest.mark.parametrize(
        "flow_folder, batch_input, raise_on_line_failure, error_class",
        [
            ("simple_flow_with_python_tool", [{"num": "hello"}], True, Exception),
            ("simple_flow_with_python_tool", [{"num": "hello"}], False, InputTypeError),
            ("simple_flow_with_python_tool", [{"num": "22"}], True, None),
            ("simple_flow_with_python_tool", [{"num": "22"}], False, None),
        ],
    )
    def test_batch_run_raise_on_line_failure(
        self, flow_folder, batch_input, raise_on_line_failure, error_class, dev_connections
    ):
        """raise_on_line_failure decides whether a failing line raises or is recorded in the error summary."""
        # Bulk run - the input is from sample.json
        batch_engine = BatchEngine(
            get_yaml_file(flow_folder), get_flow_folder(flow_folder), connections=dev_connections
        )
        # prepare input file and output dir
        input_file = Path(mkdtemp()) / "inputs.jsonl"
        dump_list_to_jsonl(input_file, batch_input)
        input_dirs = {"data": input_file}
        output_dir = Path(mkdtemp())
        inputs_mapping = {"num": "${data.num}"}

        if error_class is None:
            batch_result = batch_engine.run(
                input_dirs, inputs_mapping, output_dir, raise_on_line_failure=raise_on_line_failure
            )
            assert batch_result.total_lines == 1
            assert batch_result.completed_lines == 1
            assert batch_result.error_summary.error_list == []
        else:
            if raise_on_line_failure:
                with pytest.raises(error_class):
                    batch_engine.run(
                        input_dirs, inputs_mapping, output_dir, raise_on_line_failure=raise_on_line_failure
                    )
            else:
                batch_result = batch_engine.run(
                    input_dirs, inputs_mapping, output_dir, raise_on_line_failure=raise_on_line_failure
                )
                assert batch_result.total_lines == 1
                assert batch_result.failed_lines == 1
                assert error_class.__name__ in json.dumps(batch_result.error_summary.error_list[0].error)
| promptflow/src/promptflow/tests/executor/e2etests/test_executor_validation.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/e2etests/test_executor_validation.py",
"repo_id": "promptflow",
"token_count": 8603
} | 45 |
{{api}}
| promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/prompt_with_duplicated_inputs.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool_with_duplicated_inputs/prompt_with_duplicated_inputs.jinja2",
"repo_id": "promptflow",
"token_count": 4
} | 46 |
import inspect
import pytest
from promptflow import tool
from promptflow._core.tool import InputSetting, ToolType
from promptflow._core.tracer import Tracer, TraceType
from promptflow.exceptions import UserErrorException
# Identity tool decorated with bare ``@tool`` (no parentheses); used to verify trace creation.
@tool
def decorated_without_parentheses(a: int):
    return a
# Identity tool decorated with ``@tool()`` (with parentheses); used to verify trace creation.
@tool()
def decorated_with_parentheses(a: int):
    return a
# Async identity tool decorated with bare ``@tool``; used to verify trace creation for coroutines.
@tool
async def decorated_without_parentheses_async(a: int):
    return a
# Async identity tool decorated with ``@tool()``; used to verify trace creation for coroutines.
@tool()
async def decorated_with_parentheses_async(a: int):
    return a
# Tool exercising every decorator attribute (name, description, type, input settings,
# streaming option, and arbitrary extra kwargs); the test below asserts each is stored
# as a dunder-prefixed attribute on the function.
@tool(
    name="tool_with_attributes",
    description="Sample tool with a lot of attributes",
    type=ToolType.LLM,
    input_settings=InputSetting(),
    streaming_option_parameter="stream",
    extra_a="a",
    extra_b="b",
)
def tool_with_attributes(stream: bool, a: int, b: int):
    return stream, a, b
@pytest.mark.unittest
class TestTool:
    """This class tests the `tool` decorator."""

    @pytest.mark.asyncio
    @pytest.mark.parametrize(
        "func",
        [
            decorated_with_parentheses,
            decorated_without_parentheses,
            decorated_with_parentheses_async,
            decorated_without_parentheses_async,
        ],
    )
    async def test_traces_are_created_correctly(self, func):
        """Each decorated call (sync or async) records exactly one TOOL trace with the call details."""
        Tracer.start_tracing("test_run_id")
        if inspect.iscoroutinefunction(func):
            result = await func(1)
        else:
            result = func(1)
        assert result == 1
        traces = Tracer.end_tracing()
        assert len(traces) == 1
        trace = traces[0]
        assert trace["name"] == func.__qualname__
        assert trace["type"] == TraceType.TOOL
        assert trace["inputs"] == {"a": 1}
        assert trace["output"] == 1
        assert trace["error"] is None
        assert trace["children"] == []
        assert isinstance(trace["start_time"], float)
        assert isinstance(trace["end_time"], float)

    def test_attributes_are_set_to_the_tool_function(self):
        """Decorator kwargs are stored on the function and the function still runs unchanged."""
        stream, a, b = tool_with_attributes(True, 1, 2)
        # Check the results are as expected
        assert stream is True
        assert a == 1
        assert b == 2
        # Check the attributes are set to the function
        assert getattr(tool_with_attributes, "__tool") is None
        assert getattr(tool_with_attributes, "__name") == "tool_with_attributes"
        assert getattr(tool_with_attributes, "__description") == "Sample tool with a lot of attributes"
        assert getattr(tool_with_attributes, "__type") == ToolType.LLM
        assert getattr(tool_with_attributes, "__input_settings") == InputSetting()
        assert getattr(tool_with_attributes, "__extra_info") == {"extra_a": "a", "extra_b": "b"}
        assert getattr(tool_with_attributes, "_streaming_option_parameter") == "stream"

    def test_invalid_tool_type_should_raise_error(self):
        """An unknown ``type=`` value makes the decorator raise UserErrorException."""
        with pytest.raises(UserErrorException, match="Tool type invalid_type is not supported yet."):

            @tool(type="invalid_type")
            def invalid_tool_type():
                pass
| promptflow/src/promptflow/tests/executor/unittests/_core/test_tool.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/_core/test_tool.py",
"repo_id": "promptflow",
"token_count": 1222
} | 47 |
import pytest
import os
from unittest.mock import patch
from datetime import datetime
from promptflow._utils.utils import is_json_serializable, get_int_env_var, log_progress
# Plain object with no JSON representation; used as the negative case for is_json_serializable.
class MyObj:
    pass
@pytest.mark.unittest
class TestUtils:
    """Unit tests for is_json_serializable, get_int_env_var, and log_progress."""

    @pytest.mark.parametrize("value, expected_res", [(None, True), (1, True), ("", True), (MyObj(), False)])
    def test_is_json_serializable(self, value, expected_res):
        assert is_json_serializable(value) == expected_res

    @pytest.mark.parametrize(
        "env_var, env_value, default_value, expected_result",
        [
            ("TEST_VAR", "10", None, 10),  # Valid integer string
            ("TEST_VAR", "invalid", None, None),  # Invalid integer strings
            ("TEST_VAR", None, 5, 5),  # Environment variable does not exist
            ("TEST_VAR", "10", 5, 10),  # Valid integer string with a default value
            ("TEST_VAR", "invalid", 5, 5),  # Invalid integer string with a default value
        ])
    def test_get_int_env_var(self, env_var, env_value, default_value, expected_result):
        # patch.dict with an empty mapping still isolates the test from the real environment.
        with patch.dict(os.environ, {env_var: env_value} if env_value is not None else {}):
            assert get_int_env_var(env_var, default_value) == expected_result

    @pytest.mark.parametrize(
        "env_var, env_value, expected_result",
        [
            ("TEST_VAR", "10", 10),  # Valid integer string
            ("TEST_VAR", "invalid", None),  # Invalid integer strings
            ("TEST_VAR", None, None),  # Environment variable does not exist
        ])
    def test_get_int_env_var_without_default_vaue(self, env_var, env_value, expected_result):
        with patch.dict(os.environ, {env_var: env_value} if env_value is not None else {}):
            assert get_int_env_var(env_var) == expected_result

    @patch('promptflow.executor._line_execution_process_pool.bulk_logger', autospec=True)
    def test_log_progress(self, mock_logger):
        # NOTE(review): datetime.utcnow() is deprecated since Python 3.12; consider
        # datetime.now(timezone.utc) — confirm log_progress accepts aware datetimes first.
        run_start_time = datetime.utcnow()
        count = 1
        # Test that nothing is logged when count is not at a logging interval (interval = 2)
        total_count = 20
        log_progress(run_start_time, mock_logger, count, total_count)
        mock_logger.info.assert_not_called()

        # Test logging at specified intervals (interval = 2)
        count = 8
        log_progress(run_start_time, mock_logger, count, total_count)
        mock_logger.info.assert_any_call("Finished 8 / 20 lines.")

        mock_logger.reset_mock()

        # Test logging using last_log_count parameter (count - last_log_count > interval (2))
        log_progress(run_start_time, mock_logger, count, total_count, last_log_count=5)
        mock_logger.info.assert_any_call("Finished 8 / 20 lines.")

        mock_logger.reset_mock()

        # Test that nothing is logged using last_log_count parameter (count - last_log_count < interval (2))
        log_progress(run_start_time, mock_logger, count, total_count, last_log_count=7)
        mock_logger.info.assert_not_called()
| promptflow/src/promptflow/tests/executor/unittests/_utils/test_utils.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/_utils/test_utils.py",
"repo_id": "promptflow",
"token_count": 1286
} | 48 |
import pytest
from pathlib import Path
from typing import Callable
from promptflow import tool
from promptflow.executor._assistant_tool_invoker import AssistantToolInvoker
from promptflow.executor._errors import UnsupportedAssistantToolType
@pytest.mark.unittest
class TestAssistantToolInvoker:
    """Unit tests for AssistantToolInvoker: loading tool definitions, exporting OpenAI
    tool descriptions, invoking a loaded tool, and rejecting unsupported tool types."""

    # NOTE(review): this fixture appears unused — the tests below build their own
    # tool_definitions locally; consider removing it or using it in test_load_tools.
    @pytest.fixture
    def tool_definitions(self):
        return [
            {"type": "code_interpreter"},
            {"type": "retrieval"},
            {
                "type": "function",
                "tool_type": "python",
                "source": {"type": "code", "path": "test_assistant_tool_invoker.py"},
            }
        ]

    @pytest.mark.parametrize(
        "predefined_inputs", [({}), ({"input_int": 1})]
    )
    def test_load_tools(self, predefined_inputs):
        """End-to-end over the invoker: load tools, export OpenAI descriptions, invoke.

        Predefined inputs must be removed from the exported parameter schema and
        must not be passed again at invocation time.
        """
        input_int = 1
        input_str = "test"
        tool_definitions = [
            {"type": "code_interpreter"},
            {"type": "retrieval"},
            {
                "type": "function",
                "tool_type": "python",
                "source": {"type": "code", "path": "test_assistant_tool_invoker.py"},
                "predefined_inputs": predefined_inputs
            }
        ]

        # Test load tools
        invoker = AssistantToolInvoker.init(tool_definitions, working_dir=Path(__file__).parent)
        for tool_name, assistant_tool in invoker._assistant_tools.items():
            assert tool_name in ("code_interpreter", "retrieval", "sample_tool")
            assert assistant_tool.name == tool_name
            assert isinstance(assistant_tool.openai_definition, dict)
            # Only "function" tools carry a callable; built-in tool types do not.
            if tool_name in ("code_interpreter", "retrieval"):
                assert assistant_tool.func is None
            else:
                assert isinstance(assistant_tool.func, Callable)

        # Test to_openai_tools
        descriptions = invoker.to_openai_tools()
        assert len(descriptions) == 3
        # Expected schema is derived from sample_tool's docstring below.
        properties = {
            "input_int": {"description": "This is a sample input int.", "type": "number"},
            "input_str": {"description": "This is a sample input str.", "type": "string"}
        }
        required = ["input_int", "input_str"]
        self._remove_predefined_inputs(properties, predefined_inputs.keys())
        self._remove_predefined_inputs(required, predefined_inputs.keys())
        for description in descriptions:
            if description["type"] in ("code_interpreter", "retrieval"):
                assert description == {"type": description["type"]}
            else:
                assert description == {
                    "type": "function",
                    "function": {
                        "name": "sample_tool",
                        "description": "This is a sample tool.",
                        "parameters": {
                            "type": "object",
                            "properties": properties,
                            "required": required
                        }
                    }
                }

        # Test invoke tool
        kwargs = {"input_int": input_int, "input_str": input_str}
        self._remove_predefined_inputs(kwargs, predefined_inputs.keys())
        result = invoker.invoke_tool(func_name="sample_tool", kwargs=kwargs)
        assert result == (input_int, input_str)

    def test_load_tools_with_invalid_case(self):
        """An unknown tool type fails fast with UnsupportedAssistantToolType."""
        tool_definitions = [{"type": "invalid_type"}]
        with pytest.raises(UnsupportedAssistantToolType) as exc_info:
            AssistantToolInvoker.init(tool_definitions)
        assert "Unsupported assistant tool type" in exc_info.value.message

    def _remove_predefined_inputs(self, value: any, predefined_inputs: list):
        """Drop predefined input names from a dict (by key) or a list (by value), in place."""
        for input in predefined_inputs:
            if input in value:
                if isinstance(value, dict):
                    value.pop(input)
                elif isinstance(value, list):
                    value.remove(input)
@tool
def sample_tool(input_int: int, input_str: str):
    """This is a sample tool.
    :param input_int: This is a sample input int.
    :type input_int: int
    :param input_str: This is a sample input str.
    :type input_str: str
    """
    # NOTE: the docstring above appears to be parsed by AssistantToolInvoker to build the
    # OpenAI function description (test_load_tools asserts its wording) — do not reword it.
    return input_int, input_str
| promptflow/src/promptflow/tests/executor/unittests/executor/test_assistant_tool_invoker.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/executor/test_assistant_tool_invoker.py",
"repo_id": "promptflow",
"token_count": 1969
} | 49 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import copy
import inspect
import json
from pathlib import Path
from typing import Dict, List
import vcr
from vcr import matchers
from vcr.request import Request
from .constants import FILTER_HEADERS, TEST_CLASSES_FOR_RUN_INTEGRATION_TEST_RECORDING, SanitizedValues
from .processors import (
AzureMLExperimentIDProcessor,
AzureOpenAIConnectionProcessor,
AzureResourceProcessor,
AzureWorkspaceTriadProcessor,
DropProcessor,
EmailProcessor,
IndexServiceProcessor,
PFSProcessor,
RecordingProcessor,
StorageProcessor,
UserInfoProcessor,
)
from .utils import (
is_httpx_response,
is_json_payload_request,
is_live,
is_record,
is_replay,
sanitize_automatic_runtime_request_path,
sanitize_azure_workspace_triad,
sanitize_file_share_flow_path,
sanitize_pfs_request_body,
sanitize_upload_hash,
)
from .variable_recorder import VariableRecorder
class PFAzureIntegrationTestRecording:
def __init__(
self,
test_class,
test_func_name: str,
user_object_id: str,
tenant_id: str,
variable_recorder: VariableRecorder,
):
self.test_class = test_class
self.test_func_name = test_func_name
self.user_object_id = user_object_id
self.tenant_id = tenant_id
self.recording_file = self._get_recording_file()
self.recording_processors = self._get_recording_processors()
self.vcr = self._init_vcr()
self._cm = None # context manager from VCR
self.cassette = None
self.variable_recorder = variable_recorder
@staticmethod
def from_test_case(test_class, test_func_name: str, **kwargs) -> "PFAzureIntegrationTestRecording":
test_class_name = test_class.__name__
if test_class_name in TEST_CLASSES_FOR_RUN_INTEGRATION_TEST_RECORDING:
return PFAzureRunIntegrationTestRecording(
test_class=test_class,
test_func_name=test_func_name,
user_object_id=kwargs["user_object_id"],
tenant_id=kwargs["tenant_id"],
variable_recorder=kwargs["variable_recorder"],
)
else:
return PFAzureIntegrationTestRecording(
test_class=test_class,
test_func_name=test_func_name,
user_object_id=kwargs["user_object_id"],
tenant_id=kwargs["tenant_id"],
variable_recorder=kwargs["variable_recorder"],
)
def _get_recording_file(self) -> Path:
# recording files are expected to be located at "tests/test_configs/recordings"
# test file path should locate at "tests/sdk_cli_azure_test/e2etests"
test_file_path = Path(inspect.getfile(self.test_class)).resolve()
recording_dir = (test_file_path.parent.parent.parent / "test_configs" / "recordings").resolve()
recording_dir.mkdir(exist_ok=True)
test_file_name = test_file_path.stem
test_class_name = self.test_class.__name__
if "[" in self.test_func_name:
# for tests that use pytest.mark.parametrize, there will be "[]" in test function name
# recording filename pattern:
# {test_file_name}_{test_class_name}_{test_func_name}/{parameter_id}.yaml
test_func_name, parameter_id = self.test_func_name.split("[")
parameter_id = parameter_id.rstrip("]")
test_func_dir = (recording_dir / f"{test_file_name}_{test_class_name}_{test_func_name}").resolve()
test_func_dir.mkdir(exist_ok=True)
recording_file = (test_func_dir / f"{parameter_id}.yaml").resolve()
else:
# for most remaining tests
# recording filename pattern: {test_file_name}_{test_class_name}_{test_func_name}.yaml
recording_filename = f"{test_file_name}_{test_class_name}_{self.test_func_name}.yaml"
recording_file = (recording_dir / recording_filename).resolve()
if is_record() and recording_file.is_file():
recording_file.unlink()
return recording_file
def _init_vcr(self) -> vcr.VCR:
_vcr = vcr.VCR(
cassette_library_dir=self.recording_file.parent.as_posix(),
before_record_request=self._process_request_recording,
before_record_response=self._process_response_recording,
decode_compressed_response=True,
record_mode="none" if is_replay() else "all",
filter_headers=FILTER_HEADERS,
)
_vcr.match_on += ("body",)
return _vcr
    def enter_vcr(self):
        """Enter the cassette context; keep the context manager so exit_vcr can close it."""
        self._cm = self.vcr.use_cassette(self.recording_file.as_posix())
        self.cassette = self._cm.__enter__()
    def exit_vcr(self):
        """Leave the cassette context; in record mode, sanitize traffic before it is written."""
        if is_record():
            self._postprocess_recording()
        # NOTE(review): __exit__ is invoked without (exc_type, exc_val, exc_tb);
        # this relies on vcrpy's context manager accepting *args — confirm on vcrpy upgrades
        self._cm.__exit__()
    def _process_request_recording(self, request: Request) -> Request:
        """VCR before_record_request hook: sanitize requests in record mode.

        Live mode passes requests through untouched; in replay mode the
        processors are skipped (sanitization already happened at record time).
        """
        if is_live():
            return request
        if is_record():
            for processor in self.recording_processors:
                request = processor.process_request(request)
        return request
    def _process_response_recording(self, response: Dict) -> Dict:
        """VCR before_record_response hook: normalize and sanitize responses.

        Converts httpx-shaped responses to the non-httpx layout so the shared
        sanitizers only have to deal with ``body.string``, then converts back.
        """
        if is_live():
            return response
        # httpx and non-httpx responses have different structure
        # non-httpx has .body.string, while httpx has .content
        # in our sanitizers (processors) logic, we only handle .body.string
        # so make httpx align non-httpx for less code change
        is_httpx = is_httpx_response(response)
        if is_httpx:
            body_string = response.pop("content")
            response["body"] = {"string": body_string}
        else:
            response["body"]["string"] = response["body"]["string"].decode("utf-8")
        if is_record():
            # lower and filter some headers
            headers = {}
            for k in response["headers"]:
                if k.lower() not in FILTER_HEADERS:
                    headers[k.lower()] = response["headers"][k]
            response["headers"] = headers
            for processor in self.recording_processors:
                response = processor.process_response(response)
        if is_httpx:
            response["content"] = response["body"]["string"]
            if not is_replay():
                # record mode: httpx cassettes persist "content"; drop the temporary "body"
                response.pop("body")
                if isinstance(response["content"], bytes):
                    response["content"] = response["content"].decode("utf-8")
            else:
                # vcrpy does not handle well with httpx, so we need some transformations
                # otherwise, replay tests will break during init VCR response instance
                response["status"] = {"code": response["status_code"], "message": ""}
                if isinstance(response["body"]["string"], str):
                    response["body"]["string"] = response["body"]["string"].encode("utf-8")
        else:
            # non-httpx body was decoded to str above; re-encode so vcrpy stores bytes
            response["body"]["string"] = response["body"]["string"].encode("utf-8")
        return response
    def _get_recording_processors(self) -> List[RecordingProcessor]:
        """Return the sanitizers applied to recorded traffic in record mode."""
        return [
            AzureMLExperimentIDProcessor(),
            AzureOpenAIConnectionProcessor(),
            AzureResourceProcessor(),
            AzureWorkspaceTriadProcessor(),
            DropProcessor(),
            EmailProcessor(),
            IndexServiceProcessor(),
            PFSProcessor(),
            StorageProcessor(),
            UserInfoProcessor(user_object_id=self.user_object_id, tenant_id=self.tenant_id),
        ]
def _postprocess_recording(self) -> None:
self._apply_replacement_for_recordings()
return
def _apply_replacement_for_recordings(self) -> None:
for i in range(len(self.cassette.data)):
req, resp = self.cassette.data[i]
req = self.variable_recorder.sanitize_request(req)
resp = self.variable_recorder.sanitize_response(resp)
self.cassette.data[i] = (req, resp)
return
class PFAzureRunIntegrationTestRecording(PFAzureIntegrationTestRecording):
    """Test class for run operations in Prompt Flow Azure.
    Different from other operations, run operations have:
    - duplicate network requests for stream run
    - blob storage requests contain upload hash
    - Submit and get run data API requests are indistinguishable without run name in body
    Use a separate class with more pre/post recording processing method or
    request matchers to handle above cases.
    """
    def _init_vcr(self) -> vcr.VCR:
        # extend the base VCR with custom path/body matchers for run scenarios
        _vcr = super(PFAzureRunIntegrationTestRecording, self)._init_vcr()
        _vcr.register_matcher("path", self._custom_request_path_matcher)
        _vcr.register_matcher("body", self._custom_request_body_matcher)
        return _vcr
    def enter_vcr(self):
        # allow_playback_repeats: stream run polls the same status/log endpoints repeatedly
        self._cm = self.vcr.use_cassette(
            self.recording_file.as_posix(),
            allow_playback_repeats=True,
            filter_query_parameters=["api-version"],
        )
        self.cassette = self._cm.__enter__()
    def _postprocess_recording(self) -> None:
        """Drop polling duplicates first, then apply the base-class replacements."""
        self._drop_duplicate_recordings()
        super(PFAzureRunIntegrationTestRecording, self)._postprocess_recording()
    def _drop_duplicate_recordings(self) -> None:
        # stream run operation contains two requests:
        # 1. get status; 2. get logs
        # before the run is terminated, there will be many duplicate requests
        # getting status/logs, which leads to infinite loop during replay
        # therefore apply such post process to drop those duplicate recordings
        dropped_recordings = []
        run_data_requests = dict()
        log_content_requests = dict()
        for req, resp in self.cassette.data:
            # run history's rundata API; keep only the last recording per run id
            if str(req.path).endswith("/rundata"):
                body = req.body.decode("utf-8")
                body_dict = json.loads(body)
                name = body_dict["runId"]
                run_data_requests[name] = (req, resp)
                continue
            if str(req.path).endswith("/logContent"):
                log_content_requests[req.uri] = (req, resp)
                continue
            dropped_recordings.append((req, resp))
        # append rundata recording(s)
        for req, resp in run_data_requests.values():
            dropped_recordings.append((req, resp))
        for req, resp in log_content_requests.values():
            dropped_recordings.append((req, resp))
        self.cassette.data = dropped_recordings
        return
    def _custom_request_path_matcher(self, r1: Request, r2: Request) -> bool:
        """Match live request r1 against recorded request r2 by (sanitized) path."""
        # NOTE: orders of below conditions matter, please modify with caution
        # in run download scenario, observed below weird path: https://<xxx>/https://<yyy>/<remaining>
        # as we don't have append/replace logic, it might result from Azure blob client,
        # which is hard to patch; therefore, hack this in matcher (here)
        # https:// should appear in path, so it's safe to use this as a condition
        if "https://" in r1.path:
            _path = str(r1.path)
            endpoint = ".blob.core.windows.net/"
            duplicate_path = _path[_path.index(endpoint) + len(endpoint) :]
            path_for_compare = _path[: _path.index("https://")] + duplicate_path[duplicate_path.index("/") + 1 :]
            return path_for_compare == r2.path
        # for blob storage request, sanitize the upload hash in path
        if r1.host == r2.host and r1.host == SanitizedValues.BLOB_STORAGE_REQUEST_HOST:
            return sanitize_upload_hash(r1.path) == r2.path
        # for file share request, mainly target pytest fixture "created_flow"
        if r1.host == r2.host and r1.host == SanitizedValues.FILE_SHARE_REQUEST_HOST:
            return sanitize_file_share_flow_path(r1.path) == r2.path
        # for automatic runtime, sanitize flow session id in path
        if r1.host == r2.host and ("FlowSessions" in r1.path and "FlowSessions" in r2.path):
            path1 = sanitize_automatic_runtime_request_path(r1.path)
            path2 = sanitize_automatic_runtime_request_path(r2.path)
            return sanitize_azure_workspace_triad(path1) == path2
        return r1.path == r2.path
    def _custom_request_body_matcher(self, r1: Request, r2: Request) -> bool:
        """Match live request r1 against recorded request r2 by sanitized JSON body."""
        if is_json_payload_request(r1) and r1.body is not None:
            # note that `sanitize_upload_hash` is not idempotent
            # so we should not modify r1 directly
            # otherwise it will be sanitized multiple times with many zeros
            _r1 = copy.deepcopy(r1)
            body1 = _r1.body.decode("utf-8")
            body1 = sanitize_pfs_request_body(body1)
            body1 = sanitize_upload_hash(body1)
            _r1.body = body1.encode("utf-8")
            try:
                return matchers.body(_r1, r2)
            except AssertionError:
                # if not match, extra sanitize flow file share path (if exists)
                # for potential pytest fixture "created_flow" scenario
                body_dict = json.loads(body1)
                if "flowDefinitionFilePath" in body_dict:
                    body_dict["flowDefinitionFilePath"] = "Users/unknown_user/promptflow/flow_name/flow.dag.yaml"
                    body1 = json.dumps(body_dict)
                    _r1.body = body1.encode("utf-8")
                    return matchers.body(_r1, r2)
                else:
                    return False
        else:
            return matchers.body(r1, r2)
| promptflow/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/bases.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/bases.py",
"repo_id": "promptflow",
"token_count": 6082
} | 50 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from unittest.mock import MagicMock
import pytest
from promptflow.exceptions import UserErrorException
@pytest.mark.unittest
class TestUtils:
    """Unit tests for Azure bulk-run URL/ID parsing and service-caller guards."""
    def test_url_parse(self):
        """BulkRunId / BulkRunURL extract experiment, flow and bulk-test ids."""
        from promptflow.azure._utils._url_utils import BulkRunId, BulkRunURL
        flow_id = (
            "azureml://experiment/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/flow/"
            "0ab9d2dd-3bac-4b68-bb28-12af959b1165/bulktest/715efeaf-b0b4-4778-b94a-2538152b8766/"
            "run/f88faee6-e510-45b7-9e63-08671b30b3a2"
        )
        flow_id = BulkRunId(flow_id)
        assert flow_id.experiment_id == "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"
        assert flow_id.flow_id == "0ab9d2dd-3bac-4b68-bb28-12af959b1165"
        assert flow_id.bulk_test_id == "715efeaf-b0b4-4778-b94a-2538152b8766"
        flow_run_url = (
            "https://ml.azure.com/prompts/flow/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/"
            "0ab9d2dd-3bac-4b68-bb28-12af959b1165/bulktest/715efeaf-b0b4-4778-b94a-2538152b8766/"
            "details?wsid=/subscriptions/96aede12-2f73-41cb-b983-6d11a904839b/resourcegroups/promptflow/"
            "providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus"
        )
        flow_url = BulkRunURL(flow_run_url)
        assert flow_url.experiment_id == "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"
        assert flow_url.flow_id == "0ab9d2dd-3bac-4b68-bb28-12af959b1165"
        assert flow_url.bulk_test_id == "715efeaf-b0b4-4778-b94a-2538152b8766"
    def test_forbidden_new_caller(self):
        """Direct construction of FlowServiceCaller must be rejected (factory only)."""
        from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller
        with pytest.raises(UserErrorException) as e:
            FlowServiceCaller(MagicMock(), MagicMock(), MagicMock())
        assert "_FlowServiceCallerFactory" in str(e.value)
| promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_utils.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_utils.py",
"repo_id": "promptflow",
"token_count": 953
} | 51 |
import copy
import os.path
import re
import shutil
import tempfile
from pathlib import Path
import mock
import pytest
from promptflow._sdk._constants import FLOW_TOOLS_JSON, NODE_VARIANTS, PROMPT_FLOW_DIR_NAME, USE_VARIANTS
from promptflow._utils.yaml_utils import load_yaml
from promptflow.connections import AzureOpenAIConnection
PROMOTFLOW_ROOT = Path(__file__) / "../../../.."
TEST_ROOT = Path(__file__).parent.parent.parent
MODEL_ROOT = TEST_ROOT / "test_configs/e2e_samples"
CONNECTION_FILE = (PROMOTFLOW_ROOT / "connections.json").resolve().absolute().as_posix()
FLOWS_DIR = "./tests/test_configs/flows"
DATAS_DIR = "./tests/test_configs/datas"
def e2e_test_docker_build_and_run(output_path):
    """Build and run the docker image locally.
    This function is for adhoc local test and need to run on a dev machine with docker installed.
    """
    import subprocess

    subprocess.check_output(["docker", "build", ".", "-t", "test"], cwd=output_path)
    subprocess.check_output(["docker", "tag", "test", "elliotz/promptflow-export-result:latest"], cwd=output_path)
    # bug fix: the "-e" value and the image name were one implicitly-concatenated
    # string literal (missing comma), so `docker run` received no image argument
    subprocess.check_output(
        [
            "docker",
            "run",
            "-e",
            "CUSTOM_CONNECTION_AZURE_OPENAI_API_KEY='xxx'",
            "elliotz/promptflow-export-result:latest",
        ],
        cwd=output_path,
    )
@pytest.fixture
def setup_connections(azure_open_ai_connection: AzureOpenAIConnection):
    """Create the local 'custom_connection' and 'azure_open_ai_connection' entries used by these tests."""
    _ = {
        "azure_open_ai_connection": azure_open_ai_connection,
    }
    from promptflow._sdk._pf_client import PFClient
    from promptflow._sdk.entities._connection import _Connection
    _client = PFClient()
    # custom connection: carries the deployment name as a config and the key as a secret
    _client.connections.create_or_update(
        _Connection._load(
            data={
                "name": "custom_connection",
                "type": "custom",
                "configs": {
                    "CHAT_DEPLOYMENT_NAME": "gpt-35-turbo",
                    "AZURE_OPENAI_API_BASE": azure_open_ai_connection.api_base,
                },
                "secrets": {
                    "AZURE_OPENAI_API_KEY": azure_open_ai_connection.api_key,
                },
            }
        )
    )
    # mirror the fixture connection as a first-class azure_open_ai connection
    _client.connections.create_or_update(
        _Connection._load(
            data={
                "name": "azure_open_ai_connection",
                "type": "azure_open_ai",
                "api_type": azure_open_ai_connection.api_type,
                "api_base": azure_open_ai_connection.api_base,
                "api_version": azure_open_ai_connection.api_version,
                "api_key": azure_open_ai_connection.api_key,
            }
        )
    )
@pytest.mark.usefixtures("use_secrets_config_file", "setup_connections")
@pytest.mark.sdk_test
@pytest.mark.e2etest
class TestFlowLocalOperations:
    def test_flow_build_as_docker(self, pf) -> None:
        """Docker export honors .amlignore and skips the .runs directory."""
        source = f"{FLOWS_DIR}/intent-copilot"
        output_path = f"{FLOWS_DIR}/export/linux"
        shutil.rmtree(output_path, ignore_errors=True)
        (Path(source) / ".runs").mkdir(exist_ok=True)
        (Path(source) / ".runs" / "dummy_run_file").touch()
        with mock.patch("promptflow._sdk.operations._flow_operations.generate_random_string") as mock_random_string:
            # pin the random suffix so the build output is deterministic
            mock_random_string.return_value = "dummy1"
            pf.flows.build(
                flow=source,
                output=output_path,
                format="docker",
            )
            assert mock_random_string.call_count == 1
        # check if .amlignore works
        assert os.path.isdir(f"{source}/data")
        assert not (Path(output_path) / "flow" / "data").exists()
        # check if .runs is ignored by default
        assert os.path.isfile(f"{source}/.runs/dummy_run_file")
        assert not (Path(output_path) / "flow" / ".runs" / "dummy_run_file").exists()
        # e2e_test_docker_build_and_run(output_path)
def test_flow_build_as_docker_with_additional_includes(self, pf) -> None:
source = f"{FLOWS_DIR}/web_classification_with_additional_include"
with tempfile.TemporaryDirectory() as temp_dir:
pf.flows.build(
flow=source,
output=temp_dir,
format="docker",
)
for additional_include in [
"../external_files/convert_to_dict.py",
"../external_files/fetch_text_content_from_url.py",
"../external_files/summarize_text_content.jinja2",
]:
additional_include_path = Path(source, additional_include)
target_path = Path(temp_dir, "flow", additional_include_path.name)
assert target_path.is_file()
assert target_path.read_text() == additional_include_path.read_text()
    def test_flow_build_flow_only(self, pf) -> None:
        """flow_only export flattens includes and strips variant metadata from the DAG."""
        source = f"{FLOWS_DIR}/web_classification_with_additional_include"
        with tempfile.TemporaryDirectory() as temp_dir:
            pf.flows.build(
                flow=source,
                output=temp_dir,
                format="docker",
                flow_only=True,
            )
            for additional_include in [
                "../external_files/convert_to_dict.py",
                "../external_files/fetch_text_content_from_url.py",
                "../external_files/summarize_text_content.jinja2",
            ]:
                additional_include_path = Path(source, additional_include)
                # flow_only places files directly under the output dir (no "flow" subdir)
                target_path = Path(temp_dir, additional_include_path.name)
                assert target_path.is_file()
                assert target_path.read_text() == additional_include_path.read_text()
            assert Path(temp_dir, PROMPT_FLOW_DIR_NAME, FLOW_TOOLS_JSON).is_file()
            with open(Path(temp_dir, "flow.dag.yaml"), "r", encoding="utf-8") as f:
                flow_dag_content = load_yaml(f)
                assert NODE_VARIANTS not in flow_dag_content
                assert "additional_includes" not in flow_dag_content
                assert not any([USE_VARIANTS in node for node in flow_dag_content["nodes"]])
    def test_flow_build_as_docker_with_variant(self, pf) -> None:
        """Building with a variant inlines the selected variant's node definition."""
        source = f"{FLOWS_DIR}/web_classification_with_additional_include"
        flow_dag_path = Path(source, "flow.dag.yaml")
        flow_dag = load_yaml(flow_dag_path)
        with tempfile.TemporaryDirectory() as temp_dir:
            pf.flows.build(
                flow=source,
                output=temp_dir,
                format="docker",
                variant="${summarize_text_content.variant_0}",
            )
            new_flow_dag_path = Path(temp_dir, "flow", "flow.dag.yaml")
            new_flow_dag = load_yaml(new_flow_dag_path)
            target_node = next(filter(lambda x: x["name"] == "summarize_text_content", new_flow_dag["nodes"]))
            # "name" is added during inlining; drop it to compare against the variant source
            target_node.pop("name")
            assert target_node == flow_dag["node_variants"]["summarize_text_content"]["variants"]["variant_0"]["node"]
    def test_flow_build_generate_flow_tools_json(self, pf) -> None:
        """Building a flow generates flow.tools.json with code-tool metadata."""
        source = f"{FLOWS_DIR}/web_classification_with_additional_include"
        with tempfile.TemporaryDirectory() as temp_dir:
            pf.flows.build(
                flow=source,
                output=temp_dir,
                variant="${summarize_text_content.variant_0}",
            )
            flow_tools_path = Path(temp_dir) / "flow" / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
            assert flow_tools_path.is_file()
            # package in flow.tools.json is not determined by the flow, so we don't check it here
            assert load_yaml(flow_tools_path)["code"] == {
                "classify_with_llm.jinja2": {
                    "inputs": {
                        "examples": {"type": ["string"]},
                        "text_content": {"type": ["string"]},
                        "url": {"type": ["string"]},
                    },
                    "source": "classify_with_llm.jinja2",
                    "type": "llm",
                },
                "convert_to_dict.py": {
                    "function": "convert_to_dict",
                    "inputs": {"input_str": {"type": ["string"]}},
                    "source": "convert_to_dict.py",
                    "type": "python",
                },
                "fetch_text_content_from_url.py": {
                    "function": "fetch_text_content_from_url",
                    "inputs": {"url": {"type": ["string"]}},
                    "source": "fetch_text_content_from_url.py",
                    "type": "python",
                },
                "prepare_examples.py": {
                    "function": "prepare_examples",
                    "source": "prepare_examples.py",
                    "type": "python",
                },
                "summarize_text_content.jinja2": {
                    "inputs": {"text": {"type": ["string"]}},
                    "source": "summarize_text_content.jinja2",
                    "type": "llm",
                },
            }
    def test_flow_validate_generate_flow_tools_json(self, pf) -> None:
        """Validation regenerates flow.tools.json; includes keep their relative source paths."""
        source = f"{FLOWS_DIR}/web_classification_with_additional_include"
        flow_tools_path = Path(source) / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
        flow_tools_path.unlink(missing_ok=True)
        validation_result = pf.flows.validate(flow=source)
        assert validation_result.passed
        assert flow_tools_path.is_file()
        # package in flow.tools.json is not determined by the flow, so we don't check it here
        assert load_yaml(flow_tools_path)["code"] == {
            "classify_with_llm.jinja2": {
                "inputs": {
                    "examples": {"type": ["string"]},
                    "text_content": {"type": ["string"]},
                    "url": {"type": ["string"]},
                },
                "source": "classify_with_llm.jinja2",
                "type": "llm",
            },
            "convert_to_dict.py": {
                "function": "convert_to_dict",
                "inputs": {"input_str": {"type": ["string"]}},
                "source": os.path.join("..", "external_files", "convert_to_dict.py"),
                "type": "python",
            },
            "fetch_text_content_from_url.py": {
                "function": "fetch_text_content_from_url",
                "inputs": {"url": {"type": ["string"]}},
                "source": os.path.join("..", "external_files", "fetch_text_content_from_url.py"),
                "type": "python",
            },
            "prepare_examples.py": {
                "function": "prepare_examples",
                "source": "prepare_examples.py",
                "type": "python",
            },
            "summarize_text_content.jinja2": {
                "inputs": {"text": {"type": ["string"]}},
                "source": os.path.join("..", "external_files", "summarize_text_content.jinja2"),
                "type": "llm",
            },
            "summarize_text_content__variant_1.jinja2": {
                "inputs": {"text": {"type": ["string"]}},
                "source": "summarize_text_content__variant_1.jinja2",
                "type": "llm",
            },
        }
    def test_flow_validation_failed(self, pf) -> None:
        """Invalid flow surfaces per-node error messages yet still writes flow.tools.json."""
        source = f"{FLOWS_DIR}/web_classification_invalid"
        flow_tools_path = Path(source) / PROMPT_FLOW_DIR_NAME / FLOW_TOOLS_JSON
        flow_tools_path.unlink(missing_ok=True)
        validation_result = pf.flows.validate(flow=source)
        # copy before mutating: pop() below consumes entries as they are verified
        error_messages = copy.deepcopy(validation_result.error_messages)
        assert "Failed to load python module from file" in error_messages.pop("nodes.2.source.path", "")
        for yaml_path in [
            "node_variants.summarize_text_content.variants.variant_0.node.source.path",
            "nodes.1.source.path",
        ]:
            assert re.search(r"Meta file '.*' can not be found.", error_messages.pop(yaml_path, ""))
        assert error_messages == {
            "inputs.url.type": "Missing data for required field.",
            "outputs.category.type": "Missing data for required field.",
        }
        assert "line 22" in repr(validation_result)
        assert flow_tools_path.is_file()
        flow_tools = load_yaml(flow_tools_path)
        assert "code" in flow_tools
        assert flow_tools["code"] == {
            "classify_with_llm.jinja2": {
                "inputs": {
                    "examples": {"type": ["string"]},
                    "text_content": {"type": ["string"]},
                    "url": {"type": ["string"]},
                },
                "source": "classify_with_llm.jinja2",
                "type": "prompt",
            },
            "./classify_with_llm.jinja2": {
                "inputs": {
                    "examples": {"type": ["string"]},
                    "text_content": {"type": ["string"]},
                    "url": {"type": ["string"]},
                },
                "source": "./classify_with_llm.jinja2",
                "type": "llm",
            },
            "convert_to_dict.py": {
                "function": "convert_to_dict",
                "inputs": {"input_str": {"type": ["string"]}},
                "source": "convert_to_dict.py",
                "type": "python",
            },
            "fetch_text_content_from_url.py": {
                "function": "fetch_text_content_from_url",
                "inputs": {"url": {"type": ["string"]}},
                "source": os.path.join("..", "external_files", "fetch_text_content_from_url.py"),
                "type": "python",
            },
            "summarize_text_content__variant_1.jinja2": {
                "inputs": {"text": {"type": ["string"]}},
                "source": "summarize_text_content__variant_1.jinja2",
                "type": "llm",
            },
        }
    def test_flow_generate_tools_meta(self, pf) -> None:
        """_generate_tools_meta returns per-tool metadata plus per-source errors; source_name filters."""
        source = f"{FLOWS_DIR}/web_classification_invalid"
        tools_meta, tools_error = pf.flows._generate_tools_meta(source)
        assert tools_meta["code"] == {
            "classify_with_llm.jinja2": {
                "inputs": {
                    "examples": {"type": ["string"]},
                    "text_content": {"type": ["string"]},
                    "url": {"type": ["string"]},
                },
                "source": "classify_with_llm.jinja2",
                "type": "prompt",
            },
            "./classify_with_llm.jinja2": {
                "inputs": {
                    "examples": {"type": ["string"]},
                    "text_content": {"type": ["string"]},
                    "url": {"type": ["string"]},
                },
                "source": "./classify_with_llm.jinja2",
                "type": "llm",
            },
            "convert_to_dict.py": {
                "function": "convert_to_dict",
                "inputs": {"input_str": {"type": ["string"]}},
                "source": "convert_to_dict.py",
                "type": "python",
            },
            "fetch_text_content_from_url.py": {
                "function": "fetch_text_content_from_url",
                "inputs": {"url": {"type": ["string"]}},
                "source": os.path.join("..", "external_files", "fetch_text_content_from_url.py"),
                "type": "python",
            },
            "summarize_text_content__variant_1.jinja2": {
                "inputs": {"text": {"type": ["string"]}},
                "source": "summarize_text_content__variant_1.jinja2",
                "type": "llm",
            },
        }
        # promptflow-tools is not installed in ci
        # assert list(tools_meta["package"]) == ["promptflow.tools.azure_translator.get_translation"]
        assert "Failed to load python module from file" in tools_error.pop("prepare_examples.py", "")
        assert re.search(r"Meta file '.*' can not be found.", tools_error.pop("summarize_text_content.jinja2", ""))
        assert tools_error == {}
        # filtering by a broken source returns only that source's error
        tools_meta, tools_error = pf.flows._generate_tools_meta(source, source_name="summarize_text_content.jinja2")
        assert tools_meta == {"code": {}, "package": {}}
        assert re.search(r"Meta file '.*' can not be found.", tools_error.pop("summarize_text_content.jinja2", ""))
        assert tools_error == {}
        # filtering by a valid source returns only that source's metadata
        tools_meta, tools_error = pf.flows._generate_tools_meta(source, source_name="fetch_text_content_from_url.py")
        assert tools_meta == {
            "code": {
                "fetch_text_content_from_url.py": {
                    "function": "fetch_text_content_from_url",
                    "inputs": {"url": {"type": ["string"]}},
                    "source": os.path.join("..", "external_files", "fetch_text_content_from_url.py"),
                    "type": "python",
                },
            },
            "package": {},
        }
        assert tools_error == {}
    @pytest.mark.skip(reason="It will fail in CI for some reasons. Still need to investigate.")
    def test_flow_generate_tools_meta_timeout(self, pf) -> None:
        """Meta generation with a 1s timeout reports a timeout error per source."""
        source = f"{FLOWS_DIR}/web_classification_invalid"
        for tools_meta, tools_error in [
            pf.flows._generate_tools_meta(source, timeout=1),
            # There is no built-in method to forcefully stop a running thread in Python
            # because abruptly stopping a thread can cause issues like resource leaks,
            # deadlocks, or inconsistent states.
            # Caller (VSCode extension) will handle the timeout error.
            # pf.flows._generate_tools_meta(source, source_name="convert_to_dict.py", timeout=1),
        ]:
            assert tools_meta == {"code": {}, "package": {}}
            assert tools_error
            for error in tools_error.values():
                assert "timeout" in error
    def test_flow_generate_tools_meta_with_pkg_tool_with_custom_strong_type_connection(self, pf) -> None:
        """Package tools with custom strong-type connections expose custom_type metadata."""
        source = f"{FLOWS_DIR}/flow_with_package_tool_with_custom_strong_type_connection"
        tools_meta, tools_error = pf.flows._generate_tools_meta(source)
        assert tools_error == {}
        assert tools_meta["code"] == {}
        assert tools_meta["package"] == {
            "my_tool_package.tools.my_tool_1.my_tool": {
                "function": "my_tool",
                "inputs": {
                    "connection": {
                        "type": ["CustomConnection"],
                        "custom_type": ["MyFirstConnection", "MySecondConnection"],
                    },
                    "input_text": {"type": ["string"]},
                },
                "module": "my_tool_package.tools.my_tool_1",
                "name": "My First Tool",
                "description": "This is my first tool",
                "type": "python",
                "package": "test-custom-tools",
                "package_version": "0.0.2",
            },
            "my_tool_package.tools.my_tool_2.MyTool.my_tool": {
                "class_name": "MyTool",
                "function": "my_tool",
                "inputs": {
                    "connection": {"type": ["CustomConnection"], "custom_type": ["MySecondConnection"]},
                    "input_text": {"type": ["string"]},
                },
                "module": "my_tool_package.tools.my_tool_2",
                "name": "My Second Tool",
                "description": "This is my second tool",
                "type": "python",
                "package": "test-custom-tools",
                "package_version": "0.0.2",
            },
        }
    def test_flow_generate_tools_meta_with_script_tool_with_custom_strong_type_connection(self, pf) -> None:
        """Script tools with custom strong-type connections appear under "code" metadata."""
        source = f"{FLOWS_DIR}/flow_with_script_tool_with_custom_strong_type_connection"
        tools_meta, tools_error = pf.flows._generate_tools_meta(source)
        assert tools_error == {}
        assert tools_meta["package"] == {}
        assert tools_meta["code"] == {
            "my_script_tool.py": {
                "function": "my_tool",
                "inputs": {"connection": {"type": ["CustomConnection"]}, "input_param": {"type": ["string"]}},
                "source": "my_script_tool.py",
                "type": "python",
            }
        }
| promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_local_operations.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_local_operations.py",
"repo_id": "promptflow",
"token_count": 10437
} | 52 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
import pytest
from marshmallow import ValidationError
from promptflow import load_flow
from promptflow._sdk.entities._eager_flow import EagerFlow
from promptflow._sdk.entities._flow import ProtectedFlow
from promptflow.exceptions import UserErrorException
FLOWS_DIR = Path("./tests/test_configs/flows")
EAGER_FLOWS_DIR = Path("./tests/test_configs/eager_flows")
@pytest.mark.sdk_test
@pytest.mark.unittest
class TestRun:
    """Unit tests for load_flow over eager flows and DAG flows."""
    # NOTE(review): class name says "Run" but these cases test flow loading — consider renaming
    @pytest.mark.parametrize(
        "kwargs",
        [
            {"source": EAGER_FLOWS_DIR / "simple_with_yaml"},
            {"source": EAGER_FLOWS_DIR / "simple_with_yaml" / "flow.dag.yaml"},
            {"source": EAGER_FLOWS_DIR / "simple_without_yaml" / "entry.py", "entry": "my_flow"},
            {"source": EAGER_FLOWS_DIR / "multiple_entries" / "entry1.py", "entry": "my_flow1"},
            {"source": EAGER_FLOWS_DIR / "multiple_entries" / "entry1.py", "entry": "my_flow2"},
        ],
    )
    def test_eager_flow_load(self, kwargs):
        """Eager flow sources (dir, yaml, entry file + entry name) load as EagerFlow."""
        flow = load_flow(**kwargs)
        assert isinstance(flow, EagerFlow)
    @pytest.mark.parametrize(
        "kwargs",
        [
            {"source": FLOWS_DIR / "print_input_flow"},
            {"source": FLOWS_DIR / "print_input_flow" / "flow.dag.yaml"},
        ],
    )
    def test_dag_flow_load(self, kwargs):
        """DAG flow sources load as ProtectedFlow."""
        flow = load_flow(**kwargs)
        assert isinstance(flow, ProtectedFlow)
    def test_flow_load_advanced(self):
        """Eager flow yaml extras (environment) are preserved in the loaded data."""
        flow = load_flow(source=EAGER_FLOWS_DIR / "flow_with_environment")
        assert isinstance(flow, EagerFlow)
        assert flow._data["environment"] == {"python_requirements_txt": "requirements.txt"}
    @pytest.mark.parametrize(
        "kwargs, error_message, exception_type",
        [
            (
                {
                    "source": EAGER_FLOWS_DIR / "multiple_entries" / "entry1.py",
                },
                "Entry function is not specified",
                UserErrorException,
            ),
            (
                {
                    "source": EAGER_FLOWS_DIR / "multiple_entries" / "not_exist.py",
                },
                "does not exist",
                UserErrorException,
            ),
            (
                {
                    "source": EAGER_FLOWS_DIR / "invalid_no_path",
                },
                "{'path': ['Missing data for required field.']}",
                ValidationError,
            ),
            (
                {
                    "source": EAGER_FLOWS_DIR / "invalid_illegal_path",
                },
                "Can't find directory or file in resolved absolute path:",
                ValidationError,
            ),
            (
                {"source": EAGER_FLOWS_DIR / "invalid_extra_fields_nodes"},
                "{'nodes': ['Unknown field.']}",
                ValidationError,
            ),
        ],
    )
    def test_flow_load_invalid(self, kwargs, error_message, exception_type):
        """Invalid flow sources raise the expected exception type and message."""
        with pytest.raises(exception_type) as e:
            load_flow(**kwargs)
        assert error_message in str(e.value)
| promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_flow.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_flow.py",
"repo_id": "promptflow",
"token_count": 1607
} | 53 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
from promptflow._sdk._utils import get_promptflow_sdk_version
from ..utils import PFSOperations
@pytest.mark.e2etest
class TestGeneralAPIs:
    """E2E tests for general (non-resource) PFS endpoints."""

    def test_heartbeat(self, pfs_op: PFSOperations) -> None:
        """Heartbeat returns 200 and reports the installed promptflow SDK version."""
        resp = pfs_op.heartbeat()
        assert resp.status_code == 200
        payload = resp.json
        assert isinstance(payload, dict)
        assert "promptflow" in payload
        assert payload["promptflow"] == get_promptflow_sdk_version()
| promptflow/src/promptflow/tests/sdk_pfs_test/e2etests/test_general_apis.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_pfs_test/e2etests/test_general_apis.py",
"repo_id": "promptflow",
"token_count": 217
} | 54 |
path: ./entry.py
entry: my_flow | promptflow/src/promptflow/tests/test_configs/eager_flows/dummy_flow_with_exception/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/dummy_flow_with_exception/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 12
} | 55 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import time
def my_flow(input_val) -> str:
    """Simple flow with yaml."""
    # Deliberately long-running: this fixture exercises long-running-flow handling.
    time.sleep(100)
    greeting = f"Hello world! {input_val}"
    print(greeting)
    return greeting
| promptflow/src/promptflow/tests/test_configs/eager_flows/long_running/entry.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/long_running/entry.py",
"repo_id": "promptflow",
"token_count": 94
} | 56 |
{
"text": "hello"
} | promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/inputs.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/activate_condition_always_met/inputs.json",
"repo_id": "promptflow",
"token_count": 13
} | 57 |
[
{
"expected_node_count": 2,
"expected_outputs": {
"output": "Execution"
},
"expected_bypassed_nodes": [
"nodeA"
]
}
] | promptflow/src/promptflow/tests/test_configs/flows/all_depedencies_bypassed_with_activate_met/expected_result.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/all_depedencies_bypassed_with_activate_met/expected_result.json",
"repo_id": "promptflow",
"token_count": 117
} | 58 |
inputs:
input_str:
type: string
default: Hello
outputs:
ouput1:
type: string
reference: ${async_passthrough1.output}
output2:
type: string
reference: ${async_passthrough2.output}
nodes:
- name: async_passthrough
type: python
source:
type: code
path: async_passthrough.py
inputs:
input1: ${inputs.input_str}
wait_seconds: 3
- name: async_passthrough1
type: python
source:
type: code
path: async_passthrough.py
inputs:
input1: ${async_passthrough.output}
wait_seconds: 3
- name: async_passthrough2
type: python
source:
type: code
path: async_passthrough.py
inputs:
input1: ${async_passthrough.output}
wait_seconds: 3
| promptflow/src/promptflow/tests/test_configs/flows/async_tools/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/async_tools/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 296
} | 59 |
model: gpt-4-1106-preview
instructions: You are a helpful assistant.
tools:
- type: code_interpreter
- type: function
source:
type: code
path: get_calorie_by_jogging.py
tool_type: python
- type: function
source:
type: code
path: get_calorie_by_swimming.py
tool_type: python
- type: function
source:
type: code
path: get_current_city.py
tool_type: python
- type: function
source:
type: code
path: get_temperature.py
tool_type: python
| promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/assistant_definition.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/chat-with-assistant-no-file/assistant_definition.yaml",
"repo_id": "promptflow",
"token_count": 224
} | 60 |
id: template_standard_flow
name: Template Standard Flow
inputs:
incident_content:
type: string
incident_id:
type: int
outputs:
investigation_method:
type: string
reference: ${investigation_method.output}
nodes:
- name: incident_id_extractor
type: python
source:
type: code
path: incident_id_extractor.py
inputs:
incident_content: ${inputs.incident_content}
incident_id: ${inputs.incident_id}
- name: job_info_extractor
type: python
source:
type: code
path: job_info_extractor.py
inputs:
incident_content: ${incident_id_extractor.output.incident_content}
activate:
when: ${incident_id_extractor.output.has_incident_id}
is: false
- name: incident_info_extractor
type: python
source:
type: code
path: incident_info_extractor.py
inputs:
incident: ${incident_id_extractor.output}
activate:
when: ${incident_id_extractor.output.has_incident_id}
is: true
- name: tsg_retriever
type: python
source:
type: code
path: tsg_retriever.py
inputs:
content: ${incident_info_extractor.output.incident_content}
activate:
when: ${incident_info_extractor.output.retriever}
is: tsg
- name: icm_retriever
type: python
source:
type: code
path: icm_retriever.py
inputs:
content: ${incident_info_extractor.output.incident_content}
activate:
when: ${incident_info_extractor.output.retriever}
is: icm
- name: kql_tsg_retriever
type: python
source:
type: code
path: kql_tsg_retriever.py
inputs:
content: ${incident_info_extractor.output.incident_content}
activate:
when: ${incident_info_extractor.output.retriever}
is: tsg
- name: investigation_steps
type: llm
source:
type: code
path: investigation_steps.jinja2
inputs:
deployment_name: gpt-35-turbo
temperature: 0.7
top_p: 1
stop: ""
max_tokens: 256
presence_penalty: 0
frequency_penalty: 0
logit_bias: ""
first_method: ${icm_retriever.output}
second_method: ${tsg_retriever.output}
third_method: ${kql_tsg_retriever.output}
provider: AzureOpenAI
connection: azure_open_ai_connection
api: chat
module: promptflow.tools.aoai
- name: retriever_summary
type: python
source:
type: code
path: retriever_summary.py
inputs:
summary: ${investigation_steps.output}
- name: investigation_method
type: python
source:
type: code
path: investigation_method.py
inputs:
method1: ${job_info_extractor.output}
method2: ${retriever_summary.output}
| promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_activate/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 999
} | 61 |
inputs:
case:
type: string
default: double
is_chat_input: false
value:
type: int
default: 1
outputs:
output:
type: string
reference: ${collect_node.output}
evaluation_only: false
is_chat_output: false
nodes:
- name: double
type: python
source:
type: code
path: double.py
inputs:
input: ${inputs.value}
activate:
when: ${inputs.case}
is: double
aggregation: false
- name: square
type: python
source:
type: code
path: square.py
inputs:
input: ${inputs.value}
activate:
when: ${inputs.case}
is: square
aggregation: false
- name: aggregation_double
type: python
source:
type: code
path: aggregation_node.py
inputs:
input: ${double.output}
aggregation: true
- name: aggregation_square
type: python
source:
type: code
path: aggregation_node.py
inputs:
input: ${square.output}
aggregation: true
- name: collect_node
type: python
source:
type: code
path: collect_node.py
inputs:
input1: ${double.output}
input2: ${square.output}
aggregation: false
| promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 426
} | 62 |
{"question": "What is 2 to the 10th power?"}
{"question": "What is the sum of 2 and 2?"} | promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/inputs.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_langchain_traces/inputs.jsonl",
"repo_id": "promptflow",
"token_count": 29
} | 63 |
inputs:
text:
type: string
outputs:
answer:
type: string
reference: ${echo_generator.output}
nodes:
- name: echo_generator
type: python
source:
type: code
path: echo.py
inputs:
text: ${inputs.text}
| promptflow/src/promptflow/tests/test_configs/flows/generator_nodes/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/generator_nodes/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 97
} | 64 |
inputs:
question:
type: string
chat_history:
type: list
stream:
type: bool
outputs:
answer:
type: string
reference: ${chat.output}
nodes:
- name: chat
type: python
source:
type: code
path: chat.py
inputs:
question: ${inputs.question}
chat_history: ${inputs.chat_history}
connection: azure_open_ai_connection
stream: ${inputs.stream}
| promptflow/src/promptflow/tests/test_configs/flows/openai_chat_api_flow/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/openai_chat_api_flow/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 157
} | 65 |
from promptflow import tool
import sys
@tool
def print_inputs(
    text: str = None,
):
    """Echo the given text to both stdout and stderr, then return it unchanged."""
    stdout_line = f"STDOUT: {text}"
    stderr_line = f"STDERR: {text}"
    print(stdout_line)
    print(stderr_line, file=sys.stderr)
    return text
| promptflow/src/promptflow/tests/test_configs/flows/print_input_flow/print_input.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/print_input_flow/print_input.py",
"repo_id": "promptflow",
"token_count": 75
} | 66 |
from promptflow import tool
@tool
def my_python_tool_with_failed_line(idx: int, mod) -> int:
    """Return idx, but raise for every idx divisible by mod (simulates partial line failures)."""
    if idx % mod != 0:
        return idx
    raise Exception("Failed")
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/my_python_tool_with_failed_line.py",
"repo_id": "promptflow",
"token_count": 66
} | 67 |
{"image": {"data:image/png;path":"logo_1.png"}}
{"image": {"data:image/png;path":"logo_2.png"}} | promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/image_inputs/inputs.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/image_inputs/inputs.jsonl",
"repo_id": "promptflow",
"token_count": 41
} | 68 |
inputs:
text:
type: string
outputs:
output:
type: string
reference: ${node1.output}
nodes:
- name: node1
type: python
source:
type: code
path: dummy_utils/main.py
inputs:
x: ${inputs.text}
| promptflow/src/promptflow/tests/test_configs/flows/script_with_import/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/script_with_import/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 97
} | 69 |
id: template_standard_flow
name: Template Standard Flow
inputs:
input:
type: string
is_chat_input: false
index:
type: int
is_chat_input: false
outputs:
output:
type: string
reference: ${python_node.output}
nodes:
- name: python_node
type: python
source:
type: code
path: python_node.py
inputs:
index: ${inputs.index}
input: ${inputs.input}
use_variants: false
node_variants: {}
| promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_ten_inputs/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_ten_inputs/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 171
} | 70 |
[
{
"url": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h"
},
{
"url": "https://www.microsoft.com/en-us/windows/"
}
]
| promptflow/src/promptflow/tests/test_configs/flows/web_classification_v1/samples.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification_v1/samples.json",
"repo_id": "promptflow",
"token_count": 86
} | 71 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.029'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.098'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.077'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.099'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:15:32 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '379'
content-md5:
- lI/pz9jzTQ7Td3RHPL7y7w==
content-type:
- application/octet-stream
last-modified:
- Mon, 06 Nov 2023 08:30:18 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Mon, 06 Nov 2023 08:30:18 GMT
x-ms-meta-name:
- 94331215-cf7f-452a-9f1a-1d276bc9b0e4
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 3f163752-edb0-4afc-a6f5-b0a670bd7c24
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:15:33 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.082'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.082'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:15:36 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '853'
content-md5:
- ylTeNqjvuOvtzEZJ/X5n3A==
content-type:
- application/octet-stream
last-modified:
- Fri, 12 Jan 2024 08:13:57 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Fri, 12 Jan 2024 08:13:56 GMT
x-ms-meta-name:
- 950201e8-c52c-4b15-ada1-5e58de9b2f4d
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:15:38 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml",
"runId": "batch_run_name", "runDisplayName": "batch_run_name", "runExperimentName":
"", "nodeVariant": "${summarize_text_content.variant_0}", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"},
"inputsMapping": {"url": "${data.url}"}, "connections": {}, "environmentVariables":
{}, "runtimeName": "fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000",
"sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '873'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"batch_run_name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '6.627'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "fetch_text_content_from_url", "type":
"python", "source": {"type": "code", "path": "fetch_text_content_from_url.py"},
"inputs": {"fetch_url": "${inputs.url}"}, "tool": "fetch_text_content_from_url.py",
"reduce": false}, {"name": "prepare_examples", "type": "python", "source":
{"type": "code", "path": "prepare_examples.py"}, "inputs": {}, "tool": "prepare_examples.py",
"reduce": false}, {"name": "classify_with_llm", "type": "llm", "source": {"type":
"code", "path": "classify_with_llm.jinja2"}, "inputs": {"deployment_name":
"gpt-35-turbo", "suffix": "", "max_tokens": "128", "temperature": "0.1", "top_p":
"1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": "0",
"frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}",
"examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"},
"tool": "classify_with_llm.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"},
{"name": "convert_to_dict", "type": "python", "source": {"type": "code", "path":
"convert_to_dict.py"}, "inputs": {"input_str": "${classify_with_llm.output}"},
"tool": "convert_to_dict.py", "reduce": false}, {"name": "summarize_text_content",
"type": "llm", "source": {"type": "code", "path": "summarize_text_content.jinja2"},
"inputs": {"deployment_name": "gpt-35-turbo", "suffix": "", "max_tokens":
"128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False",
"stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of":
"1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "tool":
"summarize_text_content.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"}],
"tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs":
{"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "hate_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "self_harm_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "sexual_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text": {"type":
["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"description": "Use Azure Content Safety to detect harmful content.", "module":
"promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin":
true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "classify_with_llm.jinja2", "type":
"prompt", "inputs": {"examples": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text_content":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "classify_with_llm.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"convert_to_dict.py", "type": "python", "inputs": {"input_str": {"type": ["string"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"source": "convert_to_dict.py", "function": "convert_to_dict", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "fetch_text_content_from_url.py",
"type": "python", "inputs": {"fetch_url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "fetch_text_content_from_url.py",
"function": "fetch_text_content_from_url", "is_builtin": false, "enable_kwargs":
false, "tool_state": "stable"}, {"name": "prepare_examples.py", "type": "python",
"source": "prepare_examples.py", "function": "prepare_examples", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "summarize_text_content.jinja2",
"type": "prompt", "inputs": {"text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "summarize_text_content.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"summarize_text_content__variant_1.jinja2", "type": "prompt", "inputs": {"text":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "source": "summarize_text_content__variant_1.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"url": {"type": "string", "default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"is_chat_input": false}}, "outputs": {"category": {"type": "string", "reference":
"${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output":
false}, "evidence": {"type": "string", "reference": "${convert_to_dict.output.evidence}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "batch_run_name", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"url": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "83cbe8e6-2d19-46c6-8fba-7875fdb033e1",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '16239'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.263'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "fetch_text_content_from_url", "type":
"python", "source": {"type": "code", "path": "fetch_text_content_from_url.py"},
"inputs": {"fetch_url": "${inputs.url}"}, "tool": "fetch_text_content_from_url.py",
"reduce": false}, {"name": "prepare_examples", "type": "python", "source":
{"type": "code", "path": "prepare_examples.py"}, "inputs": {}, "tool": "prepare_examples.py",
"reduce": false}, {"name": "classify_with_llm", "type": "llm", "source": {"type":
"code", "path": "classify_with_llm.jinja2"}, "inputs": {"deployment_name":
"gpt-35-turbo", "suffix": "", "max_tokens": "128", "temperature": "0.1", "top_p":
"1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": "0",
"frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}",
"examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"},
"tool": "classify_with_llm.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"},
{"name": "convert_to_dict", "type": "python", "source": {"type": "code", "path":
"convert_to_dict.py"}, "inputs": {"input_str": "${classify_with_llm.output}"},
"tool": "convert_to_dict.py", "reduce": false}, {"name": "summarize_text_content",
"type": "llm", "source": {"type": "code", "path": "summarize_text_content.jinja2"},
"inputs": {"deployment_name": "gpt-35-turbo", "suffix": "", "max_tokens":
"128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False",
"stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of":
"1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "tool":
"summarize_text_content.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"}],
"tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs":
{"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "hate_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "self_harm_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "sexual_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text": {"type":
["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"description": "Use Azure Content Safety to detect harmful content.", "module":
"promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin":
true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "classify_with_llm.jinja2", "type":
"prompt", "inputs": {"examples": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text_content":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "classify_with_llm.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"convert_to_dict.py", "type": "python", "inputs": {"input_str": {"type": ["string"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"source": "convert_to_dict.py", "function": "convert_to_dict", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "fetch_text_content_from_url.py",
"type": "python", "inputs": {"fetch_url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "fetch_text_content_from_url.py",
"function": "fetch_text_content_from_url", "is_builtin": false, "enable_kwargs":
false, "tool_state": "stable"}, {"name": "prepare_examples.py", "type": "python",
"source": "prepare_examples.py", "function": "prepare_examples", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "summarize_text_content.jinja2",
"type": "prompt", "inputs": {"text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "summarize_text_content.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"summarize_text_content__variant_1.jinja2", "type": "prompt", "inputs": {"text":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "source": "summarize_text_content__variant_1.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"url": {"type": "string", "default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"is_chat_input": false}}, "outputs": {"category": {"type": "string", "reference":
"${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output":
false}, "evidence": {"type": "string", "reference": "${convert_to_dict.output.evidence}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "batch_run_name", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"url": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "83cbe8e6-2d19-46c6-8fba-7875fdb033e1",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '16239'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.488'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "fetch_text_content_from_url", "type":
"python", "source": {"type": "code", "path": "fetch_text_content_from_url.py"},
"inputs": {"fetch_url": "${inputs.url}"}, "tool": "fetch_text_content_from_url.py",
"reduce": false}, {"name": "prepare_examples", "type": "python", "source":
{"type": "code", "path": "prepare_examples.py"}, "inputs": {}, "tool": "prepare_examples.py",
"reduce": false}, {"name": "classify_with_llm", "type": "llm", "source": {"type":
"code", "path": "classify_with_llm.jinja2"}, "inputs": {"deployment_name":
"gpt-35-turbo", "suffix": "", "max_tokens": "128", "temperature": "0.1", "top_p":
"1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": "0",
"frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}",
"examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"},
"tool": "classify_with_llm.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"},
{"name": "convert_to_dict", "type": "python", "source": {"type": "code", "path":
"convert_to_dict.py"}, "inputs": {"input_str": "${classify_with_llm.output}"},
"tool": "convert_to_dict.py", "reduce": false}, {"name": "summarize_text_content",
"type": "llm", "source": {"type": "code", "path": "summarize_text_content.jinja2"},
"inputs": {"deployment_name": "gpt-35-turbo", "suffix": "", "max_tokens":
"128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False",
"stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of":
"1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "tool":
"summarize_text_content.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"}],
"tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs":
{"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "hate_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "self_harm_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "sexual_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text": {"type":
["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"description": "Use Azure Content Safety to detect harmful content.", "module":
"promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin":
true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "classify_with_llm.jinja2", "type":
"prompt", "inputs": {"examples": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text_content":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "classify_with_llm.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"convert_to_dict.py", "type": "python", "inputs": {"input_str": {"type": ["string"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"source": "convert_to_dict.py", "function": "convert_to_dict", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "fetch_text_content_from_url.py",
"type": "python", "inputs": {"fetch_url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "fetch_text_content_from_url.py",
"function": "fetch_text_content_from_url", "is_builtin": false, "enable_kwargs":
false, "tool_state": "stable"}, {"name": "prepare_examples.py", "type": "python",
"source": "prepare_examples.py", "function": "prepare_examples", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "summarize_text_content.jinja2",
"type": "prompt", "inputs": {"text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "summarize_text_content.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"summarize_text_content__variant_1.jinja2", "type": "prompt", "inputs": {"text":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "source": "summarize_text_content__variant_1.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"url": {"type": "string", "default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"is_chat_input": false}}, "outputs": {"category": {"type": "string", "reference":
"${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output":
false}, "evidence": {"type": "string", "reference": "${convert_to_dict.output.evidence}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "batch_run_name", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"url": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "83cbe8e6-2d19-46c6-8fba-7875fdb033e1",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '16239'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.469'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "fetch_text_content_from_url", "type":
"python", "source": {"type": "code", "path": "fetch_text_content_from_url.py"},
"inputs": {"fetch_url": "${inputs.url}"}, "tool": "fetch_text_content_from_url.py",
"reduce": false}, {"name": "prepare_examples", "type": "python", "source":
{"type": "code", "path": "prepare_examples.py"}, "inputs": {}, "tool": "prepare_examples.py",
"reduce": false}, {"name": "classify_with_llm", "type": "llm", "source": {"type":
"code", "path": "classify_with_llm.jinja2"}, "inputs": {"deployment_name":
"gpt-35-turbo", "suffix": "", "max_tokens": "128", "temperature": "0.1", "top_p":
"1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": "0",
"frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}",
"examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"},
"tool": "classify_with_llm.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"},
{"name": "convert_to_dict", "type": "python", "source": {"type": "code", "path":
"convert_to_dict.py"}, "inputs": {"input_str": "${classify_with_llm.output}"},
"tool": "convert_to_dict.py", "reduce": false}, {"name": "summarize_text_content",
"type": "llm", "source": {"type": "code", "path": "summarize_text_content.jinja2"},
"inputs": {"deployment_name": "gpt-35-turbo", "suffix": "", "max_tokens":
"128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False",
"stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of":
"1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "tool":
"summarize_text_content.jinja2", "reduce": false, "api": "chat", "provider":
"AzureOpenAI", "connection": "azure_open_ai_connection", "module": "promptflow.tools.aoai"}],
"tools": [{"name": "Content Safety (Text Analyze)", "type": "python", "inputs":
{"connection": {"type": ["AzureContentSafetyConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "hate_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "self_harm_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "sexual_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text": {"type":
["string"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "violence_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"description": "Use Azure Content Safety to detect harmful content.", "module":
"promptflow.tools.azure_content_safety", "function": "analyze_text", "is_builtin":
true, "package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "classify_with_llm.jinja2", "type":
"prompt", "inputs": {"examples": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "text_content":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "classify_with_llm.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"convert_to_dict.py", "type": "python", "inputs": {"input_str": {"type": ["string"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"}},
"source": "convert_to_dict.py", "function": "convert_to_dict", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "fetch_text_content_from_url.py",
"type": "python", "inputs": {"fetch_url": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "fetch_text_content_from_url.py",
"function": "fetch_text_content_from_url", "is_builtin": false, "enable_kwargs":
false, "tool_state": "stable"}, {"name": "prepare_examples.py", "type": "python",
"source": "prepare_examples.py", "function": "prepare_examples", "is_builtin":
false, "enable_kwargs": false, "tool_state": "stable"}, {"name": "summarize_text_content.jinja2",
"type": "prompt", "inputs": {"text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "summarize_text_content.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}, {"name":
"summarize_text_content__variant_1.jinja2", "type": "prompt", "inputs": {"text":
{"type": ["string"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "source": "summarize_text_content__variant_1.jinja2",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"url": {"type": "string", "default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h",
"is_chat_input": false}}, "outputs": {"category": {"type": "string", "reference":
"${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output":
false}, "evidence": {"type": "string", "reference": "${convert_to_dict.output.evidence}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "batch_run_name", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"url": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "83cbe8e6-2d19-46c6-8fba-7875fdb033e1",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '16239'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.256'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.080'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.129'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:16:24 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '379'
content-md5:
- lI/pz9jzTQ7Td3RHPL7y7w==
content-type:
- application/octet-stream
last-modified:
- Mon, 06 Nov 2023 08:30:18 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Mon, 06 Nov 2023 08:30:18 GMT
x-ms-meta-name:
- 94331215-cf7f-452a-9f1a-1d276bc9b0e4
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 3f163752-edb0-4afc-a6f5-b0a670bd7c24
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:16:25 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.135'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.089'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:16:29 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/eval-classification-accuracy/calculate_accuracy.py
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '409'
content-md5:
- OyENtlqGVUTrY5zKuzo8XA==
content-type:
- application/octet-stream
last-modified:
- Tue, 21 Nov 2023 08:03:40 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Tue, 21 Nov 2023 08:03:39 GMT
x-ms-meta-name:
- fd932777-4f3a-4c1d-9c3a-24d45835d7e1
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:16:30 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/eval-classification-accuracy/calculate_accuracy.py
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/eval-classification-accuracy/flow.dag.yaml",
"runId": "eval_run_name", "runDisplayName": "eval_run_name", "runExperimentName":
"", "variantRunId": "batch_run_name", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"},
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.category}"},
"connections": {}, "environmentVariables": {}, "runtimeName": "fake-runtime-name",
"sessionId": "000000000000000000000000000000000000000000000000", "sessionSetupMode":
"SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '947'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"eval_run_name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '6.931'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source":
{"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}",
"prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false},
{"name": "calculate_accuracy", "type": "python", "source": {"type": "code",
"path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"},
"tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content
Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type":
["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "hate_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "self_harm_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "sexual_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "violence_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "description":
"Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety",
"function": "analyze_text", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools":
["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state":
"stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection":
{"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type":
"python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py",
"function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false,
"tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs":
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py",
"function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP",
"description": "Please specify the groundtruth column, which contains the
true label to the outputs that your flow produces.", "is_chat_input": false},
"prediction": {"type": "string", "default": "APP", "description": "Please
specify the prediction column, which contains the predicted outputs that your
flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string",
"reference": "${grade.output}", "evaluation_only": false, "is_chat_output":
false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name",
"flowRunId": "eval_run_name", "flowRunDisplayName": "eval_run_name", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.category}"},
"outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "73d0c061-880c-466c-8fb0-b29e9ae7ab66",
"studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '13869'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.388'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source":
{"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}",
"prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false},
{"name": "calculate_accuracy", "type": "python", "source": {"type": "code",
"path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"},
"tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content
Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type":
["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "hate_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "self_harm_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "sexual_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "violence_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "description":
"Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety",
"function": "analyze_text", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools":
["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state":
"stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection":
{"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type":
"python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py",
"function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false,
"tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs":
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py",
"function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP",
"description": "Please specify the groundtruth column, which contains the
true label to the outputs that your flow produces.", "is_chat_input": false},
"prediction": {"type": "string", "default": "APP", "description": "Please
specify the prediction column, which contains the predicted outputs that your
flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string",
"reference": "${grade.output}", "evaluation_only": false, "is_chat_output":
false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name",
"flowRunId": "eval_run_name", "flowRunDisplayName": "eval_run_name", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.category}"},
"outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "73d0c061-880c-466c-8fb0-b29e9ae7ab66",
"studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '13869'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.270'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source":
{"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}",
"prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false},
{"name": "calculate_accuracy", "type": "python", "source": {"type": "code",
"path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"},
"tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content
Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type":
["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "hate_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "self_harm_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "sexual_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "violence_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "description":
"Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety",
"function": "analyze_text", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools":
["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state":
"stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection":
{"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type":
"python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py",
"function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false,
"tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs":
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py",
"function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP",
"description": "Please specify the groundtruth column, which contains the
true label to the outputs that your flow produces.", "is_chat_input": false},
"prediction": {"type": "string", "default": "APP", "description": "Please
specify the prediction column, which contains the predicted outputs that your
flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string",
"reference": "${grade.output}", "evaluation_only": false, "is_chat_output":
false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name",
"flowRunId": "eval_run_name", "flowRunDisplayName": "eval_run_name", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.category}"},
"outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "73d0c061-880c-466c-8fb0-b29e9ae7ab66",
"studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '13869'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.336'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source":
{"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}",
"prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false},
{"name": "calculate_accuracy", "type": "python", "source": {"type": "code",
"path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"},
"tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content
Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type":
["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "hate_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "self_harm_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "sexual_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "violence_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "description":
"Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety",
"function": "analyze_text", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools":
["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state":
"stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection":
{"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type":
"python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py",
"function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false,
"tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs":
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py",
"function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP",
"description": "Please specify the groundtruth column, which contains the
true label to the outputs that your flow produces.", "is_chat_input": false},
"prediction": {"type": "string", "default": "APP", "description": "Please
specify the prediction column, which contains the predicted outputs that your
flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string",
"reference": "${grade.output}", "evaluation_only": false, "is_chat_output":
false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name",
"flowRunId": "eval_run_name", "flowRunDisplayName": "eval_run_name", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.category}"},
"outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "73d0c061-880c-466c-8fb0-b29e9ae7ab66",
"studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '13869'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.366'
status:
code: 200
message: OK
- request:
body: '{"runId": "batch_run_name", "selectRunMetadata": true, "selectRunDefinition":
true, "selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705047344, "rootRunId": "batch_run_name",
"createdUtc": "2024-01-12T08:15:44.0948865+00:00", "createdBy": {"userObjectId":
"00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 6,
"statusRevision": 3, "runUuid": "e52bd2e7-d460-459a-a3ab-07d848ea525d", "parentRunUuid":
null, "rootRunUuid": "e52bd2e7-d460-459a-a3ab-07d848ea525d", "lastStartTimeUtc":
null, "currentComputeTime": null, "computeDuration": "00:00:06.7505192", "effectiveStartTimeUtc":
null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "18a66f5f-dbdf-4c17-9dd7-1634712a9cbe",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:16:06.9067508+00:00", "duration":
"00:00:06.7505192", "cancelationReason": null, "currentAttemptId": 1, "runId":
"batch_run_name", "parentRunId": null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009",
"status": "Completed", "startTimeUtc": "2024-01-12T08:16:01.0613927+00:00",
"endTimeUtc": "2024-01-12T08:16:07.8119119+00:00", "scheduleId": null, "displayName":
"batch_run_name", "name": null, "dataContainerId": "dcid.batch_run_name",
"description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun",
"runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow",
"computeType": "AmlcDsi"}, "properties": {"azureml.promptflow.runtime_name":
"test-runtime-ci", "azureml.promptflow.runtime_version": "20231204.v4", "azureml.promptflow.definition_file_name":
"flow.dag.yaml", "azureml.promptflow.session_id": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597",
"azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0",
"azureml.promptflow.node_variant": "${summarize_text_content.variant_0}",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/a1fa6ef1ead7ff3ce76b36250f6f5461/web_classification/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl",
"azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "_azureml.evaluation_run":
"promptflow.BatchRun", "azureml.promptflow.snapshot_id": "83cbe8e6-2d19-46c6-8fba-7875fdb033e1",
"azureml.promptflow.total_tokens": "2448", "_azureml.evaluate_artifacts":
"[{\"path\": \"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters":
{}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets":
[], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets":
[], "runDefinition": null, "jobSpecification": null, "primaryMetricName":
null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri":
null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId":
"azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_debug_info/versions/1",
"type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-length:
- '4732'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.049'
status:
code: 200
message: OK
- request:
body: '{"runId": "eval_run_name", "selectRunMetadata": true, "selectRunDefinition":
true, "selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705047396, "rootRunId": "eval_run_name",
"createdUtc": "2024-01-12T08:16:36.0629362+00:00", "createdBy": {"userObjectId":
"00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 6,
"statusRevision": 3, "runUuid": "a3978660-b3ed-4116-b125-6a52e8c8e4cb", "parentRunUuid":
null, "rootRunUuid": "a3978660-b3ed-4116-b125-6a52e8c8e4cb", "lastStartTimeUtc":
null, "currentComputeTime": null, "computeDuration": "00:00:04.8368673", "effectiveStartTimeUtc":
null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "18a66f5f-dbdf-4c17-9dd7-1634712a9cbe",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:16:57.9353592+00:00", "duration":
"00:00:04.8368673", "cancelationReason": null, "currentAttemptId": 1, "runId":
"eval_run_name", "parentRunId": null, "experimentId": "7bdec279-f99c-4ed3-b0b8-dd75698b8fd0",
"status": "Completed", "startTimeUtc": "2024-01-12T08:16:54.0128269+00:00",
"endTimeUtc": "2024-01-12T08:16:58.8496942+00:00", "scheduleId": null, "displayName":
"eval_run_name", "name": null, "dataContainerId": "dcid.eval_run_name", "description":
null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2":
{"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType":
"AmlcDsi"}, "properties": {"azureml.promptflow.runtime_name": "test-runtime-ci",
"azureml.promptflow.runtime_version": "20231204.v4", "azureml.promptflow.definition_file_name":
"flow.dag.yaml", "azureml.promptflow.session_id": "f8e4236a4e78e7f7125bbd811ec7976cb330412723a530f8",
"azureml.promptflow.flow_lineage_id": "26c575d863a85371ef937096728441d8c68c3e737b5a1bfeae5ac8f3b9ccb048",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/1aa3064d06f6170abbc488cc35c713b9/eval-classification-accuracy/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl",
"azureml.promptflow.input_run_id": "batch_run_name", "azureml.promptflow.inputs_mapping":
"{\"groundtruth\":\"${data.answer}\",\"prediction\":\"${run.outputs.category}\"}",
"_azureml.evaluation_run": "promptflow.BatchRun", "azureml.promptflow.snapshot_id":
"73d0c061-880c-466c-8fb0-b29e9ae7ab66", "azureml.promptflow.total_tokens":
"0", "_azureml.evaluate_artifacts": "[{\"path\": \"instance_results.jsonl\",
\"type\": \"table\"}]"}, "parameters": {}, "actionUris": {}, "scriptName":
null, "target": null, "uniqueChildRunComputeTargets": [], "tags": {}, "settings":
{}, "services": {}, "inputDatasets": [], "outputDatasets": [], "runDefinition":
null, "jobSpecification": null, "primaryMetricName": null, "createdFrom":
null, "cancelUri": null, "completeUri": null, "diagnosticsUri": null, "computeRequest":
null, "compute": null, "retainForLifetimeOfWorkspace": false, "queueingInfo":
null, "inputs": null, "outputs": {"debug_info": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_debug_info/versions/1",
"type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_flow_outputs/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-length:
- '4794'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.066'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name/logContent
response:
body:
string: '"2024-01-12 08:15:48 +0000 106 promptflow-runtime INFO [batch_run_name]
Receiving v2 bulk run request 3fe0e3e1-2e3a-44a7-94c9-4a64bc9388a5: {\"flow_id\":
\"batch_run_name\", \"flow_run_id\": \"batch_run_name\", \"flow_source\":
{\"flow_source_type\": 1, \"flow_source_info\": {\"snapshot_id\": \"83cbe8e6-2d19-46c6-8fba-7875fdb033e1\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"connections\": \"**data_scrubbed**\",
\"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.batch_run_name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T08%3A05%3A43Z&ske=2024-01-13T16%3A15%3A43Z&sks=b&skv=2019-07-07&st=2024-01-12T08%3A05%3A47Z&se=2024-01-12T16%3A15%3A47Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl\"},
\"inputs_mapping\": {\"url\": \"${data.url}\"}, \"azure_storage_setting\":
{\"azure_storage_mode\": 1, \"storage_account_name\": \"promptfloweast4063704120\",
\"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\",
\"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/batch_run_name\",
\"blob_container_sas_token\": \"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T08%3A15%3A48Z&ske=2024-01-19T08%3A15%3A48Z&sks=b&skv=2019-07-07&se=2024-01-19T08%3A15%3A48Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 08:15:48 +0000 106
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:15:48 +0000 106 promptflow-runtime INFO Updating
batch_run_name to Status.Preparing...\n2024-01-12 08:15:48 +0000 106 promptflow-runtime
INFO Downloading snapshot to /mnt/host/service/app/34817/requests/batch_run_name\n2024-01-12
08:15:48 +0000 106 promptflow-runtime INFO Get snapshot sas url for
83cbe8e6-2d19-46c6-8fba-7875fdb033e1...\n2024-01-12 08:15:55 +0000 106
promptflow-runtime INFO Downloading snapshot 83cbe8e6-2d19-46c6-8fba-7875fdb033e1
from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/83cbe8e6-2d19-46c6-8fba-7875fdb033e1.zip...\n2024-01-12
08:15:55 +0000 106 promptflow-runtime INFO Downloaded file /mnt/host/service/app/34817/requests/batch_run_name/83cbe8e6-2d19-46c6-8fba-7875fdb033e1.zip
with size 5027 for snapshot 83cbe8e6-2d19-46c6-8fba-7875fdb033e1.\n2024-01-12
08:15:55 +0000 106 promptflow-runtime INFO Download snapshot 83cbe8e6-2d19-46c6-8fba-7875fdb033e1
completed.\n2024-01-12 08:15:55 +0000 106 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/34817/requests/batch_run_name\n2024-01-12
08:15:55 +0000 106 promptflow-runtime INFO About to execute a python
flow.\n2024-01-12 08:15:55 +0000 106 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-12 08:15:55 +0000 106 promptflow-runtime
INFO Starting to check process 3917 status for run batch_run_name\n2024-01-12
08:15:55 +0000 106 promptflow-runtime INFO Start checking run status
for run batch_run_name\n2024-01-12 08:15:59 +0000 3917 promptflow-runtime
INFO [106--3917] Start processing flowV2......\n2024-01-12 08:15:59 +0000 3917
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:15:59 +0000 3917 promptflow-runtime INFO Setting
mlflow tracking uri...\n2024-01-12 08:15:59 +0000 3917 promptflow-runtime
INFO Validating ''AzureML Data Scientist'' user authentication...\n2024-01-12
08:15:59 +0000 3917 promptflow-runtime INFO Successfully validated
''AzureML Data Scientist'' user authentication.\n2024-01-12 08:15:59 +0000 3917
promptflow-runtime INFO Using AzureMLRunStorageV2\n2024-01-12 08:15:59
+0000 3917 promptflow-runtime INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:15:59 +0000 3917 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-12 08:15:59 +0000 3917 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:16:00 +0000 3917 promptflow-runtime INFO Resolve data from url finished
in 0.7087652487680316 seconds\n2024-01-12 08:16:00 +0000 3917 promptflow-runtime
INFO Starting the aml run ''batch_run_name''...\n2024-01-12 08:16:01 +0000 3917
execution.bulk INFO Using fork, process count: 3\n2024-01-12 08:16:01
+0000 3959 execution.bulk INFO Process 3959 started.\n2024-01-12
08:16:01 +0000 3964 execution.bulk INFO Process 3964 started.\n2024-01-12
08:16:01 +0000 3917 execution.bulk INFO Process name: ForkProcess-30:2,
Process id: 3959, Line number: 0 start execution.\n2024-01-12 08:16:01 +0000 3969
execution.bulk INFO Process 3969 started.\n2024-01-12 08:16:01 +0000 3917
execution.bulk INFO Process name: ForkProcess-30:3, Process id: 3964,
Line number: 1 start execution.\n2024-01-12 08:16:01 +0000 3917 execution.bulk INFO Process
name: ForkProcess-30:4, Process id: 3969, Line number: 2 start execution.\n2024-01-12
08:16:02 +0000 3917 execution.bulk INFO Process name: ForkProcess-30:4,
Process id: 3969, Line number: 2 completed.\n2024-01-12 08:16:02 +0000 3917
execution.bulk INFO Finished 1 / 3 lines.\n2024-01-12 08:16:02 +0000 3917
execution.bulk INFO Process name: ForkProcess-30:3, Process id: 3964,
Line number: 1 completed.\n2024-01-12 08:16:02 +0000 3917 execution.bulk INFO Average
execution time for completed lines: 1.52 seconds. Estimated time for incomplete
lines: 3.04 seconds.\n2024-01-12 08:16:02 +0000 3917 execution.bulk INFO Finished
2 / 3 lines.\n2024-01-12 08:16:02 +0000 3917 execution.bulk INFO Average
execution time for completed lines: 0.78 seconds. Estimated time for incomplete
lines: 0.78 seconds.\n2024-01-12 08:16:03 +0000 3917 execution.bulk INFO Process
name: ForkProcess-30:2, Process id: 3959, Line number: 0 completed.\n2024-01-12
08:16:03 +0000 3917 execution.bulk INFO Finished 3 / 3 lines.\n2024-01-12
08:16:03 +0000 3917 execution.bulk INFO Average execution time
for completed lines: 0.61 seconds. Estimated time for incomplete lines: 0.0
seconds.\n2024-01-12 08:16:06 +0000 3917 execution.bulk INFO Upload
status summary metrics for run batch_run_name finished in 2.639357965439558
seconds\n2024-01-12 08:16:06 +0000 3917 promptflow-runtime INFO Successfully
write run properties {\"azureml.promptflow.total_tokens\": 2448, \"_azureml.evaluate_artifacts\":
\"[{\\\"path\\\": \\\"instance_results.jsonl\\\", \\\"type\\\": \\\"table\\\"}]\"}
with run id ''batch_run_name''\n2024-01-12 08:16:06 +0000 3917 execution.bulk INFO Upload
RH properties for run batch_run_name finished in 0.07268641889095306 seconds\n2024-01-12
08:16:06 +0000 3917 promptflow-runtime INFO Creating unregistered output
Asset for Run batch_run_name...\n2024-01-12 08:16:07 +0000 3917 promptflow-runtime
INFO Created debug_info Asset: azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_debug_info/versions/1\n2024-01-12
08:16:07 +0000 3917 promptflow-runtime INFO Creating unregistered output
Asset for Run batch_run_name...\n2024-01-12 08:16:07 +0000 3917 promptflow-runtime
INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1\n2024-01-12
08:16:07 +0000 3917 promptflow-runtime INFO Creating Artifact for Run
batch_run_name...\n2024-01-12 08:16:07 +0000 3917 promptflow-runtime INFO Created
instance_results.jsonl Artifact.\n2024-01-12 08:16:07 +0000 3917 promptflow-runtime
INFO Patching batch_run_name...\n2024-01-12 08:16:07 +0000 3917 promptflow-runtime
INFO Ending the aml run ''batch_run_name'' with status ''Completed''...\n2024-01-12
08:16:09 +0000 106 promptflow-runtime INFO Process 3917 finished\n2024-01-12
08:16:09 +0000 106 promptflow-runtime INFO [106] Child process finished!\n2024-01-12
08:16:09 +0000 106 promptflow-runtime INFO [batch_run_name] End processing
bulk run\n2024-01-12 08:16:09 +0000 106 promptflow-runtime INFO Cleanup
working dir /mnt/host/service/app/34817/requests/batch_run_name for bulk run\n"'
headers:
connection:
- keep-alive
content-length:
- '9861'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.454'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name/logContent
response:
body:
string: '"2024-01-12 08:16:40 +0000 134 promptflow-runtime INFO [eval_run_name]
Receiving v2 bulk run request abc15e77-e71f-4626-8037-8f58ccd9b423: {\"flow_id\":
\"eval_run_name\", \"flow_run_id\": \"eval_run_name\", \"flow_source\": {\"flow_source_type\":
1, \"flow_source_info\": {\"snapshot_id\": \"73d0c061-880c-466c-8fb0-b29e9ae7ab66\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.eval_run_name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A46%3A24Z&ske=2024-01-13T15%3A56%3A24Z&sks=b&skv=2019-07-07&st=2024-01-12T08%3A06%3A39Z&se=2024-01-12T16%3A16%3A39Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl\",
\"run.outputs\": \"azureml:/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1\"},
\"inputs_mapping\": {\"groundtruth\": \"${data.answer}\", \"prediction\":
\"${run.outputs.category}\"}, \"azure_storage_setting\": {\"azure_storage_mode\":
1, \"storage_account_name\": \"promptfloweast4063704120\", \"blob_container_name\":
\"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\", \"flow_artifacts_root_path\":
\"promptflow/PromptFlowArtifacts/eval_run_name\", \"blob_container_sas_token\":
\"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T08%3A16%3A40Z&ske=2024-01-19T08%3A16%3A40Z&sks=b&skv=2019-07-07&se=2024-01-19T08%3A16%3A40Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 08:16:40 +0000 134
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:16:40 +0000 134 promptflow-runtime INFO Updating
eval_run_name to Status.Preparing...\n2024-01-12 08:16:41 +0000 134 promptflow-runtime
INFO Downloading snapshot to /mnt/host/service/app/38343/requests/eval_run_name\n2024-01-12
08:16:41 +0000 134 promptflow-runtime INFO Get snapshot sas url for
73d0c061-880c-466c-8fb0-b29e9ae7ab66...\n2024-01-12 08:16:47 +0000 134
promptflow-runtime INFO Downloading snapshot 73d0c061-880c-466c-8fb0-b29e9ae7ab66
from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/73d0c061-880c-466c-8fb0-b29e9ae7ab66.zip...\n2024-01-12
08:16:47 +0000 134 promptflow-runtime INFO Downloaded file /mnt/host/service/app/38343/requests/eval_run_name/73d0c061-880c-466c-8fb0-b29e9ae7ab66.zip
with size 1243 for snapshot 73d0c061-880c-466c-8fb0-b29e9ae7ab66.\n2024-01-12
08:16:47 +0000 134 promptflow-runtime INFO Download snapshot 73d0c061-880c-466c-8fb0-b29e9ae7ab66
completed.\n2024-01-12 08:16:47 +0000 134 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/38343/requests/eval_run_name\n2024-01-12
08:16:47 +0000 134 promptflow-runtime INFO About to execute a python
flow.\n2024-01-12 08:16:47 +0000 134 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-12 08:16:47 +0000 134 promptflow-runtime
INFO Starting to check process 4041 status for run eval_run_name\n2024-01-12
08:16:47 +0000 134 promptflow-runtime INFO Start checking run status
for run eval_run_name\n2024-01-12 08:16:51 +0000 4041 promptflow-runtime
INFO [134--4041] Start processing flowV2......\n2024-01-12 08:16:51 +0000 4041
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:16:51 +0000 4041 promptflow-runtime INFO Setting
mlflow tracking uri...\n2024-01-12 08:16:52 +0000 4041 promptflow-runtime
INFO Validating ''AzureML Data Scientist'' user authentication...\n2024-01-12
08:16:52 +0000 4041 promptflow-runtime INFO Successfully validated
''AzureML Data Scientist'' user authentication.\n2024-01-12 08:16:52 +0000 4041
promptflow-runtime INFO Using AzureMLRunStorageV2\n2024-01-12 08:16:52
+0000 4041 promptflow-runtime INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:16:52 +0000 4041 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-12 08:16:52 +0000 4041 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:16:52 +0000 4041 promptflow-runtime INFO Resolve data from url finished
in 0.45952979754656553 seconds\n2024-01-12 08:16:53 +0000 4041 promptflow-runtime
INFO Resolve data from url finished in 0.7092651706188917 seconds\n2024-01-12
08:16:53 +0000 4041 promptflow-runtime INFO Starting the aml run ''eval_run_name''...\n2024-01-12
08:16:54 +0000 4041 execution.bulk INFO Using fork, process count:
3\n2024-01-12 08:16:54 +0000 4083 execution.bulk INFO Process 4083
started.\n2024-01-12 08:16:54 +0000 4093 execution.bulk INFO Process
4093 started.\n2024-01-12 08:16:54 +0000 4041 execution.bulk INFO Process
name: ForkProcess-38:2, Process id: 4083, Line number: 0 start execution.\n2024-01-12
08:16:54 +0000 4041 execution.bulk INFO Process name: ForkProcess-38:4,
Process id: 4093, Line number: 1 start execution.\n2024-01-12 08:16:54 +0000 4041
execution.bulk INFO Process name: ForkProcess-38:2, Process id: 4083,
Line number: 0 completed.\n2024-01-12 08:16:54 +0000 4088 execution.bulk INFO Process
4088 started.\n2024-01-12 08:16:54 +0000 4041 execution.bulk INFO Finished
1 / 3 lines.\n2024-01-12 08:16:54 +0000 4041 execution.bulk INFO Process
name: ForkProcess-38:4, Process id: 4093, Line number: 1 completed.\n2024-01-12
08:16:54 +0000 4041 execution.bulk INFO Process name: ForkProcess-38:3,
Process id: 4088, Line number: 2 start execution.\n2024-01-12 08:16:54 +0000 4041
execution.bulk INFO Average execution time for completed lines: 0.23
seconds. Estimated time for incomplete lines: 0.46 seconds.\n2024-01-12 08:16:54
+0000 4041 execution.bulk INFO Finished 2 / 3 lines.\n2024-01-12
08:16:54 +0000 4041 execution.bulk INFO Average execution time
for completed lines: 0.13 seconds. Estimated time for incomplete lines: 0.13
seconds.\n2024-01-12 08:16:54 +0000 4041 execution.bulk INFO Process
name: ForkProcess-38:3, Process id: 4088, Line number: 2 completed.\n2024-01-12
08:16:54 +0000 4041 execution.bulk INFO Finished 3 / 3 lines.\n2024-01-12
08:16:54 +0000 4041 execution.bulk INFO Average execution time
for completed lines: 0.13 seconds. Estimated time for incomplete lines: 0.0
seconds.\n2024-01-12 08:16:55 +0000 4041 execution.bulk INFO Executing
aggregation nodes...\n2024-01-12 08:16:55 +0000 4041 execution.bulk INFO Finish
executing aggregation nodes.\n2024-01-12 08:16:57 +0000 4041 execution.bulk INFO Upload
status summary metrics for run eval_run_name finished in 1.7071036528795958
seconds\n2024-01-12 08:16:57 +0000 4041 execution.bulk INFO Upload
metrics for run eval_run_name finished in 0.39980557933449745 seconds\n2024-01-12
08:16:57 +0000 4041 promptflow-runtime INFO Successfully write run
properties {\"azureml.promptflow.total_tokens\": 0, \"_azureml.evaluate_artifacts\":
\"[{\\\"path\\\": \\\"instance_results.jsonl\\\", \\\"type\\\": \\\"table\\\"}]\"}
with run id ''eval_run_name''\n2024-01-12 08:16:57 +0000 4041 execution.bulk INFO Upload
RH properties for run eval_run_name finished in 0.11383599042892456 seconds\n2024-01-12
08:16:58 +0000 4041 promptflow-runtime INFO Creating unregistered output
Asset for Run eval_run_name...\n2024-01-12 08:16:58 +0000 4041 promptflow-runtime
INFO Created debug_info Asset: azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_debug_info/versions/1\n2024-01-12
08:16:58 +0000 4041 promptflow-runtime INFO Creating unregistered output
Asset for Run eval_run_name...\n2024-01-12 08:16:58 +0000 4041 promptflow-runtime
INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_flow_outputs/versions/1\n2024-01-12
08:16:58 +0000 4041 promptflow-runtime INFO Creating Artifact for Run
eval_run_name...\n2024-01-12 08:16:58 +0000 4041 promptflow-runtime INFO Created
instance_results.jsonl Artifact.\n2024-01-12 08:16:58 +0000 4041 promptflow-runtime
INFO Patching eval_run_name...\n2024-01-12 08:16:58 +0000 4041 promptflow-runtime
INFO Ending the aml run ''eval_run_name'' with status ''Completed''...\n2024-01-12
08:17:00 +0000 134 promptflow-runtime INFO Process 4041 finished\n2024-01-12
08:17:00 +0000 134 promptflow-runtime INFO [134] Child process finished!\n2024-01-12
08:17:00 +0000 134 promptflow-runtime INFO [eval_run_name] End processing
bulk run\n2024-01-12 08:17:00 +0000 134 promptflow-runtime INFO Cleanup
working dir /mnt/host/service/app/38343/requests/eval_run_name for bulk run\n"'
headers:
connection:
- keep-alive
content-length:
- '10623'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.650'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_basic_evaluation.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_basic_evaluation.yaml",
"repo_id": "promptflow",
"token_count": 88949
} | 72 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.017'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.716'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.105'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.140'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 30 Nov 2023 08:11:19 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '49'
content-md5:
- quXiEreYvPinSj0HsaNa/g==
content-type:
- application/octet-stream
last-modified:
- Wed, 08 Nov 2023 04:26:09 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Wed, 08 Nov 2023 04:26:09 GMT
x-ms-meta-name:
- c4092674-5e53-4c17-b78d-75353ae0edb6
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 579021dc-8ac8-4c73-8110-4642bd00c69b
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 30 Nov 2023 08:11:21 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/env_var_names.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.103'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.107'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 30 Nov 2023 08:11:25 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '245'
content-md5:
- F+JA0a3CxcLYZ0ANRdlZbA==
content-type:
- application/octet-stream
last-modified:
- Wed, 29 Nov 2023 02:51:35 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Thu, 17 Aug 2023 10:30:09 GMT
x-ms-meta-name:
- 56efdd28-6297-4baa-aad3-be46f4b768a2
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 30 Nov 2023 08:11:26 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_env_var/flow.dag.yaml
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.070'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.104'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 30 Nov 2023 08:11:35 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '49'
content-md5:
- quXiEreYvPinSj0HsaNa/g==
content-type:
- application/octet-stream
last-modified:
- Wed, 08 Nov 2023 04:26:09 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Wed, 08 Nov 2023 04:26:09 GMT
x-ms-meta-name:
- c4092674-5e53-4c17-b78d-75353ae0edb6
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 579021dc-8ac8-4c73-8110-4642bd00c69b
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 30 Nov 2023 08:11:36 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/env_var_names.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
vary:
- Accept-Encoding
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.073'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 promptflow/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
x-cache:
- CONFIG_NOCACHE
x-content-type-options:
- nosniff
x-request-time:
- '0.092'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 30 Nov 2023 08:11:40 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '245'
content-md5:
- F+JA0a3CxcLYZ0ANRdlZbA==
content-type:
- application/octet-stream
last-modified:
- Wed, 29 Nov 2023 02:51:35 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Thu, 17 Aug 2023 10:30:09 GMT
x-ms-meta-name:
- 56efdd28-6297-4baa-aad3-be46f4b768a2
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 30 Nov 2023 08:11:42 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_env_var/flow.dag.yaml
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_request_id_when_making_http_requests.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_request_id_when_making_http_requests.yaml",
"repo_id": "promptflow",
"token_count": 12048
} | 73 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.026'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.151'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.095'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.224'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 07:51:37 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '379'
content-md5:
- lI/pz9jzTQ7Td3RHPL7y7w==
content-type:
- application/octet-stream
last-modified:
- Mon, 06 Nov 2023 08:30:18 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Mon, 06 Nov 2023 08:30:18 GMT
x-ms-meta-name:
- 94331215-cf7f-452a-9f1a-1d276bc9b0e4
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 3f163752-edb0-4afc-a6f5-b0a670bd7c24
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 07:51:38 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.070'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.136'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 07:51:41 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/hello-world/flow.dag.yaml
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '266'
content-md5:
- UZm3TyOoKWjSR23+Up6qUA==
content-type:
- application/octet-stream
last-modified:
- Tue, 19 Dec 2023 06:05:25 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Tue, 19 Dec 2023 06:05:25 GMT
x-ms-meta-name:
- 7b68bf5e-6ef4-4eb3-9f49-28f9a5baad87
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 07:51:43 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/hello-world/flow.dag.yaml
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/hello-world/flow.dag.yaml",
"runId": "batch_run_name", "runDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"runExperimentName": "", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"},
"inputsMapping": {"name": "${data.url}"}, "connections": {}, "environmentVariables":
{}, "runtimeName": "fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000",
"sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '812'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"batch_run_name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '7.941'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "38053316-7591-4ac8-b718-d76b16b48bbe",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12912'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.407'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "38053316-7591-4ac8-b718-d76b16b48bbe",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12912'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.393'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "38053316-7591-4ac8-b718-d76b16b48bbe",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12912'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.373'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "38053316-7591-4ac8-b718-d76b16b48bbe",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12912'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.523'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.066'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.128'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 07:52:31 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '379'
content-md5:
- lI/pz9jzTQ7Td3RHPL7y7w==
content-type:
- application/octet-stream
last-modified:
- Mon, 06 Nov 2023 08:30:18 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Mon, 06 Nov 2023 08:30:18 GMT
x-ms-meta-name:
- 94331215-cf7f-452a-9f1a-1d276bc9b0e4
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 3f163752-edb0-4afc-a6f5-b0a670bd7c24
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 07:52:32 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.065'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.113'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 07:52:35 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/eval-classification-accuracy/calculate_accuracy.py
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '409'
content-md5:
- OyENtlqGVUTrY5zKuzo8XA==
content-type:
- application/octet-stream
last-modified:
- Tue, 21 Nov 2023 08:03:40 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Tue, 21 Nov 2023 08:03:39 GMT
x-ms-meta-name:
- fd932777-4f3a-4c1d-9c3a-24d45835d7e1
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 07:52:36 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/eval-classification-accuracy/calculate_accuracy.py
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/eval-classification-accuracy/flow.dag.yaml",
"runId": "eval_run_name", "runDisplayName": "sdk-cli-test-fixture-eval-run-without-llm",
"runExperimentName": "", "variantRunId": "batch_run_name", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"},
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"},
"connections": {}, "environmentVariables": {}, "runtimeName": "fake-runtime-name",
"sessionId": "000000000000000000000000000000000000000000000000", "sessionSetupMode":
"SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '950'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"eval_run_name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '5.293'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source":
{"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}",
"prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false},
{"name": "calculate_accuracy", "type": "python", "source": {"type": "code",
"path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"},
"tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content
Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type":
["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "hate_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "self_harm_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "sexual_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "violence_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "description":
"Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety",
"function": "analyze_text", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools":
["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state":
"stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection":
{"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type":
"python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py",
"function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false,
"tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs":
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py",
"function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP",
"description": "Please specify the groundtruth column, which contains the
true label to the outputs that your flow produces.", "is_chat_input": false},
"prediction": {"type": "string", "default": "APP", "description": "Please
specify the prediction column, which contains the predicted outputs that your
flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string",
"reference": "${grade.output}", "evaluation_only": false, "is_chat_output":
false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name",
"flowRunId": "eval_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-eval-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"},
"outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "1885cd85-1969-4fca-8c21-d8826ed5d886",
"studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '13872'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.547'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source":
{"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}",
"prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false},
{"name": "calculate_accuracy", "type": "python", "source": {"type": "code",
"path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"},
"tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content
Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type":
["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "hate_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "self_harm_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "sexual_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "violence_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "description":
"Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety",
"function": "analyze_text", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools":
["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state":
"stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection":
{"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type":
"python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py",
"function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false,
"tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs":
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py",
"function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP",
"description": "Please specify the groundtruth column, which contains the
true label to the outputs that your flow produces.", "is_chat_input": false},
"prediction": {"type": "string", "default": "APP", "description": "Please
specify the prediction column, which contains the predicted outputs that your
flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string",
"reference": "${grade.output}", "evaluation_only": false, "is_chat_output":
false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name",
"flowRunId": "eval_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-eval-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"},
"outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "1885cd85-1969-4fca-8c21-d8826ed5d886",
"studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '13872'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.526'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source":
{"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}",
"prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false},
{"name": "calculate_accuracy", "type": "python", "source": {"type": "code",
"path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"},
"tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content
Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type":
["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "hate_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "self_harm_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "sexual_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "violence_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "description":
"Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety",
"function": "analyze_text", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools":
["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state":
"stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection":
{"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type":
"python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py",
"function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false,
"tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs":
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py",
"function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP",
"description": "Please specify the groundtruth column, which contains the
true label to the outputs that your flow produces.", "is_chat_input": false},
"prediction": {"type": "string", "default": "APP", "description": "Please
specify the prediction column, which contains the predicted outputs that your
flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string",
"reference": "${grade.output}", "evaluation_only": false, "is_chat_output":
false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name",
"flowRunId": "eval_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-eval-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"},
"outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "1885cd85-1969-4fca-8c21-d8826ed5d886",
"studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '13872'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.270'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source":
{"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}",
"prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false},
{"name": "calculate_accuracy", "type": "python", "source": {"type": "code",
"path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"},
"tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content
Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type":
["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "hate_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "self_harm_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "sexual_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "violence_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "description":
"Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety",
"function": "analyze_text", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools":
["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state":
"stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection":
{"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type":
"python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py",
"function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false,
"tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs":
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py",
"function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP",
"description": "Please specify the groundtruth column, which contains the
true label to the outputs that your flow produces.", "is_chat_input": false},
"prediction": {"type": "string", "default": "APP", "description": "Please
specify the prediction column, which contains the predicted outputs that your
flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string",
"reference": "${grade.output}", "evaluation_only": false, "is_chat_output":
false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name",
"flowRunId": "eval_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-eval-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"},
"outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "1885cd85-1969-4fca-8c21-d8826ed5d886",
"studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '13872'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.231'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "grade", "type": "python", "source":
{"type": "code", "path": "grade.py"}, "inputs": {"groundtruth": "${inputs.groundtruth}",
"prediction": "${inputs.prediction}"}, "tool": "grade.py", "reduce": false},
{"name": "calculate_accuracy", "type": "python", "source": {"type": "code",
"path": "calculate_accuracy.py"}, "inputs": {"grades": "${grade.output}"},
"tool": "calculate_accuracy.py", "reduce": true}], "tools": [{"name": "Content
Safety (Text Analyze)", "type": "python", "inputs": {"connection": {"type":
["AzureContentSafetyConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "hate_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "self_harm_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "sexual_category": {"type": ["string"], "default":
"medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "violence_category":
{"type": ["string"], "default": "medium_sensitivity", "enum": ["disable",
"low_sensitivity", "medium_sensitivity", "high_sensitivity"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "description":
"Use Azure Content Safety to detect harmful content.", "module": "promptflow.tools.azure_content_safety",
"function": "analyze_text", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "enable_kwargs": false, "deprecated_tools":
["content_safety_text.tools.content_safety_text_tool.analyze_text"], "tool_state":
"stable"}, {"name": "Embedding", "type": "python", "inputs": {"connection":
{"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "calculate_accuracy.py", "type":
"python", "inputs": {"grades": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "calculate_accuracy.py",
"function": "calculate_accuracy", "is_builtin": false, "enable_kwargs": false,
"tool_state": "stable"}, {"name": "grade.py", "type": "python", "inputs":
{"groundtruth": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "prediction": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}}, "source": "grade.py",
"function": "grade", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"groundtruth": {"type": "string", "default": "APP",
"description": "Please specify the groundtruth column, which contains the
true label to the outputs that your flow produces.", "is_chat_input": false},
"prediction": {"type": "string", "default": "APP", "description": "Please
specify the prediction column, which contains the predicted outputs that your
flow produces.", "is_chat_input": false}}, "outputs": {"grade": {"type": "string",
"reference": "${grade.output}", "evaluation_only": false, "is_chat_output":
false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/eval_run_name/flowRuns/eval_run_name",
"flowRunId": "eval_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-eval-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.result}"},
"outputDatastoreName": "workspaceblobstore", "childRunBasePath": "promptflow/PromptFlowArtifacts/eval_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "1885cd85-1969-4fca-8c21-d8826ed5d886",
"studioPortalEndpoint": "https://ml.azure.com/runs/eval_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '13872'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.534'
status:
code: 200
message: OK
- request:
body: '{"runId": "batch_run_name", "selectRunMetadata": true, "selectRunDefinition":
true, "selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705045909, "rootRunId": "batch_run_name",
"createdUtc": "2024-01-12T07:51:49.7910367+00:00", "createdBy": {"userObjectId":
"00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 6,
"statusRevision": 3, "runUuid": "de4d3748-43e9-499d-bacb-28bacd51d7dd", "parentRunUuid":
null, "rootRunUuid": "de4d3748-43e9-499d-bacb-28bacd51d7dd", "lastStartTimeUtc":
null, "currentComputeTime": null, "computeDuration": "00:00:04.0496282", "effectiveStartTimeUtc":
null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "18a66f5f-dbdf-4c17-9dd7-1634712a9cbe",
"upn": null}, "lastModifiedUtc": "2024-01-12T07:52:10.1905954+00:00", "duration":
"00:00:04.0496282", "cancelationReason": null, "currentAttemptId": 1, "runId":
"batch_run_name", "parentRunId": null, "experimentId": "b1e733a1-2a5f-4c17-bc34-4d66d2858228",
"status": "Completed", "startTimeUtc": "2024-01-12T07:52:07.2312941+00:00",
"endTimeUtc": "2024-01-12T07:52:11.2809223+00:00", "scheduleId": null, "displayName":
"sdk-cli-test-fixture-batch-run-without-llm", "name": null, "dataContainerId":
"dcid.batch_run_name", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun",
"runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow",
"computeType": "AmlcDsi"}, "properties": {"azureml.promptflow.runtime_name":
"test-runtime-ci", "azureml.promptflow.runtime_version": "20231204.v4", "azureml.promptflow.definition_file_name":
"flow.dag.yaml", "azureml.promptflow.session_id": "bee356189f7e7f18671a79369c78df4cfb1bbd0c99069074",
"azureml.promptflow.flow_lineage_id": "f7ee724d91e4f4a7501bdc0b66995bc8b57f86b3a526fa2a81c34ebcccbbd912",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/36774154bc3ecde4aa21054b3052221f/hello-world/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl",
"azureml.promptflow.inputs_mapping": "{\"name\":\"${data.url}\"}", "_azureml.evaluation_run":
"promptflow.BatchRun", "azureml.promptflow.snapshot_id": "38053316-7591-4ac8-b718-d76b16b48bbe",
"azureml.promptflow.total_tokens": "0", "_azureml.evaluate_artifacts": "[{\"path\":
\"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters": {}, "actionUris":
{}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [],
"tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets":
[], "runDefinition": null, "jobSpecification": null, "primaryMetricName":
null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri":
null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId":
"azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_debug_info/versions/1",
"type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-length:
- '4649'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.052'
status:
code: 200
message: OK
- request:
body: '{"runId": "eval_run_name", "selectRunMetadata": true, "selectRunDefinition":
true, "selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705045961, "rootRunId": "eval_run_name",
"createdUtc": "2024-01-12T07:52:41.9028361+00:00", "createdBy": {"userObjectId":
"00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 6,
"statusRevision": 3, "runUuid": "b588472b-0935-433f-9bcb-b728c4f01254", "parentRunUuid":
null, "rootRunUuid": "b588472b-0935-433f-9bcb-b728c4f01254", "lastStartTimeUtc":
null, "currentComputeTime": null, "computeDuration": "00:00:04.3642942", "effectiveStartTimeUtc":
null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "18a66f5f-dbdf-4c17-9dd7-1634712a9cbe",
"upn": null}, "lastModifiedUtc": "2024-01-12T07:53:02.5591186+00:00", "duration":
"00:00:04.3642942", "cancelationReason": null, "currentAttemptId": 1, "runId":
"eval_run_name", "parentRunId": null, "experimentId": "7bdec279-f99c-4ed3-b0b8-dd75698b8fd0",
"status": "Completed", "startTimeUtc": "2024-01-12T07:52:58.9604157+00:00",
"endTimeUtc": "2024-01-12T07:53:03.3247099+00:00", "scheduleId": null, "displayName":
"sdk-cli-test-fixture-eval-run-without-llm", "name": null, "dataContainerId":
"dcid.eval_run_name", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun",
"runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow",
"computeType": "AmlcDsi"}, "properties": {"azureml.promptflow.runtime_name":
"test-runtime-ci", "azureml.promptflow.runtime_version": "20231204.v4", "azureml.promptflow.definition_file_name":
"flow.dag.yaml", "azureml.promptflow.session_id": "f8e4236a4e78e7f7125bbd811ec7976cb330412723a530f8",
"azureml.promptflow.flow_lineage_id": "26c575d863a85371ef937096728441d8c68c3e737b5a1bfeae5ac8f3b9ccb048",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/1aa3064d06f6170abbc488cc35c713b9/eval-classification-accuracy/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl",
"azureml.promptflow.input_run_id": "batch_run_name", "azureml.promptflow.inputs_mapping":
"{\"groundtruth\":\"${data.answer}\",\"prediction\":\"${run.outputs.result}\"}",
"_azureml.evaluation_run": "promptflow.BatchRun", "azureml.promptflow.snapshot_id":
"1885cd85-1969-4fca-8c21-d8826ed5d886", "azureml.promptflow.total_tokens":
"0", "_azureml.evaluate_artifacts": "[{\"path\": \"instance_results.jsonl\",
\"type\": \"table\"}]"}, "parameters": {}, "actionUris": {}, "scriptName":
null, "target": null, "uniqueChildRunComputeTargets": [], "tags": {}, "settings":
{}, "services": {}, "inputDatasets": [], "outputDatasets": [], "runDefinition":
null, "jobSpecification": null, "primaryMetricName": null, "createdFrom":
null, "cancelUri": null, "completeUri": null, "diagnosticsUri": null, "computeRequest":
null, "compute": null, "retainForLifetimeOfWorkspace": false, "queueingInfo":
null, "inputs": null, "outputs": {"debug_info": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_debug_info/versions/1",
"type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_flow_outputs/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-length:
- '4797'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.045'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name/logContent
response:
body:
string: '"2024-01-12 07:51:54 +0000 49 promptflow-runtime INFO [batch_run_name]
Receiving v2 bulk run request f3011800-5f03-4911-b5a1-b1f3cf942c03: {\"flow_id\":
\"batch_run_name\", \"flow_run_id\": \"batch_run_name\", \"flow_source\":
{\"flow_source_type\": 1, \"flow_source_info\": {\"snapshot_id\": \"38053316-7591-4ac8-b718-d76b16b48bbe\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.batch_run_name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A41%3A49Z&ske=2024-01-13T15%3A51%3A49Z&sks=b&skv=2019-07-07&st=2024-01-12T07%3A41%3A53Z&se=2024-01-12T15%3A51%3A53Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl\"},
\"inputs_mapping\": {\"name\": \"${data.url}\"}, \"azure_storage_setting\":
{\"azure_storage_mode\": 1, \"storage_account_name\": \"promptfloweast4063704120\",
\"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\",
\"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/batch_run_name\",
\"blob_container_sas_token\": \"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A51%3A54Z&ske=2024-01-19T07%3A51%3A54Z&sks=b&skv=2019-07-07&se=2024-01-19T07%3A51%3A54Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 07:51:54 +0000 49
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 07:51:54 +0000 49 promptflow-runtime INFO Updating
batch_run_name to Status.Preparing...\n2024-01-12 07:51:55 +0000 49 promptflow-runtime
INFO Downloading snapshot to /mnt/host/service/app/39649/requests/batch_run_name\n2024-01-12
07:51:55 +0000 49 promptflow-runtime INFO Get snapshot sas url for
38053316-7591-4ac8-b718-d76b16b48bbe...\n2024-01-12 07:52:01 +0000 49
promptflow-runtime INFO Downloading snapshot 38053316-7591-4ac8-b718-d76b16b48bbe
from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/38053316-7591-4ac8-b718-d76b16b48bbe.zip...\n2024-01-12
07:52:01 +0000 49 promptflow-runtime INFO Downloaded file /mnt/host/service/app/39649/requests/batch_run_name/38053316-7591-4ac8-b718-d76b16b48bbe.zip
with size 495 for snapshot 38053316-7591-4ac8-b718-d76b16b48bbe.\n2024-01-12
07:52:01 +0000 49 promptflow-runtime INFO Download snapshot 38053316-7591-4ac8-b718-d76b16b48bbe
completed.\n2024-01-12 07:52:01 +0000 49 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/39649/requests/batch_run_name\n2024-01-12
07:52:01 +0000 49 promptflow-runtime INFO About to execute a python
flow.\n2024-01-12 07:52:01 +0000 49 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-12 07:52:01 +0000 49 promptflow-runtime
INFO Starting to check process 2809 status for run batch_run_name\n2024-01-12
07:52:01 +0000 49 promptflow-runtime INFO Start checking run status
for run batch_run_name\n2024-01-12 07:52:05 +0000 2809 promptflow-runtime
INFO [49--2809] Start processing flowV2......\n2024-01-12 07:52:05 +0000 2809
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 07:52:05 +0000 2809 promptflow-runtime INFO Setting
mlflow tracking uri...\n2024-01-12 07:52:05 +0000 2809 promptflow-runtime
INFO Validating ''AzureML Data Scientist'' user authentication...\n2024-01-12
07:52:06 +0000 2809 promptflow-runtime INFO Successfully validated
''AzureML Data Scientist'' user authentication.\n2024-01-12 07:52:06 +0000 2809
promptflow-runtime INFO Using AzureMLRunStorageV2\n2024-01-12 07:52:06
+0000 2809 promptflow-runtime INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
07:52:06 +0000 2809 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-12 07:52:06 +0000 2809 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
07:52:06 +0000 2809 promptflow-runtime INFO Resolve data from url finished
in 0.4533178601413965 seconds\n2024-01-12 07:52:06 +0000 2809 promptflow-runtime
INFO Starting the aml run ''batch_run_name''...\n2024-01-12 07:52:07 +0000 2809
execution.bulk INFO Using fork, process count: 3\n2024-01-12 07:52:07
+0000 2850 execution.bulk INFO Process 2850 started.\n2024-01-12
07:52:07 +0000 2856 execution.bulk INFO Process 2856 started.\n2024-01-12
07:52:07 +0000 2809 execution.bulk INFO Process name: ForkProcess-32:2,
Process id: 2850, Line number: 0 start execution.\n2024-01-12 07:52:07 +0000 2809
execution.bulk INFO Process name: ForkProcess-32:3, Process id: 2856,
Line number: 1 start execution.\n2024-01-12 07:52:07 +0000 2809 execution.bulk INFO Process
name: ForkProcess-32:2, Process id: 2850, Line number: 0 completed.\n2024-01-12
07:52:07 +0000 2809 execution.bulk INFO Finished 1 / 3 lines.\n2024-01-12
07:52:07 +0000 2809 execution.bulk INFO Average execution time
for completed lines: 0.2 seconds. Estimated time for incomplete lines: 0.4
seconds.\n2024-01-12 07:52:07 +0000 2861 execution.bulk INFO Process
2861 started.\n2024-01-12 07:52:07 +0000 2809 execution.bulk INFO Process
name: ForkProcess-32:2, Process id: 2850, Line number: 2 start execution.\n2024-01-12
07:52:07 +0000 2809 execution.bulk INFO Process name: ForkProcess-32:3,
Process id: 2856, Line number: 1 completed.\n2024-01-12 07:52:07 +0000 2809
execution.bulk INFO Finished 2 / 3 lines.\n2024-01-12 07:52:07 +0000 2809
execution.bulk INFO Average execution time for completed lines: 0.14
seconds. Estimated time for incomplete lines: 0.14 seconds.\n2024-01-12 07:52:07
+0000 2809 execution.bulk INFO Process name: ForkProcess-32:2,
Process id: 2850, Line number: 2 completed.\n2024-01-12 07:52:07 +0000 2809
execution.bulk INFO Finished 3 / 3 lines.\n2024-01-12 07:52:07 +0000 2809
execution.bulk INFO Average execution time for completed lines: 0.11
seconds. Estimated time for incomplete lines: 0.0 seconds.\n2024-01-12 07:52:10
+0000 2809 execution.bulk INFO Upload status summary metrics for
run batch_run_name finished in 1.258488142862916 seconds\n2024-01-12 07:52:10
+0000 2809 promptflow-runtime INFO Successfully write run properties
{\"azureml.promptflow.total_tokens\": 0, \"_azureml.evaluate_artifacts\":
\"[{\\\"path\\\": \\\"instance_results.jsonl\\\", \\\"type\\\": \\\"table\\\"}]\"}
with run id ''batch_run_name''\n2024-01-12 07:52:10 +0000 2809 execution.bulk INFO Upload
RH properties for run batch_run_name finished in 0.0779735017567873 seconds\n2024-01-12
07:52:10 +0000 2809 promptflow-runtime INFO Creating unregistered output
Asset for Run batch_run_name...\n2024-01-12 07:52:10 +0000 2809 promptflow-runtime
INFO Created debug_info Asset: azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_debug_info/versions/1\n2024-01-12
07:52:10 +0000 2809 promptflow-runtime INFO Creating unregistered output
Asset for Run batch_run_name...\n2024-01-12 07:52:10 +0000 2809 promptflow-runtime
INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1\n2024-01-12
07:52:10 +0000 2809 promptflow-runtime INFO Creating Artifact for Run
batch_run_name...\n2024-01-12 07:52:11 +0000 2809 promptflow-runtime INFO Created
instance_results.jsonl Artifact.\n2024-01-12 07:52:11 +0000 2809 promptflow-runtime
INFO Patching batch_run_name...\n2024-01-12 07:52:11 +0000 2809 promptflow-runtime
INFO Ending the aml run ''batch_run_name'' with status ''Completed''...\n2024-01-12
07:52:12 +0000 49 promptflow-runtime INFO Process 2809 finished\n2024-01-12
07:52:12 +0000 49 promptflow-runtime INFO [49] Child process finished!\n2024-01-12
07:52:12 +0000 49 promptflow-runtime INFO [batch_run_name] End processing
bulk run\n2024-01-12 07:52:12 +0000 49 promptflow-runtime INFO Cleanup
working dir /mnt/host/service/app/39649/requests/batch_run_name for bulk run\n"'
headers:
connection:
- keep-alive
content-length:
- '9813'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.829'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name/logContent
response:
body:
string: '"2024-01-12 07:52:45 +0000 49 promptflow-runtime INFO [eval_run_name]
Receiving v2 bulk run request 9ada5207-1c07-4048-8721-8a87560f52bf: {\"flow_id\":
\"eval_run_name\", \"flow_run_id\": \"eval_run_name\", \"flow_source\": {\"flow_source_type\":
1, \"flow_source_info\": {\"snapshot_id\": \"1885cd85-1969-4fca-8c21-d8826ed5d886\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.eval_run_name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A41%3A49Z&ske=2024-01-13T15%3A51%3A49Z&sks=b&skv=2019-07-07&st=2024-01-12T07%3A42%3A44Z&se=2024-01-12T15%3A52%3A44Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl\",
\"run.outputs\": \"azureml:/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1\"},
\"inputs_mapping\": {\"groundtruth\": \"${data.answer}\", \"prediction\":
\"${run.outputs.result}\"}, \"azure_storage_setting\": {\"azure_storage_mode\":
1, \"storage_account_name\": \"promptfloweast4063704120\", \"blob_container_name\":
\"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\", \"flow_artifacts_root_path\":
\"promptflow/PromptFlowArtifacts/eval_run_name\", \"blob_container_sas_token\":
\"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A52%3A45Z&ske=2024-01-19T07%3A52%3A45Z&sks=b&skv=2019-07-07&se=2024-01-19T07%3A52%3A45Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 07:52:45 +0000 49
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 07:52:45 +0000 49 promptflow-runtime INFO Updating
eval_run_name to Status.Preparing...\n2024-01-12 07:52:45 +0000 49 promptflow-runtime
INFO Downloading snapshot to /mnt/host/service/app/39649/requests/eval_run_name\n2024-01-12
07:52:45 +0000 49 promptflow-runtime INFO Get snapshot sas url for
1885cd85-1969-4fca-8c21-d8826ed5d886...\n2024-01-12 07:52:52 +0000 49
promptflow-runtime INFO Downloading snapshot 1885cd85-1969-4fca-8c21-d8826ed5d886
from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/1885cd85-1969-4fca-8c21-d8826ed5d886.zip...\n2024-01-12
07:52:52 +0000 49 promptflow-runtime INFO Downloaded file /mnt/host/service/app/39649/requests/eval_run_name/1885cd85-1969-4fca-8c21-d8826ed5d886.zip
with size 1243 for snapshot 1885cd85-1969-4fca-8c21-d8826ed5d886.\n2024-01-12
07:52:52 +0000 49 promptflow-runtime INFO Download snapshot 1885cd85-1969-4fca-8c21-d8826ed5d886
completed.\n2024-01-12 07:52:52 +0000 49 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/39649/requests/eval_run_name\n2024-01-12
07:52:52 +0000 49 promptflow-runtime INFO About to execute a python
flow.\n2024-01-12 07:52:52 +0000 49 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-12 07:52:52 +0000 49 promptflow-runtime
INFO Starting to check process 2911 status for run eval_run_name\n2024-01-12
07:52:52 +0000 49 promptflow-runtime INFO Start checking run status
for run eval_run_name\n2024-01-12 07:52:56 +0000 2911 promptflow-runtime
INFO [49--2911] Start processing flowV2......\n2024-01-12 07:52:56 +0000 2911
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 07:52:56 +0000 2911 promptflow-runtime INFO Setting
mlflow tracking uri...\n2024-01-12 07:52:56 +0000 2911 promptflow-runtime
INFO Validating ''AzureML Data Scientist'' user authentication...\n2024-01-12
07:52:56 +0000 2911 promptflow-runtime INFO Successfully validated
''AzureML Data Scientist'' user authentication.\n2024-01-12 07:52:56 +0000 2911
promptflow-runtime INFO Using AzureMLRunStorageV2\n2024-01-12 07:52:56
+0000 2911 promptflow-runtime INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
07:52:56 +0000 2911 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-12 07:52:57 +0000 2911 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
07:52:57 +0000 2911 promptflow-runtime INFO Resolve data from url finished
in 0.5648300694301724 seconds\n2024-01-12 07:52:58 +0000 2911 promptflow-runtime
INFO Resolve data from url finished in 0.6205331217497587 seconds\n2024-01-12
07:52:58 +0000 2911 promptflow-runtime INFO Starting the aml run ''eval_run_name''...\n2024-01-12
07:52:59 +0000 2911 execution.bulk INFO Using fork, process count:
3\n2024-01-12 07:52:59 +0000 2954 execution.bulk INFO Process 2954
started.\n2024-01-12 07:52:59 +0000 2958 execution.bulk INFO Process
2958 started.\n2024-01-12 07:52:59 +0000 2911 execution.bulk INFO Process
name: ForkProcess-34:2, Process id: 2954, Line number: 0 start execution.\n2024-01-12
07:52:59 +0000 2911 execution.bulk INFO Process name: ForkProcess-34:3,
Process id: 2958, Line number: 1 start execution.\n2024-01-12 07:52:59 +0000 2911
execution.bulk INFO Process name: ForkProcess-34:2, Process id: 2954,
Line number: 0 completed.\n2024-01-12 07:52:59 +0000 2911 execution.bulk INFO Finished
1 / 3 lines.\n2024-01-12 07:52:59 +0000 2911 execution.bulk INFO Average
execution time for completed lines: 0.21 seconds. Estimated time for incomplete
lines: 0.42 seconds.\n2024-01-12 07:52:59 +0000 2911 execution.bulk INFO Process
name: ForkProcess-34:2, Process id: 2954, Line number: 2 start execution.\n2024-01-12
07:52:59 +0000 2911 execution.bulk INFO Process name: ForkProcess-34:3,
Process id: 2958, Line number: 1 completed.\n2024-01-12 07:52:59 +0000 2911
execution.bulk INFO Finished 2 / 3 lines.\n2024-01-12 07:52:59 +0000 2961
execution.bulk INFO Process 2961 started.\n2024-01-12 07:52:59 +0000 2911
execution.bulk INFO Average execution time for completed lines: 0.13
seconds. Estimated time for incomplete lines: 0.13 seconds.\n2024-01-12 07:52:59
+0000 2911 execution.bulk INFO Process name: ForkProcess-34:2,
Process id: 2954, Line number: 2 completed.\n2024-01-12 07:52:59 +0000 2911
execution.bulk INFO Finished 3 / 3 lines.\n2024-01-12 07:52:59 +0000 2911
execution.bulk INFO Average execution time for completed lines: 0.12
seconds. Estimated time for incomplete lines: 0.0 seconds.\n2024-01-12 07:53:00
+0000 2911 execution.bulk INFO Executing aggregation nodes...\n2024-01-12
07:53:00 +0000 2911 execution.bulk INFO Finish executing aggregation
nodes.\n2024-01-12 07:53:02 +0000 2911 execution.bulk INFO Upload
status summary metrics for run eval_run_name finished in 1.4952670317143202
seconds\n2024-01-12 07:53:02 +0000 2911 execution.bulk INFO Upload
metrics for run eval_run_name finished in 0.3417100487276912 seconds\n2024-01-12
07:53:02 +0000 2911 promptflow-runtime INFO Successfully write run
properties {\"azureml.promptflow.total_tokens\": 0, \"_azureml.evaluate_artifacts\":
\"[{\\\"path\\\": \\\"instance_results.jsonl\\\", \\\"type\\\": \\\"table\\\"}]\"}
with run id ''eval_run_name''\n2024-01-12 07:53:02 +0000 2911 execution.bulk INFO Upload
RH properties for run eval_run_name finished in 0.09185165446251631 seconds\n2024-01-12
07:53:02 +0000 2911 promptflow-runtime INFO Creating unregistered output
Asset for Run eval_run_name...\n2024-01-12 07:53:02 +0000 2911 promptflow-runtime
INFO Created debug_info Asset: azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_debug_info/versions/1\n2024-01-12
07:53:02 +0000 2911 promptflow-runtime INFO Creating unregistered output
Asset for Run eval_run_name...\n2024-01-12 07:53:03 +0000 2911 promptflow-runtime
INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_flow_outputs/versions/1\n2024-01-12
07:53:03 +0000 2911 promptflow-runtime INFO Creating Artifact for Run
eval_run_name...\n2024-01-12 07:53:03 +0000 2911 promptflow-runtime INFO Created
instance_results.jsonl Artifact.\n2024-01-12 07:53:03 +0000 2911 promptflow-runtime
INFO Patching eval_run_name...\n2024-01-12 07:53:03 +0000 2911 promptflow-runtime
INFO Ending the aml run ''eval_run_name'' with status ''Completed''...\n2024-01-12
07:53:04 +0000 49 promptflow-runtime INFO Process 2911 finished\n2024-01-12
07:53:04 +0000 49 promptflow-runtime INFO [49] Child process finished!\n2024-01-12
07:53:04 +0000 49 promptflow-runtime INFO [eval_run_name] End processing
bulk run\n2024-01-12 07:53:04 +0000 49 promptflow-runtime INFO Cleanup
working dir /mnt/host/service/app/39649/requests/eval_run_name for bulk run\n"'
headers:
connection:
- keep-alive
content-length:
- '10617'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.503'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_run.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_run.yaml",
"repo_id": "promptflow",
"token_count": 89116
} | 74 |
flow: ../flows/flow_with_dict_input
data: ../datas/webClassification1.jsonl
column_mapping:
key:
val1: 1
val2: 2
url: ${data.url}
| promptflow/src/promptflow/tests/test_configs/runs/input_with_dict_val.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/runs/input_with_dict_val.yaml",
"repo_id": "promptflow",
"token_count": 70
} | 75 |
from promptflow import tool
@tool
def divide_num(num: int):
return UnserializableClass(num=(int)(num / 2))
class UnserializableClass:
def __init__(self, num: int):
self.num = num
def __str__(self):
return str(self.num) | promptflow/src/promptflow/tests/test_configs/wrong_flows/flow_output_unserializable/divide_num.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/flow_output_unserializable/divide_num.py",
"repo_id": "promptflow",
"token_count": 99
} | 76 |
Subsets and Splits