text
stringlengths 8
1.72M
| id
stringlengths 22
143
| metadata
dict | __index_level_0__
int64 0
104
|
---|---|---|---|
from promptflow import tool


@tool
def passthrough_dict(image_list: list, image_dict: dict):
    """Echo back *image_dict* unchanged.

    *image_list* is accepted but deliberately ignored — this tool exists only
    to exercise composite-image input wiring in flow tests.
    """
    result = image_dict
    return result
| promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/passthrough_dict.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/passthrough_dict.py",
"repo_id": "promptflow",
"token_count": 38
} | 78 |
import random
from promptflow.contracts.multimedia import Image
from promptflow import tool


@tool
def pick_an_image(image_1: Image, image_2: Image) -> Image:
    """Return one of the two supplied images, chosen uniformly at random.

    Makes exactly one call to ``random.choice`` on ``[True, False]`` so the
    selection is a fair coin flip between *image_1* and *image_2*.
    """
    return image_1 if random.choice([True, False]) else image_2
| promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/pick_an_image.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/pick_an_image.py",
"repo_id": "promptflow",
"token_count": 93
} | 79 |
inputs:
text:
type: string
default: dummy_input
outputs:
output_prompt:
type: string
reference: ${sync_fail.output}
nodes:
- name: sync_fail
type: python
source:
type: code
path: sync_fail.py
inputs:
s: ${inputs.text}
| promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/sync_tools_failures/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 109
} | 80 |
[
{
"url": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h"
},
{
"url": "https://www.microsoft.com/en-us/windows/"
}
]
| promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/samples.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification_with_exception/samples.json",
"repo_id": "promptflow",
"token_count": 86
} | 81 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.046'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.085'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.122'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.145'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:06:13 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/simple_eager_flow_data.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '25'
content-md5:
- zt1zN1V/HR5p7N0Sh5396w==
content-type:
- application/octet-stream
last-modified:
- Tue, 23 Jan 2024 06:27:00 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Tue, 23 Jan 2024 06:26:59 GMT
x-ms-meta-name:
- 1e376ce4-7c3b-4683-82ad-412f5cd23626
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 7e65351c-7e4b-4a4d-90f8-304eacdc36bc
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:06:16 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/simple_eager_flow_data.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.136'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.0 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.116'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:06:19 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/simple_with_req/entry.py
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '331'
content-md5:
- bf0G3F/eNgZO8UPfGebSUQ==
content-type:
- application/octet-stream
last-modified:
- Thu, 25 Jan 2024 08:57:55 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Thu, 25 Jan 2024 08:57:54 GMT
x-ms-meta-name:
- c42d946f-2978-4455-8a89-b768c66a9277
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.11.5 (Windows-10-10.0.22621-SP0)
x-ms-date:
- Thu, 25 Jan 2024 09:06:23 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/simple_with_req/entry.py
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/simple_with_req/flow.dag.yaml",
"runId": "name", "runDisplayName": "name", "runExperimentName": "", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/simple_eager_flow_data.jsonl"},
"inputsMapping": {}, "connections": {}, "environmentVariables": {}, "runtimeName":
"fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000",
"sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '791'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '6.962'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "d06599ce-0829-42a7-a641-370cd112bc25",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.256'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "d06599ce-0829-42a7-a641-370cd112bc25",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.369'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "d06599ce-0829-42a7-a641-370cd112bc25",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.195'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "d06599ce-0829-42a7-a641-370cd112bc25",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.154'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "d06599ce-0829-42a7-a641-370cd112bc25",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.413'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/childRuns?endIndex=24&startIndex=0
response:
body:
string: '[{"run_id": "name_0", "status": "Completed", "error": null, "inputs":
{"input_val": "input1"}, "output": {"output": "Hello world! input1"}, "metrics":
null, "request": null, "parent_run_id": "name", "root_run_id": "name", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-25T09:06:53.027652Z",
"end_time": "2024-01-25T09:06:53.029602Z", "index": 0, "api_calls": [{"name":
"my_flow", "type": "Function", "inputs": {"input_val": "input1"}, "output":
"Hello world! input1", "start_time": 1706173613.028362, "end_time": 1706173613.029179,
"error": null, "children": [], "node_name": null, "parent_id": "", "id": "8f02508e-dc43-4e37-a697-78946dcf2c03"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.00195}, "result": {"output": "Hello world! input1"}, "upload_metrics":
false}]'
headers:
connection:
- keep-alive
content-length:
- '877'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.905'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/childRuns?endIndex=49&startIndex=25
response:
body:
string: '[]'
headers:
connection:
- keep-alive
content-length:
- '2'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '0.675'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/name/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "automatic",
"inputsMapping": {}, "outputDatastoreName": "workspaceblobstore", "childRunBasePath":
"promptflow/PromptFlowArtifacts/name/flow_artifacts", "flowDagFileRelativePath":
"flow.dag.yaml", "flowSnapshotId": "d06599ce-0829-42a7-a641-370cd112bc25",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '1028'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.417'
status:
code: 200
message: OK
- request:
body: '{}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '2'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/metric/v2.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/runs/name/lastvalues
response:
body:
string: '{"value": [{"dataContainerId": "dcid.name", "name": "__pf__.lines.completed",
"columns": {"__pf__.lines.completed": "Double"}, "properties": {"uxMetricType":
"azureml.v1.scalar", "dataLocation": null}, "namespace": null, "standardSchemaId":
null, "value": [{"metricId": "3b56e572-949e-4cdf-9691-3fd55fe5470a", "createdUtc":
"2024-01-25T09:06:55.178+00:00", "step": 0, "data": {"__pf__.lines.completed":
1.0}}]}, {"dataContainerId": "dcid.name", "name": "__pf__.lines.failed", "columns":
{"__pf__.lines.failed": "Double"}, "properties": {"uxMetricType": "azureml.v1.scalar",
"dataLocation": null}, "namespace": null, "standardSchemaId": null, "value":
[{"metricId": "1b966973-ccb1-450f-82d8-cf17745b01b3", "createdUtc": "2024-01-25T09:06:55.662+00:00",
"step": 0, "data": {"__pf__.lines.failed": 0.0}}]}]}'
headers:
connection:
- keep-alive
content-length:
- '1240'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '1.547'
status:
code: 200
message: OK
- request:
body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true,
"selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1706173599, "rootRunId": "name", "createdUtc":
"2024-01-25T09:06:39.0044382+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": "100320005227D154", "userIdp": null, "userAltSecId": null, "userIss":
"https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "Han Wang", "upn": null},
"userId": "00000000-0000-0000-0000-000000000000", "token": null, "tokenExpiryTimeUtc":
null, "error": null, "warnings": null, "revision": 6, "statusRevision": 3,
"runUuid": "7db32729-6b4a-426b-9d8a-daf8cf60ba28", "parentRunUuid": null,
"rootRunUuid": "7db32729-6b4a-426b-9d8a-daf8cf60ba28", "lastStartTimeUtc":
null, "currentComputeTime": null, "computeDuration": "00:00:08.3010965", "effectiveStartTimeUtc":
null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": "100320005227D154", "userIdp": null, "userAltSecId": null, "userIss":
"https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "Han Wang", "upn": "[email protected]"},
"lastModifiedUtc": "2024-01-25T09:06:56.1006972+00:00", "duration": "00:00:08.3010965",
"cancelationReason": null, "currentAttemptId": 1, "runId": "name", "parentRunId":
null, "experimentId": "64465848-e4a8-42a2-a617-d7f0fcda6f32", "status": "Completed",
"startTimeUtc": "2024-01-25T09:06:48.6457164+00:00", "endTimeUtc": "2024-01-25T09:06:56.9468129+00:00",
"scheduleId": null, "displayName": "name", "name": null, "dataContainerId":
"dcid.name", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun",
"runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow",
"computeType": null}, "properties": {"azureml.promptflow.runtime_name": "automatic",
"azureml.promptflow.runtime_version": "20240116.v1", "azureml.promptflow.definition_file_name":
"flow.dag.yaml", "azureml.promptflow.flow_lineage_id": "63d67b0b61e34b0527b5f3c46dfc953854138b2ffebc68175580285dc2d95663",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/79819672296a1785a95c65c8c0e75b0d/simple_with_req/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl",
"_azureml.evaluation_run": "promptflow.BatchRun", "azureml.promptflow.session_id":
"b905697d9d04e1b8c87c12d30eb37326380d5cfeb7d0500e", "azureml.promptflow.snapshot_id":
"d06599ce-0829-42a7-a641-370cd112bc25", "azureml.promptflow.run_mode": "Eager",
"azureml.promptflow.total_tokens": "0", "_azureml.evaluate_artifacts": "[{\"path\":
\"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters": {}, "actionUris":
{}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [],
"tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets":
[], "runDefinition": null, "jobSpecification": null, "primaryMetricName":
null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri":
null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId":
"azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1",
"type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_flow_outputs/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-length:
- '4485'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.030'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.11.5 (Windows-10-10.0.22621-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/logContent
response:
body:
string: '"2024-01-25 09:06:42 +0000 162 promptflow-runtime INFO [name]
Receiving v2 bulk run request 393a559d-2afa-4415-ae87-b7ec7966ab02: {\"flow_id\":
\"name\", \"flow_run_id\": \"name\", \"flow_source\": {\"flow_source_type\":
1, \"flow_source_info\": {\"snapshot_id\": \"d06599ce-0829-42a7-a641-370cd112bc25\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-25T08%3A48%3A12Z&ske=2024-01-26T16%3A58%3A12Z&sks=b&skv=2019-07-07&st=2024-01-25T08%3A56%3A40Z&se=2024-01-25T17%3A06%3A40Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/e62bc4d5a164939b21d42dd420469da7/simple_eager_flow_data.jsonl\"},
\"azure_storage_setting\": {\"azure_storage_mode\": 1, \"storage_account_name\":
\"promptfloweast4063704120\", \"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\",
\"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/name\", \"blob_container_sas_token\":
\"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-25T09%3A06%3A40Z&ske=2024-02-01T09%3A06%3A40Z&sks=b&skv=2019-07-07&se=2024-02-01T09%3A06%3A40Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-25 09:06:42 +0000 162
promptflow-runtime INFO Runtime version: 20240116.v1. PromptFlow version:
0.0.116642424\n2024-01-25 09:06:42 +0000 162 promptflow-runtime INFO Updating
name to Status.Preparing...\n2024-01-25 09:06:42 +0000 162 promptflow-runtime
INFO Downloading snapshot to /mnt/host/service/app/42871/requests/name\n2024-01-25
09:06:42 +0000 162 promptflow-runtime INFO Get snapshot sas url for
d06599ce-0829-42a7-a641-370cd112bc25.\n2024-01-25 09:06:42 +0000 162 promptflow-runtime
INFO Snapshot d06599ce-0829-42a7-a641-370cd112bc25 contains 3 files.\n2024-01-25
09:06:42 +0000 162 promptflow-runtime INFO Download snapshot d06599ce-0829-42a7-a641-370cd112bc25
completed.\n2024-01-25 09:06:42 +0000 162 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/42871/requests/name\n2024-01-25
09:06:42 +0000 162 promptflow-runtime INFO About to execute a python
flow.\n2024-01-25 09:06:42 +0000 162 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-25 09:06:42 +0000 162 promptflow-runtime
INFO Starting to check process 305 status for run name\n2024-01-25 09:06:43
+0000 162 promptflow-runtime INFO Start checking run status for run
name\n2024-01-25 09:06:47 +0000 305 promptflow-runtime INFO [162--305]
Start processing flowV2......\n2024-01-25 09:06:47 +0000 305 promptflow-runtime
INFO Runtime version: 20240116.v1. PromptFlow version: 0.0.116642424\n2024-01-25
09:06:47 +0000 305 promptflow-runtime INFO Setting mlflow tracking
uri...\n2024-01-25 09:06:47 +0000 305 promptflow-runtime INFO Validating
''AzureML Data Scientist'' user authentication...\n2024-01-25 09:06:47 +0000 305
promptflow-runtime INFO Successfully validated ''AzureML Data Scientist''
user authentication.\n2024-01-25 09:06:47 +0000 305 promptflow-runtime
INFO Using AzureMLRunStorageV2\n2024-01-25 09:06:47 +0000 305 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-25
09:06:47 +0000 305 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-25 09:06:47 +0000 305 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-25
09:06:48 +0000 305 promptflow-runtime INFO Resolve data from url finished
in 0.603015494000033 seconds\n2024-01-25 09:06:48 +0000 305 promptflow-runtime
INFO Starting the aml run ''name''...\n2024-01-25 09:06:48 +0000 305
execution WARNING Starting run without column mapping may lead to
unexpected results. Please consult the following documentation for more information:
https://aka.ms/pf/column-mapping\n2024-01-25 09:06:48 +0000 305 execution.bulk INFO Set
process count to 1 by taking the minimum value among the factors of {''default_worker_count'':
4, ''row_count'': 1}.\n2024-01-25 09:06:52 +0000 305 execution.bulk INFO Process
name(ForkProcess-2:2:1)-Process id(375)-Line number(0) start execution.\n2024-01-25
09:06:53 +0000 305 execution.bulk INFO Process name(ForkProcess-2:2:1)-Process
id(375)-Line number(0) completed.\n2024-01-25 09:06:53 +0000 305 execution.bulk INFO Finished
1 / 1 lines.\n2024-01-25 09:06:53 +0000 305 execution.bulk INFO Average
execution time for completed lines: 5.0 seconds. Estimated time for incomplete
lines: 0.0 seconds.\n2024-01-25 09:06:55 +0000 305 promptflow-runtime
INFO Post processing batch result...\n2024-01-25 09:06:56 +0000 305
execution.bulk INFO Upload status summary metrics for run name finished
in 0.8607595070000116 seconds\n2024-01-25 09:06:56 +0000 305 promptflow-runtime
INFO Successfully write run properties {\"azureml.promptflow.total_tokens\":
0, \"_azureml.evaluate_artifacts\": \"[{\\\"path\\\": \\\"instance_results.jsonl\\\",
\\\"type\\\": \\\"table\\\"}]\"} with run id ''name''\n2024-01-25 09:06:56
+0000 305 execution.bulk INFO Upload RH properties for run name
finished in 0.07683961700001873 seconds\n2024-01-25 09:06:56 +0000 305
promptflow-runtime INFO Creating unregistered output Asset for Run name...\n2024-01-25
09:06:56 +0000 305 promptflow-runtime INFO Created debug_info Asset:
azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1\n2024-01-25
09:06:56 +0000 305 promptflow-runtime INFO Creating unregistered output
Asset for Run name...\n2024-01-25 09:06:56 +0000 305 promptflow-runtime
INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_flow_outputs/versions/1\n2024-01-25
09:06:56 +0000 305 promptflow-runtime INFO Creating Artifact for Run
name...\n2024-01-25 09:06:56 +0000 305 promptflow-runtime INFO Created
instance_results.jsonl Artifact.\n2024-01-25 09:06:56 +0000 305 promptflow-runtime
INFO Patching name...\n2024-01-25 09:06:56 +0000 305 promptflow-runtime
INFO Ending the aml run ''name'' with status ''Completed''...\n"'
headers:
connection:
- keep-alive
content-length:
- '7934'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.632'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_eager_flow_crud.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_eager_flow_crud.yaml",
"repo_id": "promptflow",
"token_count": 20704
} | 82 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.034'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.127'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceworkingdirectory
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceworkingdirectory",
"name": "workspaceworkingdirectory", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
false, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureFile", "accountName": "fake_account_name",
"fileShareName": "fake-file-share-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "None"}, "systemData": {"createdAt":
"2023-04-08T02:53:06.6001169+00:00", "createdBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"createdByType": "Application", "lastModifiedAt": "2023-04-08T02:53:07.2885525+00:00",
"lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", "lastModifiedByType":
"Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1161'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.088'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceworkingdirectory/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.178'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:20:46 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/LocalUpload?restype=directory
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ResourceAlreadyExists</Code><Message>The
specified resource already exists.\nRequestId:6129333d-f01a-008a-1530-45563d000000\nTime:2024-01-12T08:20:47.2521286Z</Message></Error>"
headers:
content-length:
- '228'
content-type:
- application/xml
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
- ResourceAlreadyExists
x-ms-version:
- '2023-08-03'
status:
code: 409
message: The specified resource already exists.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:20:48 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users?restype=directory
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ResourceAlreadyExists</Code><Message>The
specified resource already exists.\nRequestId:86b8d942-f01a-0103-4330-4512e5000000\nTime:2024-01-12T08:20:49.4233757Z</Message></Error>"
headers:
content-length:
- '228'
content-type:
- application/xml
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
- ResourceAlreadyExists
x-ms-version:
- '2023-08-03'
status:
code: 409
message: The specified resource already exists.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:20:49 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users%2Funknown_user?restype=directory
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ResourceAlreadyExists</Code><Message>The
specified resource already exists.\nRequestId:ecf357ab-601a-000e-3630-45243c000000\nTime:2024-01-12T08:20:50.4359042Z</Message></Error>"
headers:
content-length:
- '228'
content-type:
- application/xml
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
- ResourceAlreadyExists
x-ms-version:
- '2023-08-03'
status:
code: 409
message: The specified resource already exists.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:20:50 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users%2Funknown_user%2Fpromptflow?restype=directory
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ResourceAlreadyExists</Code><Message>The
specified resource already exists.\nRequestId:c3caa163-701a-004f-3930-457cd8000000\nTime:2024-01-12T08:20:51.5247623Z</Message></Error>"
headers:
content-length:
- '228'
content-type:
- application/xml
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
- ResourceAlreadyExists
x-ms-version:
- '2023-08-03'
status:
code: 409
message: The specified resource already exists.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:20:51 GMT
x-ms-version:
- '2023-08-03'
method: GET
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users%2Funknown_user%2Fpromptflow%2Fflow_name?restype=directory
response:
body:
string: "\uFEFF<?xml version=\"1.0\" encoding=\"utf-8\"?><Error><Code>ResourceNotFound</Code><Message>The
specified resource does not exist.\nRequestId:2e1ebc56-901a-00a3-7730-456849000000\nTime:2024-01-12T08:20:52.5499479Z</Message></Error>"
headers:
content-length:
- '223'
content-type:
- application/xml
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-error-code:
- ResourceNotFound
x-ms-version:
- '2023-08-03'
status:
code: 404
message: The specified resource does not exist.
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:20:52 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users%2Funknown_user%2Fpromptflow%2Fflow_name?restype=directory
response:
body:
string: ''
headers:
content-length:
- '0'
last-modified:
- Fri, 12 Jan 2024 08:20:53 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Directory
x-ms-file-change-time:
- '2024-01-12T08:20:53.5910056Z'
x-ms-file-creation-time:
- '2024-01-12T08:20:53.5910056Z'
x-ms-file-id:
- '13835071872191954944'
x-ms-file-last-write-time:
- '2024-01-12T08:20:53.5910056Z'
x-ms-file-parent-id:
- '10088082484072808448'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:20:53 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- inherit
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users%2Funknown_user%2Fpromptflow%2Fflow_name%2F__pycache__?restype=directory
response:
body:
string: ''
headers:
content-length:
- '0'
last-modified:
- Fri, 12 Jan 2024 08:20:54 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Directory
x-ms-file-change-time:
- '2024-01-12T08:20:54.6393991Z'
x-ms-file-creation-time:
- '2024-01-12T08:20:54.6393991Z'
x-ms-file-id:
- '13835142240936132608'
x-ms-file-last-write-time:
- '2024-01-12T08:20:54.6393991Z'
x-ms-file-parent-id:
- '13835071872191954944'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-content-length:
- '14'
x-ms-date:
- Fri, 12 Jan 2024 08:20:54 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- Inherit
x-ms-type:
- file
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/.gitattributes
response:
body:
string: ''
headers:
content-length:
- '0'
last-modified:
- Fri, 12 Jan 2024 08:20:55 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- '2024-01-12T08:20:55.6877941Z'
x-ms-file-creation-time:
- '2024-01-12T08:20:55.6877941Z'
x-ms-file-id:
- '13835107056564043776'
x-ms-file-last-write-time:
- '2024-01-12T08:20:55.6877941Z'
x-ms-file-parent-id:
- '13835071872191954944'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: '* text eol=lf
'
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '14'
Content-MD5:
- nYmkCopuDuFj82431amzZw==
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:20:55 GMT
x-ms-range:
- bytes=0-13
x-ms-version:
- '2023-08-03'
x-ms-write:
- update
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/.gitattributes?comp=range
response:
body:
string: ''
headers:
content-length:
- '0'
content-md5:
- nYmkCopuDuFj82431amzZw==
last-modified:
- Fri, 12 Jan 2024 08:20:56 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-last-write-time:
- '2024-01-12T08:20:56.7202574Z'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-content-length:
- '250'
x-ms-date:
- Fri, 12 Jan 2024 08:20:56 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- Inherit
x-ms-type:
- file
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/flow.dag.yaml
response:
body:
string: ''
headers:
content-length:
- '0'
last-modified:
- Fri, 12 Jan 2024 08:20:57 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- '2024-01-12T08:20:57.7935437Z'
x-ms-file-creation-time:
- '2024-01-12T08:20:57.7935437Z'
x-ms-file-id:
- '13835177425308221440'
x-ms-file-last-write-time:
- '2024-01-12T08:20:57.7935437Z'
x-ms-file-parent-id:
- '13835071872191954944'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: "inputs:\n name:\n type: string\n default: hod\noutputs:\n result:\n
\ type: string\n reference: ${hello_world.output}\nnodes:\n- name: hello_world\n
\ type: python\n source:\n type: code\n path: hello_world.py\n inputs:\n
\ name: ${inputs.name}\n"
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '250'
Content-MD5:
- CT1FTZp5JScB8fq+HjnINw==
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:20:57 GMT
x-ms-range:
- bytes=0-249
x-ms-version:
- '2023-08-03'
x-ms-write:
- update
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/flow.dag.yaml?comp=range
response:
body:
string: ''
headers:
content-length:
- '0'
content-md5:
- CT1FTZp5JScB8fq+HjnINw==
last-modified:
- Fri, 12 Jan 2024 08:20:58 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-last-write-time:
- '2024-01-12T08:20:58.8299884Z'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-content-length:
- '105'
x-ms-date:
- Fri, 12 Jan 2024 08:20:58 GMT
x-ms-file-attributes:
- none
x-ms-file-creation-time:
- now
x-ms-file-last-write-time:
- now
x-ms-file-permission:
- Inherit
x-ms-type:
- file
x-ms-version:
- '2023-08-03'
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/hello_world.py
response:
body:
string: ''
headers:
content-length:
- '0'
last-modified:
- Fri, 12 Jan 2024 08:20:59 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- '2024-01-12T08:20:59.8445310Z'
x-ms-file-creation-time:
- '2024-01-12T08:20:59.8445310Z'
x-ms-file-id:
- '13835089464377999360'
x-ms-file-last-write-time:
- '2024-01-12T08:20:59.8445310Z'
x-ms-file-parent-id:
- '13835071872191954944'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: "from promptflow import tool\n\n\n@tool\ndef hello_world(name: str) -> str:\n
\ return f\"Hello World {name}!\"\n"
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '105'
Content-MD5:
- fGMkkiZAjGs8PW/AMiYppA==
Content-Type:
- application/octet-stream
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:20:59 GMT
x-ms-range:
- bytes=0-104
x-ms-version:
- '2023-08-03'
x-ms-write:
- update
method: PUT
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/hello_world.py?comp=range
response:
body:
string: ''
headers:
content-length:
- '0'
content-md5:
- fGMkkiZAjGs8PW/AMiYppA==
last-modified:
- Fri, 12 Jan 2024 08:21:00 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-last-write-time:
- '2024-01-12T08:21:00.8660433Z'
x-ms-request-server-encrypted:
- 'true'
x-ms-version:
- '2023-08-03'
status:
code: 201
message: Created
- request:
body: '{"flowName": "flow_display_name", "description": "test flow description",
"tags": {"owner": "sdk-test"}, "flowDefinitionFilePath": "Users/unknown_user/promptflow/flow_name/flow.dag.yaml",
"flowType": "default"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '282'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/Flows
response:
body:
string: '{"eTag": {}, "studioPortalEndpoint": "https://ml.azure.com/prompts/flow/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/f3886259-e2d7-4acc-880a-69cd2ed547cc/details?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"flowId": "f3886259-e2d7-4acc-880a-69cd2ed547cc", "flowName": "flow_display_name",
"description": "test flow description", "tags": {"owner": "sdk-test"}, "flowType":
"Default", "experimentId": "00000000-0000-0000-0000-000000000000", "createdDate":
"2024-01-12T08:21:03.4561663Z", "lastModifiedDate": "2024-01-12T08:21:03.456185Z",
"owner": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"},
"flowResourceId": "azureml://locations/eastus/workspaces/00000/flows/f3886259-e2d7-4acc-880a-69cd2ed547cc",
"isArchived": false, "flowDefinitionFilePath": "Users/unknown_user/promptflow/flow_name/flow.dag.yaml"}'
headers:
connection:
- keep-alive
content-length:
- '1099'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.560'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-file-share/12.14.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:21:03 GMT
x-ms-version:
- '2023-08-03'
method: HEAD
uri: https://fake_account_name.file.core.windows.net/fake-file-share-name/Users/unknown_user/promptflow/flow_name/flow.dag.yaml
response:
body:
string: ''
headers:
content-length:
- '250'
content-type:
- application/octet-stream
last-modified:
- Fri, 12 Jan 2024 08:20:58 GMT
server:
- Windows-Azure-File/1.0 Microsoft-HTTPAPI/2.0
x-ms-file-attributes:
- Archive
x-ms-file-change-time:
- '2024-01-12T08:20:58.8299884Z'
x-ms-file-creation-time:
- '2024-01-12T08:20:57.7935437Z'
x-ms-file-id:
- '13835177425308221440'
x-ms-file-last-write-time:
- '2024-01-12T08:20:58.8299884Z'
x-ms-file-parent-id:
- '13835071872191954944'
x-ms-type:
- File
x-ms-version:
- '2023-08-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.144'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.206'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:21:07 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/simple_hello_world.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '22'
content-md5:
- SaVvJK8fXJzgPgQkmSaCGA==
content-type:
- application/octet-stream
last-modified:
- Thu, 23 Nov 2023 12:11:21 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Thu, 23 Nov 2023 12:11:20 GMT
x-ms-meta-name:
- 74c8f1fa-9e14-4249-8fec-279efedeb400
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 2266d840-3ecd-4a91-9e63-8d57e7b0a62e
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:21:08 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/simple_hello_world.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionResourceId": "azureml://locations/fake-region/workspaces/00000/flows/00000000-0000-0000-0000-000000000000/",
"runId": "name", "runDisplayName": "name", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/simple_hello_world.jsonl"},
"inputsMapping": {"name": "${data.name}"}, "connections": {}, "environmentVariables":
{}, "runtimeName": "fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000",
"sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '727'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '7.332'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/f3886259-e2d7-4acc-880a-69cd2ed547cc/flowRuns/name",
"flowRunId": "name", "flowRunDisplayName": "name", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/79f088fae0e502653c43146c9682f425/simple_hello_world.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.name}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/f3886259-e2d7-4acc-880a-69cd2ed547cc/name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "5003f634-d7a8-42ec-a7a9-519755b1c1fa",
"studioPortalEndpoint": "https://ml.azure.com/runs/name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12944'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.319'
status:
code: 200
message: OK
- request:
body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true,
"selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705047673, "rootRunId": "name", "createdUtc":
"2024-01-12T08:21:13.4108336+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 3,
"statusRevision": 1, "runUuid": "726be460-9edf-49d2-a74f-066dc85d64ba", "parentRunUuid":
null, "rootRunUuid": "726be460-9edf-49d2-a74f-066dc85d64ba", "lastStartTimeUtc":
null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc":
null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:21:17.8855829+00:00", "duration":
null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name", "parentRunId":
null, "experimentId": "ea890465-3022-4c1a-b6d8-894897cbe16d", "status": "Preparing",
"startTimeUtc": null, "endTimeUtc": null, "scheduleId": null, "displayName":
"name", "name": null, "dataContainerId": "dcid.name", "description": null,
"hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator":
null, "traits": [], "attribution": "PromptFlow", "computeType": "AmlcDsi"},
"properties": {"azureml.promptflow.runtime_name": "test-runtime-ci", "azureml.promptflow.runtime_version":
"20231204.v4", "azureml.promptflow.definition_file_name": "flow.dag.yaml",
"azureml.promptflow.session_id": "f3886259-e2d7-4acc-880a-69cd2ed547cc", "azureml.promptflow.flow_lineage_id":
"f3886259-e2d7-4acc-880a-69cd2ed547cc", "azureml.promptflow.flow_definition_resource_id":
"azureml://locations/eastus/workspaces/00000/flows/f3886259-e2d7-4acc-880a-69cd2ed547cc",
"azureml.promptflow.flow_id": "f3886259-e2d7-4acc-880a-69cd2ed547cc", "azureml.promptflow.input_data":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/79f088fae0e502653c43146c9682f425/simple_hello_world.jsonl",
"azureml.promptflow.inputs_mapping": "{\"name\":\"${data.name}\"}", "_azureml.evaluation_run":
"promptflow.BatchRun", "azureml.promptflow.snapshot_id": "5003f634-d7a8-42ec-a7a9-519755b1c1fa"},
"parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets":
[], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets":
[], "runDefinition": null, "jobSpecification": null, "primaryMetricName":
null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri":
null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition":
null, "jobSpecification": null, "systemSettings": null}'
headers:
connection:
- keep-alive
content-length:
- '3930'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.045'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_with_remote_flow.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_with_remote_flow.yaml",
"repo_id": "promptflow",
"token_count": 25860
} | 83 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.024'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.099'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.107'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.169'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:08:21 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '379'
content-md5:
- lI/pz9jzTQ7Td3RHPL7y7w==
content-type:
- application/octet-stream
last-modified:
- Mon, 06 Nov 2023 08:30:18 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Mon, 06 Nov 2023 08:30:18 GMT
x-ms-meta-name:
- 94331215-cf7f-452a-9f1a-1d276bc9b0e4
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 3f163752-edb0-4afc-a6f5-b0a670bd7c24
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:08:22 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.090'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.126'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:08:25 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/hello-world/flow.dag.yaml
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '266'
content-md5:
- UZm3TyOoKWjSR23+Up6qUA==
content-type:
- application/octet-stream
last-modified:
- Tue, 19 Dec 2023 06:05:25 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Tue, 19 Dec 2023 06:05:25 GMT
x-ms-meta-name:
- 7b68bf5e-6ef4-4eb3-9f49-28f9a5baad87
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:08:26 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/hello-world/flow.dag.yaml
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/hello-world/flow.dag.yaml",
"runId": "batch_run_name", "runDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"runExperimentName": "", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"},
"inputsMapping": {"name": "${data.url}"}, "connections": {}, "environmentVariables":
{}, "runtimeName": "fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000",
"sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '812'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"batch_run_name"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '7.455'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "477088e3-9e08-439c-a5ec-266ca0d49abc",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12912'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.422'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "477088e3-9e08-439c-a5ec-266ca0d49abc",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12912'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.359'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "477088e3-9e08-439c-a5ec-266ca0d49abc",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12912'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.425'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "477088e3-9e08-439c-a5ec-266ca0d49abc",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12912'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.452'
status:
code: 200
message: OK
- request:
body: '{"displayName": "test_display_name_test_mark", "description": "test_description_test_mark",
"tags": {"test_tag": "test_mark"}}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '207'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: PATCH
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/runs/batch_run_name/modify
response:
body:
string: '{"runNumber": 1705046913, "rootRunId": "batch_run_name", "createdUtc":
"2024-01-12T08:08:33.1182665+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 7,
"statusRevision": 3, "runUuid": "e04b700a-c607-479f-bf48-2ee2e2da3961", "parentRunUuid":
null, "rootRunUuid": "e04b700a-c607-479f-bf48-2ee2e2da3961", "lastStartTimeUtc":
null, "currentComputeTime": null, "computeDuration": "00:00:04.0069699", "effectiveStartTimeUtc":
null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:09:13.903583+00:00", "duration":
"00:00:04.0069699", "cancelationReason": null, "currentAttemptId": 1, "runId":
"batch_run_name", "parentRunId": null, "experimentId": "00000000-0000-0000-0000-000000000000",
"status": "Completed", "startTimeUtc": "2024-01-12T08:08:50.7845738+00:00",
"endTimeUtc": "2024-01-12T08:08:54.7915437+00:00", "scheduleId": null, "displayName":
"test_display_name_test_mark", "name": null, "dataContainerId": "dcid.batch_run_name",
"description": "test_description_test_mark", "hidden": false, "runType": "azureml.promptflow.FlowRun",
"runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow",
"computeType": "AmlcDsi"}, "properties": {"azureml.promptflow.runtime_name":
"test-runtime-ci", "azureml.promptflow.runtime_version": "20231204.v4", "azureml.promptflow.definition_file_name":
"flow.dag.yaml", "azureml.promptflow.session_id": "bee356189f7e7f18671a79369c78df4cfb1bbd0c99069074",
"azureml.promptflow.flow_lineage_id": "f7ee724d91e4f4a7501bdc0b66995bc8b57f86b3a526fa2a81c34ebcccbbd912",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/36774154bc3ecde4aa21054b3052221f/hello-world/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl",
"azureml.promptflow.inputs_mapping": "{\"name\":\"${data.url}\"}", "_azureml.evaluation_run":
"promptflow.BatchRun", "azureml.promptflow.snapshot_id": "477088e3-9e08-439c-a5ec-266ca0d49abc",
"azureml.promptflow.total_tokens": "0", "_azureml.evaluate_artifacts": "[{\"path\":
\"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters": {}, "actionUris":
{}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [],
"tags": {"test_tag": "test_mark"}, "settings": {}, "services": {}, "inputDatasets":
[], "outputDatasets": [], "runDefinition": null, "jobSpecification": null,
"primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri":
null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": null}'
headers:
connection:
- keep-alive
content-length:
- '3985'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.310'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "477088e3-9e08-439c-a5ec-266ca0d49abc",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12912'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.479'
status:
code: 200
message: OK
- request:
body: '{}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '2'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: PATCH
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/runs/batch_run_name/modify
response:
body:
string: '{"runNumber": 1705046913, "rootRunId": "batch_run_name", "createdUtc":
"2024-01-12T08:08:33.1182665+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 7,
"statusRevision": 3, "runUuid": "e04b700a-c607-479f-bf48-2ee2e2da3961", "parentRunUuid":
null, "rootRunUuid": "e04b700a-c607-479f-bf48-2ee2e2da3961", "lastStartTimeUtc":
null, "currentComputeTime": null, "computeDuration": "00:00:04.0069699", "effectiveStartTimeUtc":
null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:09:24.2248797+00:00", "duration":
"00:00:04.0069699", "cancelationReason": null, "currentAttemptId": 1, "runId":
"batch_run_name", "parentRunId": null, "experimentId": "00000000-0000-0000-0000-000000000000",
"status": "Completed", "startTimeUtc": "2024-01-12T08:08:50.7845738+00:00",
"endTimeUtc": "2024-01-12T08:08:54.7915437+00:00", "scheduleId": null, "displayName":
"test_display_name_test_mark", "name": null, "dataContainerId": "dcid.batch_run_name",
"description": "test_description_test_mark", "hidden": false, "runType": "azureml.promptflow.FlowRun",
"runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow",
"computeType": "AmlcDsi"}, "properties": {"azureml.promptflow.runtime_name":
"test-runtime-ci", "azureml.promptflow.runtime_version": "20231204.v4", "azureml.promptflow.definition_file_name":
"flow.dag.yaml", "azureml.promptflow.session_id": "bee356189f7e7f18671a79369c78df4cfb1bbd0c99069074",
"azureml.promptflow.flow_lineage_id": "f7ee724d91e4f4a7501bdc0b66995bc8b57f86b3a526fa2a81c34ebcccbbd912",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/36774154bc3ecde4aa21054b3052221f/hello-world/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl",
"azureml.promptflow.inputs_mapping": "{\"name\":\"${data.url}\"}", "_azureml.evaluation_run":
"promptflow.BatchRun", "azureml.promptflow.snapshot_id": "477088e3-9e08-439c-a5ec-266ca0d49abc",
"azureml.promptflow.total_tokens": "0", "_azureml.evaluate_artifacts": "[{\"path\":
\"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters": {}, "actionUris":
{}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [],
"tags": {"test_tag": "test_mark"}, "settings": {}, "services": {}, "inputDatasets":
[], "outputDatasets": [], "runDefinition": null, "jobSpecification": null,
"primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri":
null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": null}'
headers:
connection:
- keep-alive
content-length:
- '3986'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.073'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "hello_world", "type": "python",
"source": {"type": "code", "path": "hello_world.py"}, "inputs": {"name": "${inputs.name}"},
"tool": "hello_world.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "hello_world.py", "type": "python",
"inputs": {"name": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "hello_world.py", "function":
"hello_world", "is_builtin": false, "enable_kwargs": false, "tool_state":
"stable"}], "inputs": {"name": {"type": "string", "default": "hod", "is_chat_input":
false}}, "outputs": {"result": {"type": "string", "reference": "${hello_world.output}",
"evaluation_only": false, "is_chat_output": false}}}, "flowRunResourceId":
"azureml://locations/eastus/workspaces/00000/flows/batch_run_name/flowRuns/batch_run_name",
"flowRunId": "batch_run_name", "flowRunDisplayName": "sdk-cli-test-fixture-batch-run-without-llm",
"batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"name": "${data.url}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/batch_run_name/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "477088e3-9e08-439c-a5ec-266ca0d49abc",
"studioPortalEndpoint": "https://ml.azure.com/runs/batch_run_name?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12912'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.486'
status:
code: 200
message: OK
- request:
body: '{"runId": "batch_run_name", "selectRunMetadata": true, "selectRunDefinition":
true, "selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705046913, "rootRunId": "batch_run_name",
"createdUtc": "2024-01-12T08:08:33.1182665+00:00", "createdBy": {"userObjectId":
"00000000-0000-0000-0000-000000000000", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 7,
"statusRevision": 3, "runUuid": "e04b700a-c607-479f-bf48-2ee2e2da3961", "parentRunUuid":
null, "rootRunUuid": "e04b700a-c607-479f-bf48-2ee2e2da3961", "lastStartTimeUtc":
null, "currentComputeTime": null, "computeDuration": "00:00:04.0069699", "effectiveStartTimeUtc":
null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:09:13.903583+00:00", "duration":
"00:00:04.0069699", "cancelationReason": null, "currentAttemptId": 1, "runId":
"batch_run_name", "parentRunId": null, "experimentId": "b1e733a1-2a5f-4c17-bc34-4d66d2858228",
"status": "Completed", "startTimeUtc": "2024-01-12T08:08:50.7845738+00:00",
"endTimeUtc": "2024-01-12T08:08:54.7915437+00:00", "scheduleId": null, "displayName":
"test_display_name_test_mark", "name": null, "dataContainerId": "dcid.batch_run_name",
"description": "test_description_test_mark", "hidden": false, "runType": "azureml.promptflow.FlowRun",
"runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow",
"computeType": "AmlcDsi"}, "properties": {"azureml.promptflow.runtime_name":
"test-runtime-ci", "azureml.promptflow.runtime_version": "20231204.v4", "azureml.promptflow.definition_file_name":
"flow.dag.yaml", "azureml.promptflow.session_id": "bee356189f7e7f18671a79369c78df4cfb1bbd0c99069074",
"azureml.promptflow.flow_lineage_id": "f7ee724d91e4f4a7501bdc0b66995bc8b57f86b3a526fa2a81c34ebcccbbd912",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/36774154bc3ecde4aa21054b3052221f/hello-world/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl",
"azureml.promptflow.inputs_mapping": "{\"name\":\"${data.url}\"}", "_azureml.evaluation_run":
"promptflow.BatchRun", "azureml.promptflow.snapshot_id": "477088e3-9e08-439c-a5ec-266ca0d49abc",
"azureml.promptflow.total_tokens": "0", "_azureml.evaluate_artifacts": "[{\"path\":
\"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters": {}, "actionUris":
{}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [],
"tags": {"test_tag": "test_mark"}, "settings": {}, "services": {}, "inputDatasets":
[], "outputDatasets": [], "runDefinition": null, "jobSpecification": null,
"primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri":
null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId":
"azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_debug_info/versions/1",
"type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-length:
- '4773'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.045'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name/logContent
response:
body:
string: '"2024-01-12 08:08:37 +0000 78 promptflow-runtime INFO [batch_run_name]
Receiving v2 bulk run request e934a20c-24c4-4d15-a844-2f2cb1cba4db: {\"flow_id\":
\"batch_run_name\", \"flow_run_id\": \"batch_run_name\", \"flow_source\":
{\"flow_source_type\": 1, \"flow_source_info\": {\"snapshot_id\": \"477088e3-9e08-439c-a5ec-266ca0d49abc\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.batch_run_name/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A43%3A15Z&ske=2024-01-13T15%3A53%3A15Z&sks=b&skv=2019-07-07&st=2024-01-12T07%3A58%3A37Z&se=2024-01-12T16%3A08%3A37Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl\"},
\"inputs_mapping\": {\"name\": \"${data.url}\"}, \"azure_storage_setting\":
{\"azure_storage_mode\": 1, \"storage_account_name\": \"promptfloweast4063704120\",
\"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\",
\"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/batch_run_name\",
\"blob_container_sas_token\": \"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T08%3A08%3A37Z&ske=2024-01-19T08%3A08%3A37Z&sks=b&skv=2019-07-07&se=2024-01-19T08%3A08%3A37Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 08:08:38 +0000 78
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:08:38 +0000 78 promptflow-runtime INFO Updating
batch_run_name to Status.Preparing...\n2024-01-12 08:08:38 +0000 78 promptflow-runtime
INFO Downloading snapshot to /mnt/host/service/app/39415/requests/batch_run_name\n2024-01-12
08:08:38 +0000 78 promptflow-runtime INFO Get snapshot sas url for
477088e3-9e08-439c-a5ec-266ca0d49abc...\n2024-01-12 08:08:45 +0000 78
promptflow-runtime INFO Downloading snapshot 477088e3-9e08-439c-a5ec-266ca0d49abc
from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/477088e3-9e08-439c-a5ec-266ca0d49abc.zip...\n2024-01-12
08:08:45 +0000 78 promptflow-runtime INFO Downloaded file /mnt/host/service/app/39415/requests/batch_run_name/477088e3-9e08-439c-a5ec-266ca0d49abc.zip
with size 495 for snapshot 477088e3-9e08-439c-a5ec-266ca0d49abc.\n2024-01-12
08:08:45 +0000 78 promptflow-runtime INFO Download snapshot 477088e3-9e08-439c-a5ec-266ca0d49abc
completed.\n2024-01-12 08:08:45 +0000 78 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/39415/requests/batch_run_name\n2024-01-12
08:08:45 +0000 78 promptflow-runtime INFO About to execute a python
flow.\n2024-01-12 08:08:45 +0000 78 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-12 08:08:45 +0000 78 promptflow-runtime
INFO Starting to check process 3535 status for run batch_run_name\n2024-01-12
08:08:45 +0000 78 promptflow-runtime INFO Start checking run status
for run batch_run_name\n2024-01-12 08:08:49 +0000 3535 promptflow-runtime
INFO [78--3535] Start processing flowV2......\n2024-01-12 08:08:49 +0000 3535
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:08:49 +0000 3535 promptflow-runtime INFO Setting
mlflow tracking uri...\n2024-01-12 08:08:49 +0000 3535 promptflow-runtime
INFO Validating ''AzureML Data Scientist'' user authentication...\n2024-01-12
08:08:49 +0000 3535 promptflow-runtime INFO Successfully validated
''AzureML Data Scientist'' user authentication.\n2024-01-12 08:08:49 +0000 3535
promptflow-runtime INFO Using AzureMLRunStorageV2\n2024-01-12 08:08:49
+0000 3535 promptflow-runtime INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:08:49 +0000 3535 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-12 08:08:50 +0000 3535 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:08:50 +0000 3535 promptflow-runtime INFO Resolve data from url finished
in 0.4670193735510111 seconds\n2024-01-12 08:08:50 +0000 3535 promptflow-runtime
INFO Starting the aml run ''batch_run_name''...\n2024-01-12 08:08:51 +0000 3535
execution.bulk INFO Using fork, process count: 3\n2024-01-12 08:08:51
+0000 3582 execution.bulk INFO Process 3582 started.\n2024-01-12
08:08:51 +0000 3587 execution.bulk INFO Process 3587 started.\n2024-01-12
08:08:51 +0000 3535 execution.bulk INFO Process name: ForkProcess-44:3,
Process id: 3582, Line number: 0 start execution.\n2024-01-12 08:08:51 +0000 3535
execution.bulk INFO Process name: ForkProcess-44:4, Process id: 3587,
Line number: 1 start execution.\n2024-01-12 08:08:51 +0000 3578 execution.bulk INFO Process
3578 started.\n2024-01-12 08:08:51 +0000 3535 execution.bulk INFO Process
name: ForkProcess-44:3, Process id: 3582, Line number: 0 completed.\n2024-01-12
08:08:51 +0000 3535 execution.bulk INFO Finished 1 / 3 lines.\n2024-01-12
08:08:51 +0000 3535 execution.bulk INFO Process name: ForkProcess-44:2,
Process id: 3578, Line number: 2 start execution.\n2024-01-12 08:08:51 +0000 3535
execution.bulk INFO Average execution time for completed lines: 0.21
seconds. Estimated time for incomplete lines: 0.42 seconds.\n2024-01-12 08:08:51
+0000 3535 execution.bulk INFO Process name: ForkProcess-44:4,
Process id: 3587, Line number: 1 completed.\n2024-01-12 08:08:51 +0000 3535
execution.bulk INFO Finished 2 / 3 lines.\n2024-01-12 08:08:51 +0000 3535
execution.bulk INFO Average execution time for completed lines: 0.14
seconds. Estimated time for incomplete lines: 0.14 seconds.\n2024-01-12 08:08:51
+0000 3535 execution.bulk INFO Process name: ForkProcess-44:2,
Process id: 3578, Line number: 2 completed.\n2024-01-12 08:08:51 +0000 3535
execution.bulk INFO Finished 3 / 3 lines.\n2024-01-12 08:08:51 +0000 3535
execution.bulk INFO Average execution time for completed lines: 0.11
seconds. Estimated time for incomplete lines: 0.0 seconds.\n2024-01-12 08:08:53
+0000 3535 execution.bulk INFO Upload status summary metrics for
run batch_run_name finished in 1.1852441783994436 seconds\n2024-01-12 08:08:53
+0000 3535 promptflow-runtime INFO Successfully write run properties
{\"azureml.promptflow.total_tokens\": 0, \"_azureml.evaluate_artifacts\":
\"[{\\\"path\\\": \\\"instance_results.jsonl\\\", \\\"type\\\": \\\"table\\\"}]\"}
with run id ''batch_run_name''\n2024-01-12 08:08:53 +0000 3535 execution.bulk INFO Upload
RH properties for run batch_run_name finished in 0.07909195311367512 seconds\n2024-01-12
08:08:53 +0000 3535 promptflow-runtime INFO Creating unregistered output
Asset for Run batch_run_name...\n2024-01-12 08:08:54 +0000 3535 promptflow-runtime
INFO Created debug_info Asset: azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_debug_info/versions/1\n2024-01-12
08:08:54 +0000 3535 promptflow-runtime INFO Creating unregistered output
Asset for Run batch_run_name...\n2024-01-12 08:08:54 +0000 3535 promptflow-runtime
INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1\n2024-01-12
08:08:54 +0000 3535 promptflow-runtime INFO Creating Artifact for Run
batch_run_name...\n2024-01-12 08:08:54 +0000 3535 promptflow-runtime INFO Created
instance_results.jsonl Artifact.\n2024-01-12 08:08:54 +0000 3535 promptflow-runtime
INFO Patching batch_run_name...\n2024-01-12 08:08:54 +0000 3535 promptflow-runtime
INFO Ending the aml run ''batch_run_name'' with status ''Completed''...\n2024-01-12
08:08:56 +0000 78 promptflow-runtime INFO Process 3535 finished\n2024-01-12
08:08:56 +0000 78 promptflow-runtime INFO [78] Child process finished!\n2024-01-12
08:08:56 +0000 78 promptflow-runtime INFO [batch_run_name] End processing
bulk run\n2024-01-12 08:08:56 +0000 78 promptflow-runtime INFO Cleanup
working dir /mnt/host/service/app/39415/requests/batch_run_name for bulk run\n"'
headers:
connection:
- keep-alive
content-length:
- '9817'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.527'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_update_run.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_update_run.yaml",
"repo_id": "promptflow",
"token_count": 58301
} | 84 |
name: flow_run_20230629_101205
flow: ../flows/web_classification
data: ../datas/webClassification1.jsonl
column_mapping:
url: "${data.url}"
variant: ${summarize_text_content.variant_0}
resources:
instance_type: Standard_D2 # optional, server default value
idle_time_before_shutdown_minutes: 60 #optional, server default value
| promptflow/src/promptflow/tests/test_configs/runs/sample_bulk_run_with_resources.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/runs/sample_bulk_run_with_resources.yaml",
"repo_id": "promptflow",
"token_count": 115
} | 85 |
from enum import Enum
from promptflow.entities import InputSetting
from promptflow import tool
class UserType(str, Enum):
    """The kind of user invoking the tool: a student or a teacher."""

    STUDENT = "student"
    TEACHER = "teacher"
@tool(
    name="My Tool with Enabled By Value",
    description="This is my tool with enabled by value",
    input_settings={
        "teacher_id": InputSetting(enabled_by="user_type", enabled_by_value=[UserType.TEACHER]),
        "student_id": InputSetting(enabled_by="user_type", enabled_by_value=[UserType.STUDENT]),
    }
)
def my_tool(user_type: UserType, student_id: str = "", teacher_id: str = "") -> str:
    """Return the id that matches the given user type (enabled-by feature demo).

    :param user_type: user type, student or teacher.
    :param student_id: student id; returned when user_type is student.
    :param teacher_id: teacher id; returned when user_type is teacher.
    :return: student_id for a student, teacher_id for a teacher.
    :raises ValueError: if user_type is not a valid UserType member.
    """
    if user_type == UserType.STUDENT:
        return student_id
    if user_type == UserType.TEACHER:
        return teacher_id
    # ValueError (a subclass of Exception) keeps any existing `except Exception`
    # callers working while signalling an invalid-argument error precisely.
    raise ValueError(f"Invalid user type: {user_type!r}; expected 'student' or 'teacher'.")
| promptflow/src/promptflow/tests/test_configs/tools/tool_with_enabled_by_value.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/tools/tool_with_enabled_by_value.py",
"repo_id": "promptflow",
"token_count": 396
} | 86 |
inputs:
outputs:
content:
type: string
reference: ${divide_num.output}
nodes:
- name: divide_num
type: python
source:
type: code
path: divide_num.py
inputs:
num: ${divide_num_2.output}
- name: divide_num_1
type: python
source:
type: code
path: divide_num.py
inputs:
num: ${divide_num.output}
- name: divide_num_2
type: python
source:
type: code
path: divide_num.py
inputs:
num: ${divide_num_1.output}
| promptflow/src/promptflow/tests/test_configs/wrong_flows/node_circular_dependency/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/node_circular_dependency/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 202
} | 87 |
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
exclude: '(^docs/)|flows|scripts|src/promptflow/promptflow/azure/_restclient/|src/promptflow/tests/test_configs|src/promptflow-tools'
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v3.2.0
hooks:
- id: trailing-whitespace
- id: end-of-file-fixer
- id: check-yaml
- id: check-json
- id: check-merge-conflict
- repo: https://github.com/psf/black
rev: 22.3.0 # Replace by any tag/version: https://github.com/psf/black/tags
hooks:
- id: black
language_version: python3 # Should be a command that runs python3.6+
args:
- "--line-length=120"
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v2.3.0
hooks:
- id: flake8
# Temporary disable this since it gets stuck when updating env
- repo: https://github.com/streetsidesoftware/cspell-cli
rev: v7.3.0
hooks:
- id: cspell
args: ['--config', '.cspell.json', "--no-must-find-files"]
- repo: https://github.com/hadialqattan/pycln
rev: v2.1.2 # Possible releases: https://github.com/hadialqattan/pycln/tags
hooks:
- id: pycln
name: "Clean unused python imports"
args: [--config=setup.cfg]
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
- id: isort
# stages: [commit]
name: isort-python
# Use black profile for isort to avoid conflicts
# see https://github.com/PyCQA/isort/issues/1518
args: ["--profile", "black", --line-length=120]
| promptflow/.pre-commit-config.yaml/0 | {
"file_path": "promptflow/.pre-commit-config.yaml",
"repo_id": "promptflow",
"token_count": 812
} | 0 |
In prompt flow, you can utilize connections to securely manage credentials or secrets for external services.
# Connections
Connections are for storing information about how to access external services like LLMs: endpoint, api keys etc.
- In your local development environment, the connections are persisted in your local machine with keys encrypted.
- In Azure AI, connections can be configured to be shared across the entire workspace. Secrets associated with connections are securely persisted in the corresponding Azure Key Vault, adhering to robust security and compliance standards.
Prompt flow provides a variety of pre-built connections, including Azure Open AI, Open AI, etc. These pre-built connections enable seamless integration with these resources within the built-in tools. Additionally, you have the flexibility to create custom connection types using key-value pairs, empowering you to tailor the connections to your specific requirements, particularly in Python tools.
| Connection type | Built-in tools |
| ------------------------------------------------------------ | ------------------------------- |
| [Azure Open AI](https://azure.microsoft.com/en-us/products/cognitive-services/openai-service) | LLM or Python |
| [Open AI](https://openai.com/) | LLM or Python |
| [Cognitive Search](https://azure.microsoft.com/en-us/products/search) | Vector DB Lookup or Python |
| [Serp](https://serpapi.com/) | Serp API or Python |
| Custom | Python |
By leveraging connections in prompt flow, you can easily establish and manage connections to external APIs and data sources, facilitating efficient data exchange and interaction within their AI applications.
## Next steps
- [Create connections](../how-to-guides/manage-connections.md) | promptflow/docs/concepts/concept-connections.md/0 | {
"file_path": "promptflow/docs/concepts/concept-connections.md",
"repo_id": "promptflow",
"token_count": 641
} | 1 |
# Develop evaluation flow
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../faq.md#stable-vs-experimental).
:::
The evaluation flow is a flow to test/evaluate the quality of your LLM application (standard/chat flow). It usually runs on the outputs of standard/chat flow, and computes key metrics that can be used to determine whether the standard/chat flow performs well. See [Flows](../../concepts/concept-flows.md) for more information.
Before proceeding with this document, it is important to have a good understanding of the standard flow. Please make sure you have read [Develop standard flow](./develop-standard-flow.md), since they share many common features and these features won't be repeated in this doc, such as:
- `Inputs/Outputs definition`
- `Nodes`
- `Chain nodes in a flow`
While the evaluation flow shares similarities with the standard flow, there are some important differences that set it apart. The main distinctions are as follows:
- `Inputs from an existing run`: The evaluation flow contains inputs that are derived from the outputs of the standard/chat flow. These inputs are used for evaluation purposes.
- `Aggregation node`: The evaluation flow contains one or more aggregation nodes, where the actual evaluation takes place. These nodes are responsible for computing metrics and determining the performance of the standard/chat flow.
## Evaluation flow example
In this guide, we use [eval-classification-accuracy](https://github.com/microsoft/promptflow/tree/main/examples/flows/evaluation/eval-classification-accuracy) flow as an example of the evaluation flow. This is a flow illustrating how to evaluate the performance of a classification flow. It involves comparing each prediction to the groundtruth, assigning a `Correct` or `Incorrect` grade, and aggregating the results to produce metrics such as `accuracy`, which reflects how good the system is at classifying the data.
## Flow inputs
The flow `eval-classification-accuracy` contains two inputs:
```yaml
inputs:
groundtruth:
type: string
description: Groundtruth of the original question, it's the correct label that you hope your standard flow could predict.
default: APP
prediction:
type: string
description: The actual predicted outputs that your flow produces.
default: APP
```
As evident from the inputs description, the evaluation flow requires two specific inputs:
- `groundtruth`: This input represents the actual or expected values against which the performance of the standard/chat flow will be evaluated.
- `prediction`: The prediction input is derived from the outputs of another standard/chat flow. It contains the predicted values generated by the standard/chat flow, which will be compared to the groundtruth values during the evaluation process.
From the definition perspective, there is no difference compared with adding an input/output in a `standard/chat flow`. However when running an evaluation flow, you may need to specify the data source from both data file and flow run outputs. For more details please refer to [Run and evaluate a flow](../run-and-evaluate-a-flow/index.md#evaluate-your-flow).
## Aggregation node
Before introducing the aggregation node, let's see what a regular node looks like, we use node `grade` in the example flow for instance:
```yaml
- name: grade
type: python
source:
type: code
path: grade.py
inputs:
groundtruth: ${inputs.groundtruth}
prediction: ${inputs.prediction}
```
It takes both `groundtruth` and `prediction` from the flow inputs, compare them in the source code to see if they match:
```python
from promptflow import tool
@tool
def grade(groundtruth: str, prediction: str):
return "Correct" if groundtruth.lower() == prediction.lower() else "Incorrect"
```
When it comes to an `aggregation node`, there are two key distinctions that set it apart from a regular node:
1. It has an attribute `aggregation` set to be `true`.
```yaml
- name: calculate_accuracy
type: python
source:
type: code
path: calculate_accuracy.py
inputs:
grades: ${grade.output}
aggregation: true # Add this attribute to make it an aggregation node
```
2. Its source code accepts a `List` type parameter which is a collection of the previous regular node's outputs.
```python
from typing import List
from promptflow import log_metric, tool
@tool
def calculate_accuracy(grades: List[str]):
result = []
for index in range(len(grades)):
grade = grades[index]
result.append(grade)
# calculate accuracy for each variant
accuracy = round((result.count("Correct") / len(result)), 2)
log_metric("accuracy", accuracy)
return result
```
The parameter `grades` in above function, contains all results that are produced by the regular node `grade`. Assuming the referred standard flow run has 3 outputs:
```json
{"prediction": "App"}
{"prediction": "Channel"}
{"prediction": "Academic"}
```
And we provides a data file like this:
```json
{"groundtruth": "App"}
{"groundtruth": "Channel"}
{"groundtruth": "Wiki"}
```
Then the `grades` value would be `["Correct", "Correct", "Incorrect"]`, and the final accuracy is `0.67`.
This example provides a straightforward demonstration of how to evaluate the classification flow. Once you have a solid understanding of the evaluation mechanism, you can customize and design your own evaluation method to suit your specific needs.
### More about the list parameter
What if the number of referred standard flow run outputs does not match the provided data file? We know that a standard flow can be executed against multiple line data and some of them could fail while others succeed. Consider the same standard flow run mentioned in above example but the `2nd` line run has failed, thus we have below run outputs:
```json
{"prediction": "App"}
{"prediction": "Academic"}
```
The promptflow flow executor has the capability to recognize the index of the referred run's outputs and extract the corresponding data from the provided data file. This means that during the execution process, even if the same data file is provided(3 lines), only the specific data mentioned below will be processed:
```json
{"groundtruth": "App"}
{"groundtruth": "Wiki"}
```
In this case, the `grades` value would be `["Correct", "Incorrect"]` and the accuracy is `0.5`.
### How to set the aggregation node in the VS Code Extension

## How to log metrics
:::{admonition} Limitation
You can only log metrics in an `aggregation node`, otherwise the metric will be ignored.
:::
Promptflow supports logging and tracking experiments using `log_metric` function. A metric is a key-value pair that records a single float measure. In a python node, you can log a metric with below code:
```python
from typing import List
from promptflow import log_metric, tool
@tool
def example_log_metrics(grades: List[str]):
    """Aggregation node: compute and log overall accuracy from per-line grades.

    Args:
        grades: one grade per evaluated line, e.g. "Correct" or "Incorrect".
    """
    # This node is an aggregation node, so it accepts a list of grades.
    metric_key = "accuracy"
    # Fraction of lines graded "Correct". Guard against an empty run so we do
    # not divide by zero. (The original snippet referenced an undefined name
    # `result`; the denominator must be the number of grades.)
    metric_value = round(grades.count("Correct") / len(grades), 2) if grades else 0.0
    log_metric(metric_key, metric_value)
```
After the run is completed, you can run `pf run show-metrics -n <run_name>` to see the metrics.

| promptflow/docs/how-to-guides/develop-a-flow/develop-evaluation-flow.md/0 | {
"file_path": "promptflow/docs/how-to-guides/develop-a-flow/develop-evaluation-flow.md",
"repo_id": "promptflow",
"token_count": 1939
} | 2 |
# How-to Guides
Simple and short articles grouped by topics, each introduces a core feature of prompt flow and how you can use it to address your specific use cases.
```{toctree}
:maxdepth: 1
develop-a-flow/index
init-and-test-a-flow
add-conditional-control-to-a-flow
run-and-evaluate-a-flow/index
tune-prompts-with-variants
execute-flow-as-a-function
deploy-a-flow/index
enable-streaming-mode
manage-connections
manage-runs
set-global-configs
develop-a-tool/index
process-image-in-flow
faq
```
| promptflow/docs/how-to-guides/index.md/0 | {
"file_path": "promptflow/docs/how-to-guides/index.md",
"repo_id": "promptflow",
"token_count": 174
} | 3 |
# pf
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../how-to-guides/faq.md#stable-vs-experimental).
:::
Manage prompt flow resources with the prompt flow CLI.
| Command | Description |
|---------------------------------|---------------------------------|
| [pf flow](#pf-flow) | Manage flows. |
| [pf connection](#pf-connection) | Manage connections. |
| [pf run](#pf-run) | Manage runs. |
| [pf tool](#pf-tool) | Init or list tools. |
| [pf config](#pf-config) | Manage config for current user. |
| [pf upgrade](#pf-upgrade) | Upgrade prompt flow CLI. |
## pf flow
Manage promptflow flow flows.
| Command | Description |
| --- | --- |
| [pf flow init](#pf-flow-init) | Initialize a prompt flow directory. |
| [pf flow test](#pf-flow-test) | Test the prompt flow or flow node. |
| [pf flow validate](#pf-flow-validate) | Validate a flow and generate `flow.tools.json` for it. |
| [pf flow build](#pf-flow-build) | Build a flow for further sharing or deployment. |
| [pf flow serve](#pf-flow-serve) | Serve a flow as an endpoint. |
### pf flow init
Initialize a prompt flow directory.
```bash
pf flow init [--flow]
[--entry]
[--function]
[--prompt-template]
[--type]
[--yes]
```
#### Examples
Create a flow folder with code, prompts and YAML specification of the flow.
```bash
pf flow init --flow <path-to-flow-direcotry>
```
Create an evaluation prompt flow
```bash
pf flow init --flow <path-to-flow-direcotry> --type evaluation
```
Create a flow in existing folder
```bash
pf flow init --flow <path-to-existing-folder> --entry <entry.py> --function <function-name> --prompt-template <path-to-prompt-template.md>
```
#### Optional Parameters
`--flow`
The flow name to create.
`--entry`
The entry file name.
`--function`
The function name in entry file.
`--prompt-template`
The prompt template parameter and assignment.
`--type`
The initialized flow type.
accepted value: standard, evaluation, chat
`--yes --assume-yes -y`
Automatic yes to all prompts; assume 'yes' as answer to all prompts and run non-interactively.
### pf flow test
Test the prompt flow or flow node.
```bash
pf flow test --flow
[--inputs]
[--node]
[--variant]
[--debug]
[--interactive]
[--verbose]
```
#### Examples
Test the flow.
```bash
pf flow test --flow <path-to-flow-directory>
```
Test the flow with single line from input file.
```bash
pf flow test --flow <path-to-flow-directory> --inputs data_key1=data_val1 data_key2=data_val2
```
Test the flow with specified variant node.
```bash
pf flow test --flow <path-to-flow-directory> --variant '${node_name.variant_name}'
```
Test the single node in the flow.
```bash
pf flow test --flow <path-to-flow-directory> --node <node_name>
```
Debug the single node in the flow.
```bash
pf flow test --flow <path-to-flow-directory> --node <node_name> --debug
```
Chat in the flow.
```bash
pf flow test --flow <path-to-flow-directory> --node <node_name> --interactive
```
#### Required Parameter
`--flow`
The flow directory to test.
#### Optional Parameters
`--inputs`
Input data for the flow. Example: --inputs data1=data1_val data2=data2_val
`--node`
The node name in the flow need to be tested.
`--variant`
Node & variant name in format of ${node_name.variant_name}.
`--debug`
Debug the single node in the flow.
`--interactive`
Start a interactive chat session for chat flow.
`--verbose`
Displays the output for each step in the chat flow.
### pf flow validate
Validate the prompt flow and generate a `flow.tools.json` under `.promptflow`. This file is required when using flow as a component in a Azure ML pipeline.
```bash
pf flow validate --source
[--debug]
[--verbose]
```
#### Examples
Validate the flow.
```bash
pf flow validate --source <path-to-flow>
```
#### Required Parameter
`--source`
The flow source to validate.
### pf flow build
Build a flow for further sharing or deployment.
```bash
pf flow build --source
--output
--format
[--variant]
[--verbose]
[--debug]
```
#### Examples
Build a flow as docker, which can be built into Docker image via `docker build`.
```bash
pf flow build --source <path-to-flow> --output <output-path> --format docker
```
Build a flow as docker with specific variant.
```bash
pf flow build --source <path-to-flow> --output <output-path> --format docker --variant '${node_name.variant_name}'
```
#### Required Parameter
`--source`
The flow or run source to be used.
`--output`
The folder to output built flow. Need to be empty or not existed.
`--format`
The format to build flow into
#### Optional Parameters
`--variant`
Node & variant name in format of ${node_name.variant_name}.
`--verbose`
Show more details for each step during build.
`--debug`
Show debug information during build.
### pf flow serve
Serving a flow as an endpoint.
```bash
pf flow serve --source
[--port]
[--host]
[--environment-variables]
[--verbose]
[--debug]
[--skip-open-browser]
```
#### Examples
Serve flow as an endpoint.
```bash
pf flow serve --source <path-to-flow>
```
Serve flow as an endpoint with specific port and host.
```bash
pf flow serve --source <path-to-flow> --port <port> --host <host> --environment-variables key1="`${my_connection.api_key}`" key2="value2"
```
#### Required Parameter
`--source`
The flow or run source to be used.
#### Optional Parameters
`--port`
The port on which endpoint to run.
`--host`
The host of endpoint.
`--environment-variables`
Environment variables to set by specifying a property path and value. Example: --environment-variable key1="\`${my_connection.api_key}\`" key2="value2". The value reference to connection keys will be resolved to the actual value, and all environment variables specified will be set into `os.environ`.
`--verbose`
Show more details for each step during serve.
`--debug`
Show debug information during serve.
`--skip-open-browser`
Skip opening browser after serve. Store true parameter.
## pf connection
Manage prompt flow connections.
| Command | Description |
| --- | --- |
| [pf connection create](#pf-connection-create) | Create a connection. |
| [pf connection update](#pf-connection-update) | Update a connection. |
| [pf connection show](#pf-connection-show) | Show details of a connection. |
| [pf connection list](#pf-connection-list) | List all the connection. |
| [pf connection delete](#pf-connection-delete) | Delete a connection. |
### pf connection create
Create a connection.
```bash
pf connection create --file
[--name]
[--set]
```
#### Examples
Create a connection with YAML file.
```bash
pf connection create -f <yaml-filename>
```
Create a connection with YAML file with override.
```bash
pf connection create -f <yaml-filename> --set api_key="<api-key>"
```
Create a custom connection with .env file; note that overrides specified by `--set` will be ignored.
```bash
pf connection create -f .env --name <name>
```
#### Required Parameter
`--file -f`
Local path to the YAML file containing the prompt flow connection specification.
#### Optional Parameters
`--name -n`
Name of the connection.
`--set`
Update an object by specifying a property path and value to set. Example: --set property1.property2=.
### pf connection update
Update a connection.
```bash
pf connection update --name
[--set]
```
#### Example
Update a connection.
```bash
pf connection update -n <name> --set api_key="<api-key>"
```
#### Required Parameter
`--name -n`
Name of the connection.
#### Optional Parameter
`--set`
Update an object by specifying a property path and value to set. Example: --set property1.property2=.
### pf connection show
Show details of a connection.
```bash
pf connection show --name
```
#### Required Parameter
`--name -n`
Name of the connection.
### pf connection list
List all the connection.
```bash
pf connection list
```
### pf connection delete
Delete a connection.
```bash
pf connection delete --name
```
#### Required Parameter
`--name -n`
Name of the connection.
## pf run
Manage prompt flow runs.
| Command | Description |
| --- | --- |
| [pf run create](#pf-run-create) | Create a run. |
| [pf run update](#pf-run-update) | Update a run metadata, including display name, description and tags. |
| [pf run stream](#pf-run-stream) | Stream run logs to the console. |
| [pf run list](#pf-run-list) | List runs. |
| [pf run show](#pf-run-show) | Show details for a run. |
| [pf run show-details](#pf-run-show-details) | Preview a run's input(s) and output(s). |
| [pf run show-metrics](#pf-run-show-metrics) | Print run metrics to the console. |
| [pf run visualize](#pf-run-visualize) | Visualize a run. |
| [pf run archive](#pf-run-archive) | Archive a run. |
| [pf run restore](#pf-run-restore) | Restore an archived run. |
### pf run create
Create a run.
```bash
pf run create [--file]
[--flow]
[--data]
[--column-mapping]
[--run]
[--variant]
[--stream]
[--environment-variables]
[--connections]
[--set]
[--source]
```
#### Examples
Create a run with YAML file.
```bash
pf run create -f <yaml-filename>
```
Create a run with YAML file and replace another data in the YAML file.
```bash
pf run create -f <yaml-filename> --data <path-to-new-data-file-relative-to-yaml-file>
```
Create a run from flow directory and reference a run.
```bash
pf run create --flow <path-to-flow-directory> --data <path-to-data-file> --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run <run-name> --variant '${summarize_text_content.variant_0}' --stream
```
Create a run from an existing run record folder.
```bash
pf run create --source <path-to-run-folder>
```
#### Optional Parameters
`--file -f`
Local path to the YAML file containing the prompt flow run specification; can be overwritten by other parameters. Reference [here](https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json) for YAML schema.
`--flow`
Local path to the flow directory. If --file is provided, this path should be relative path to the file.
`--data`
Local path to the data file. If --file is provided, this path should be relative path to the file.
`--column-mapping`
Inputs column mapping, use `${data.xx}` to refer to data columns, use `${run.inputs.xx}` to refer to referenced run's data columns, and `${run.outputs.xx}` to refer to run outputs columns.
`--run`
Referenced flow run name. For example, you can run an evaluation flow against an existing run. For example, "pf run create --flow evaluation_flow_dir --run existing_bulk_run".
`--variant`
Node & variant name in format of `${node_name.variant_name}`.
`--stream -s`
Indicates whether to stream the run's logs to the console.
default value: False
`--environment-variables`
Environment variables to set by specifying a property path and value. Example:
`--environment-variable key1='${my_connection.api_key}' key2='value2'`. The value reference
to connection keys will be resolved to the actual value, and all environment variables
specified will be set into os.environ.
`--connections`
Overwrite node level connections with provided value.
Example: `--connections node1.connection=test_llm_connection node1.deployment_name=gpt-35-turbo`
`--set`
Update an object by specifying a property path and value to set.
Example: `--set property1.property2=<value>`.
`--source`
Local path to the existing run record folder.
### pf run update
Update a run metadata, including display name, description and tags.
```bash
pf run update --name
[--set]
```
#### Example
Update a run
```bash
pf run update -n <name> --set display_name="<display-name>" description="<description>" tags.key="value"
```
#### Required Parameter
`--name -n`
Name of the run.
#### Optional Parameter
`--set`
Update an object by specifying a property path and value to set. Example: --set property1.property2=.
### pf run stream
Stream run logs to the console.
```bash
pf run stream --name
```
#### Required Parameter
`--name -n`
Name of the run.
### pf run list
List runs.
```bash
pf run list [--all-results]
[--archived-only]
[--include-archived]
[--max-results]
```
#### Optional Parameters
`--all-results`
Returns all results.
default value: False
`--archived-only`
List archived runs only.
default value: False
`--include-archived`
List archived runs and active runs.
default value: False
`--max-results -r`
Max number of results to return. Default is 50.
default value: 50
### pf run show
Show details for a run.
```bash
pf run show --name
```
#### Required Parameter
`--name -n`
Name of the run.
### pf run show-details
Preview a run's input(s) and output(s).
```bash
pf run show-details --name
```
#### Required Parameter
`--name -n`
Name of the run.
### pf run show-metrics
Print run metrics to the console.
```bash
pf run show-metrics --name
```
#### Required Parameter
`--name -n`
Name of the run.
### pf run visualize
Visualize a run in the browser.
```bash
pf run visualize --names
```
#### Required Parameter
`--names -n`
Name of the runs, comma separated.
### pf run archive
Archive a run.
```bash
pf run archive --name
```
#### Required Parameter
`--name -n`
Name of the run.
### pf run restore
Restore an archived run.
```bash
pf run restore --name
```
#### Required Parameter
`--name -n`
Name of the run.
## pf tool
Manage promptflow tools.
| Command | Description |
| --- | --- |
| [pf tool init](#pf-tool-init) | Initialize a tool directory. |
| [pf tool list](#pf-tool-list) | List all tools in the environment. |
| [pf tool validate](#pf-tool-validate) | Validate tools. |
### pf tool init
Initialize a tool directory.
```bash
pf tool init [--package]
[--tool]
[--set]
```
#### Examples
Creating a package tool from scratch.
```bash
pf tool init --package <package-name> --tool <tool-name>
```
Creating a package tool with extra info.
```bash
pf tool init --package <package-name> --tool <tool-name> --set icon=<icon-path> category=<tool-category> tags="{'<key>': '<value>'}"
```
Creating a package tool from scratch.
```bash
pf tool init --package <package-name> --tool <tool-name>
```
Creating a python tool from scratch.
```bash
pf tool init --tool <tool-name>
```
#### Optional Parameters
`--package`
The package name to create.
`--tool`
The tool name to create.
`--set`
Set extra information about the tool, like category, icon and tags. Example: --set <key>=<value>.
### pf tool list
List all tools in the environment.
```bash
pf tool list [--flow]
```
#### Examples
List all package tool in the environment.
```bash
pf tool list
```
List all package tool and code tool in the flow.
```bash
pf tool list --flow <path-to-flow-direcotry>
```
#### Optional Parameters
`--flow`
The flow directory.
### pf tool validate
Validate tool.
```bash
pf tool validate --source
```
#### Examples
Validate single function tool.
```bash
pf tool validate --source <package-name>.<module-name>.<tool-function>
```
Validate all tool in a package tool.
```bash
pf tool validate --source <package-name>
```
Validate tools in a python script.
```bash
pf tool validate --source <path-to-tool-script>
```
#### Required Parameter
`--source`
The tool source to be used.
## pf config
Manage config for current user.
| Command | Description |
|-----------------------------------|--------------------------------------------|
| [pf config set](#pf-config-set) | Set prompt flow configs for current user. |
| [pf config show](#pf-config-show) | Show prompt flow configs for current user. |
### pf config set
Set prompt flow configs for current user, configs will be stored at ~/.promptflow/pf.yaml.
```bash
pf config set
```
#### Examples
Config connection provider to azure workspace for current user.
```bash
pf config set connection.provider="azureml://subscriptions/<your-subscription>/resourceGroups/<your-resourcegroup>/providers/Microsoft.MachineLearningServices/workspaces/<your-workspace>"
```
### pf config show
Show prompt flow configs for current user.
```bash
pf config show
```
#### Examples
Show prompt flow for current user.
```bash
pf config show
```
## pf upgrade
Upgrade prompt flow CLI.
| Command | Description |
|-----------------------------|-----------------------------|
| [pf upgrade](#pf-upgrade) | Upgrade prompt flow CLI. |
### Examples
Upgrade prompt flow without prompt and run non-interactively.
```bash
pf upgrade --yes
``` | promptflow/docs/reference/pf-command-reference.md/0 | {
"file_path": "promptflow/docs/reference/pf-command-reference.md",
"repo_id": "promptflow",
"token_count": 6270
} | 4 |
# Contributing to examples folder
Thank you for your interest in contributing to the examples folder. This folder contains a collection of Python notebooks and selected markdown files that demonstrate various usage of this promptflow project. The script will automatically generate a README.md file in the root folder, listing all the notebooks and markdown files with their corresponding workflows.
## Guidelines for notebooks and markdown files in examples folder
When creating or modifying a notebook or markdown file, please follow these guidelines:
- Each notebook or markdown file should have a clear and descriptive title as the first line
- Each notebook or markdown file should have a brief introduction that explains the purpose and scope of the example. For details, please refer to the readme workflow generator manual [README.md](../scripts/readme/README.md) file.
- The first sentence of first paragraph of the markdown file is important. The introduction should be concise and informative, and end with a period.
- Each notebook file should have a metadata area when the file is opened as a big JSON file. The metadata area may contain the following fields:
- `.metadata.description`: (Mandatory) A short description of the example that will be displayed in the README.md file. The description should be concise and informative, and end with a period.
- `.metadata.stage`: (Optional) A value that indicates whether the script should skip generating a workflow for this notebook or markdown file. If set to `development`, the script will ignore this file. If set to other values or omitted, the script will generate a workflow for this file.
- Each notebook or markdown file should have a clear and logical structure, using appropriate headings, subheadings, comments, and code cells. The code cells should be executable and produce meaningful outputs.
- Each notebook or markdown file should follow the [PEP 8](https://peps.python.org/pep-0008/) style guide for Python code, and use consistent and readable variable names, indentation, spacing, and punctuation.
- Each notebook or markdown file should include relevant references, citations, and acknowledgements.
- If you are contributing to [tutorial](./tutorials/), each notebook or markdown file should declare its dependent resources in its metadata, so that the auto generated workflow can listen to the changes of these resources to avoid unexpected breaking. Resources should be declared with relative path to the repo root, and here are examples for [notebook](./tutorials/get-started/quickstart.ipynb) and [markdown](./tutorials/e2e-development/chat-with-pdf.md).
## Generate workflows, update README.md and submit pull requests
To run the readme.py script, you need to have Python 3 installed on your system. You also need to install the required packages by running:
```bash
# At this examples folder
pip install -r requirements.txt
pip install -r dev_requirements.txt
```
Then, you can run the script by:
```bash
# At the root of this repository
python scripts/readme/readme.py
```
For detailed usage of readme.py, please refer to the readme workflow generator manual [README.md](../scripts/readme/README.md)
### Update [README.md](./README.md) in [examples](./) folder
The readme.py script will scan all the notebooks and markdown files in the examples folder, and generate a README.md file in the root folder. The README.md file will contain a table of contents with links to each notebook and markdown file, as well as their descriptions and workflows.
### Generations in the [workflows](../.github/workflows/) folder
This contains two parts:
* For notebooks, we'll prepare standard workflow running environment to test the notebook to the end.
* For markdowns, The workflows are generated by extracting the `bash` cells from markdown file. The workflows will prepare standard workflow running environment and test these cells to the end.
The script will also save workflows in the [workflows](../.github/workflows/) folder, where each notebook or markdown file will have a corresponding workflow file with the `.yml` extension. The workflow files can be triggered by creating a new pull request or pushing a new commit to the repository. The workflow will run the notebook or markdown file, and you could check the outputs afterwards.
## Feedback and Support
If you have any feedback or need any support regarding this folder, submit an issue on GitHub. We appreciate your contribution and hope you enjoy using our project.
| promptflow/examples/CONTRIBUTING.md/0 | {
"file_path": "promptflow/examples/CONTRIBUTING.md",
"repo_id": "promptflow",
"token_count": 1042
} | 5 |
from promptflow import tool
from chat_with_pdf.build_index import create_faiss_index


@tool
def build_index_tool(pdf_path: str) -> str:
    """Build a FAISS index from the PDF at *pdf_path*.

    Thin promptflow tool wrapper around ``create_faiss_index``; returns
    whatever that helper returns.
    """
    index_result = create_faiss_index(pdf_path)
    return index_result
| promptflow/examples/flows/chat/chat-with-pdf/build_index_tool.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/build_index_tool.py",
"repo_id": "promptflow",
"token_count": 61
} | 6 |
import os

from utils.oai import OAIChat


def qna(prompt: str, history: list):
    """Answer *prompt* in the context of the chat *history*.

    Args:
        prompt: the user's current question.
        history: prior chat messages (list of role/content dicts).

    Returns:
        The streaming response object produced by ``OAIChat.stream``.

    Raises:
        ValueError: if the MAX_COMPLETION_TOKENS environment variable is unset.
    """
    raw_max_tokens = os.environ.get("MAX_COMPLETION_TOKENS")
    if raw_max_tokens is None:
        # Fail with a clear message instead of the opaque TypeError that
        # int(None) would otherwise raise.
        raise ValueError("MAX_COMPLETION_TOKENS environment variable is not set")
    max_completion_tokens = int(raw_max_tokens)

    chat = OAIChat()
    stream = chat.stream(
        messages=history + [{"role": "user", "content": prompt}],
        max_tokens=max_completion_tokens,
    )
    return stream
| promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/qna.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/qna.py",
"repo_id": "promptflow",
"token_count": 140
} | 7 |
from promptflow import tool
from chat_with_pdf.download import download


@tool
def download_tool(url: str, env_ready_signal: str) -> str:
    """Download the resource at *url* via ``chat_with_pdf.download.download``
    and return its result.

    NOTE(review): ``env_ready_signal`` is not used in the body; presumably it
    exists only so the flow DAG schedules this node after the
    environment-setup node completes — TODO confirm against flow.dag.yaml.
    """
    return download(url)
| promptflow/examples/flows/chat/chat-with-pdf/download_tool.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/download_tool.py",
"repo_id": "promptflow",
"token_count": 52
} | 8 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
groundtruth:
type: string
description: Please specify the groundtruth column, which contains the true label
to the outputs that your flow produces.
default: APP
prediction:
type: string
description: Please specify the prediction column, which contains the predicted
outputs that your flow produces.
default: APP
outputs:
grade:
type: string
reference: ${grade.output}
nodes:
- name: grade
type: python
source:
type: code
path: grade.py
inputs:
groundtruth: ${inputs.groundtruth}
prediction: ${inputs.prediction}
- name: calculate_accuracy
type: python
source:
type: code
path: calculate_accuracy.py
inputs:
grades: ${grade.output}
aggregation: true
environment:
python_requirements_txt: requirements.txt
| promptflow/examples/flows/evaluation/eval-classification-accuracy/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-classification-accuracy/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 297
} | 9 |
user:
# Instructions
* There are many chatbots that can answer users' questions based on the context given from different sources like search results, or snippets from books/papers. They try to understand the user's question and then get context by searching search engines, databases, or books/papers for relevant content. Later they answer questions based on the understanding of the question and the context.
* Your goal is to score the question, answer and context from 1 to 10 based on below:
* Score 10 if the answer is stating facts that are all present in the given context
* Score 1 if the answer is stating things that none of them present in the given context
* If there are multiple facts in the answer and some of them are present in the given context while others are not, score between 1 and 10 based on the fraction of information supported by the context
* Just respond with the score, nothing else.
# Real work
## Question
{{question}}
## Answer
{{answer}}
## Context
{{context}}
## Score | promptflow/examples/flows/evaluation/eval-groundedness/gpt_groundedness.md/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-groundedness/gpt_groundedness.md",
"repo_id": "promptflow",
"token_count": 231
} | 10 |
system:
You are an AI assistant. You will be given the definition of an evaluation metric for assessing the quality of an answer in a question-answering task. Your job is to compute an accurate evaluation score using the provided evaluation metric.
user:
Coherence of an answer is measured by how well all the sentences fit together and sound naturally as a whole. Consider the overall quality of the answer when evaluating coherence. Given the question and answer, score the coherence of answer between one to five stars using the following rating scale:
One star: the answer completely lacks coherence
Two stars: the answer mostly lacks coherence
Three stars: the answer is partially coherent
Four stars: the answer is mostly coherent
Five stars: the answer has perfect coherency
This rating value should always be an integer between 1 and 5. So the rating produced should be 1 or 2 or 3 or 4 or 5.
question: What is your favorite indoor activity and why do you enjoy it?
answer: I like pizza. The sun is shining.
stars: 1
question: Can you describe your favorite movie without giving away any spoilers?
answer: It is a science fiction movie. There are dinosaurs. The actors eat cake. People must stop the villain.
stars: 2
question: What are some benefits of regular exercise?
answer: Regular exercise improves your mood. A good workout also helps you sleep better. Trees are green.
stars: 3
question: How do you cope with stress in your daily life?
answer: I usually go for a walk to clear my head. Listening to music helps me relax as well. Stress is a part of life, but we can manage it through some activities.
stars: 4
question: What can you tell me about climate change and its effects on the environment?
answer: Climate change has far-reaching effects on the environment. Rising temperatures result in the melting of polar ice caps, contributing to sea-level rise. Additionally, more frequent and severe weather events, such as hurricanes and heatwaves, can cause disruption to ecosystems and human societies alike.
stars: 5
question: {{question}}
answer: {{answer}}
stars: | promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_coherence_prompt.jinja2/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-qna-non-rag/gpt_coherence_prompt.jinja2",
"repo_id": "promptflow",
"token_count": 464
} | 11 |
system:
You will be provided a question, a conversation history, fetched documents related to the question and a response to the question in the domain. Your task is to evaluate the quality of the provided response by following the steps below:
- Understand the context of the question based on the conversation history.
- Generate a reference answer that is only based on the conversation history, question, and fetched documents. Don't generate the reference answer based on your own knowledge.
- You need to rate the provided response according to the reference answer if it's available on a scale of 1 (poor) to 5 (excellent), based on the below criteria:
- 5 - Ideal: The provided response includes all information necessary to answer the question based on the reference answer and conversation history. Please be strict about giving a 5 score.
- 4 - Mostly Relevant: The provided response is mostly relevant, although it may be a little too narrow or too broad based on the reference answer and conversation history.
- 3 - Somewhat Relevant: The provided response may be partly helpful but might be hard to read or contain other irrelevant content based on the reference answer and conversation history.
- 2 - Barely Relevant: The provided response is barely relevant, perhaps shown as a last resort based on the reference answer and conversation history.
- 1 - Completely Irrelevant: The provided response should never be used for answering this question based on the reference answer and conversation history.
- You need to rate the provided response to be 5, if the reference answer can not be generated since no relevant documents were retrieved.
- You need to first provide a scoring reason for the evaluation according to the above criteria, and then provide a score for the quality of the provided response.
- You need to translate the provided response into English if it's in another language.
- Your final response must include both the reference answer and the evaluation result. The evaluation result should be written in English. Your response should be in the following format:
```
[assistant](#evaluation result)
<start reference answer>
[insert the reference answer here]
<end reference answer>
<start quality score reasoning>
Quality score reasoning: [insert score reasoning here]
<end quality score reasoning>
<start quality score>
Quality score: [insert score here]/5
<end quality score>
```
- Your answer must end with <|im_end|>.
user:
#conversation history
#question
{{question}}
#fetched documents
{{FullBody}}
#provided response
{{answer}}
assistant:
#evaluation result
<start reference answer>""" | promptflow/examples/flows/evaluation/eval-qna-rag-metrics/rag_generation_prompt.jinja2/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-qna-rag-metrics/rag_generation_prompt.jinja2",
"repo_id": "promptflow",
"token_count": 579
} | 12 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomConnection.schema.json
name: azure_ai_translator_connection
type: custom
configs:
endpoint: "<azure-translator-resource-endpoint>"
region: "<azure-translator-resource-region>"
secrets:
api_key: "<to-be-replaced>" | promptflow/examples/flows/integrations/azure-ai-language/connections/azure_ai_translator.yml/0 | {
"file_path": "promptflow/examples/flows/integrations/azure-ai-language/connections/azure_ai_translator.yml",
"repo_id": "promptflow",
"token_count": 102
} | 13 |
Determine which next function to use, and respond using a stringified JSON object.
If you have completed all your tasks, make sure to use the 'finish' function to signal completion, and remember to show your results.
"file_path": "promptflow/examples/flows/standard/autonomous-agent/triggering_prompt.jinja2",
"repo_id": "promptflow",
"token_count": 44
} | 14 |
AZURE_OPENAI_API_KEY=<your_AOAI_key>
AZURE_OPENAI_API_BASE=<your_AOAI_endpoint>
AZURE_OPENAI_API_TYPE=azure
| promptflow/examples/flows/standard/basic/.env.example/0 | {
"file_path": "promptflow/examples/flows/standard/basic/.env.example",
"repo_id": "promptflow",
"token_count": 58
} | 15 |
from promptflow import tool
@tool
def llm_result(question: str) -> str:
    """Return a canned answer about prompt flow.

    Placeholder tool: swap this node for a real LLM node to answer
    `question` dynamically.
    """
    # Static response used in place of a real model call.
    answer = (
        "Prompt flow is a suite of development tools designed to streamline "
        "the end-to-end development cycle of LLM-based AI applications."
    )
    return answer
| promptflow/examples/flows/standard/conditional-flow-for-if-else/llm_result.py/0 | {
"file_path": "promptflow/examples/flows/standard/conditional-flow-for-if-else/llm_result.py",
"repo_id": "promptflow",
"token_count": 100
} | 16 |
# Customer Intent Extraction
This sample is using an OpenAI chat model (ChatGPT/GPT4) to identify customer intent from a customer's question.
By going through this sample you will learn how to create a flow from existing working code (written in LangChain in this case).
This is the [existing code](./intent.py).
## Prerequisites
Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
Ensure you have put your azure open ai endpoint key in .env file.
```bash
cat .env
```
## Run flow
1. init flow directory - create promptflow folder from existing python file
```bash
pf flow init --flow . --entry intent.py --function extract_intent --prompt-template chat_prompt=user_intent_zero_shot.jinja2
```
The generated files:
- extract_intent_tool.py: Wrap the func `extract_intent` in the `intent.py` script into a [Python Tool](https://promptflow.azurewebsites.net/tools-reference/python-tool.html).
- flow.dag.yaml: Describes the DAG(Directed Acyclic Graph) of this flow.
- .gitignore: File/folder in the flow to be ignored.
2. create needed custom connection
```bash
pf connection create -f .env --name custom_connection
```
3. test flow with single line input
```bash
pf flow test --flow . --input ./data/denormalized-flat.jsonl
```
4. run with multiple lines input
```bash
pf run create --flow . --data ./data --column-mapping history='${data.history}' customer_info='${data.customer_info}'
```
You can also skip providing `column-mapping` if provided data has same column name as the flow.
Reference [here](https://aka.ms/pf/column-mapping) for default behavior when `column-mapping` not provided in CLI.
5. list/show
```bash
# list created run
pf run list
# get a sample completed run name
name=$(pf run list | jq '.[] | select(.name | contains("customer_intent_extraction")) | .name'| head -n 1 | tr -d '"')
# show run
pf run show --name $name
# show specific run detail, top 3 lines
pf run show-details --name $name -r 3
```
6. evaluation
```bash
# create evaluation run
pf run create --flow ../../evaluation/eval-classification-accuracy --data ./data --column-mapping groundtruth='${data.intent}' prediction='${run.outputs.output}' --run $name
```
```bash
# get the evaluation run in previous step
eval_run_name=$(pf run list | jq '.[] | select(.name | contains("eval_classification_accuracy")) | .name'| head -n 1 | tr -d '"')
# show run
pf run show --name $eval_run_name
# show run output
pf run show-details --name $eval_run_name -r 3
```
7. visualize
```bash
# visualize in browser
pf run visualize --name $eval_run_name # your evaluation run name
```
## Deploy
### Serve as a local test app
```bash
pf flow serve --source . --port 5123 --host localhost
```
Visit http://localhost:5123 to access the test app.
### Export
#### Export as docker
```bash
# pf flow export --source . --format docker --output ./package
``` | promptflow/examples/flows/standard/customer-intent-extraction/README.md/0 | {
"file_path": "promptflow/examples/flows/standard/customer-intent-extraction/README.md",
"repo_id": "promptflow",
"token_count": 905
} | 17 |
{"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
{"url": "https://arxiv.org/abs/2307.04767", "answer": "Academic", "evidence": "Text content"}
{"url": "https://play.google.com/store/apps/details?id=com.twitter.android", "answer": "App", "evidence": "Both"}
| promptflow/examples/flows/standard/flow-with-additional-includes/data.jsonl/0 | {
"file_path": "promptflow/examples/flows/standard/flow-with-additional-includes/data.jsonl",
"repo_id": "promptflow",
"token_count": 114
} | 18 |
import difflib
import webbrowser
def show_diff(left_content, right_content, name="file", open_browser=True):
    """Render an HTML side-by-side diff of two texts and write it to disk.

    :param left_content: original text (shown as the "origin <name>" column).
    :param right_content: updated text (shown as the "new <name>" column).
    :param name: label used in the column headers and in the output file name
        "<name>_diff.html", written to the current working directory.
    :param open_browser: when True (default, the original behavior) open the
        generated file in the default web browser; pass False for
        headless/CI usage where launching a browser is undesirable.
    """
    d = difflib.HtmlDiff()
    html = d.make_file(
        left_content.splitlines(),
        right_content.splitlines(),
        "origin " + name,
        "new " + name,
        context=True,
        numlines=20)
    html_name = name + "_diff.html"
    # Write bytes explicitly so the file content does not depend on the
    # platform's default text encoding.
    with open(html_name, "w+b") as fp:
        fp.write(html.encode())
    if open_browser:
        webbrowser.open(html_name)
| promptflow/examples/flows/standard/gen-docstring/diff.py/0 | {
"file_path": "promptflow/examples/flows/standard/gen-docstring/diff.py",
"repo_id": "promptflow",
"token_count": 210
} | 19 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
environment:
python_requirements_txt: requirements.txt
inputs:
math_question:
type: string
default: If a rectangle has a length of 10 and width of 5, what is the area?
outputs:
code:
type: string
reference: ${code_refine.output}
answer:
type: string
reference: ${final_code_execution.output}
nodes:
- name: final_code_execution
type: python
source:
type: code
path: code_execution.py
inputs:
code_snippet: ${code_refine.output}
- name: math_example
type: python
source:
type: code
path: math_example.py
inputs: {}
- name: code_refine
type: python
source:
type: code
path: code_refine.py
inputs:
original_code: ${code_gen.output}
- name: code_gen
type: llm
source:
type: code
path: ask_llm.jinja2
inputs:
# This is to easily switch between openai and azure openai.
# deployment_name is required by azure openai, model is required by openai.
deployment_name: gpt-35-turbo
model: gpt-3.5-turbo
question: ${inputs.math_question}
examples: ${math_example.output}
connection: open_ai_connection
api: chat
| promptflow/examples/flows/standard/maths-to-code/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/standard/maths-to-code/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 457
} | 20 |
# Web Classification
This is a flow demonstrating multi-class classification with LLM. Given an url, it will classify the url into one web category with just a few shots, simple summarization and classification prompts.
## Tools used in this flow
- LLM Tool
- Python Tool
## What you will learn
In this flow, you will learn
- how to compose a classification flow with LLM.
- how to feed few shots to LLM classifier.
## Prerequisites
Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
## Getting Started
### 1. Setup connection
If you are using Azure Open AI, prepare your resource follow this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
If you using OpenAI, sign up account [OpenAI website](https://openai.com/), login and [find personal API key](https://platform.openai.com/account/api-keys).
```shell
pf connection create --file ../../../connections/openai.yml --set api_key=<your_api_key>
```
### 2. Configure the flow with your connection
`flow.dag.yaml` is already configured with connection named `open_ai_connection`.
### 3. Test flow with single line data
```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
# test with user specified inputs
pf flow test --flow . --inputs url='https://www.youtube.com/watch?v=kYqRtjDBci8'
```
### 4. Run with multi-line data
```bash
# create run using command line args
pf run create --flow . --data ./data.jsonl --column-mapping url='${data.url}' --stream
# (Optional) create a random run name
run_name="web_classification_"$(openssl rand -hex 12)
# create run using yaml file, run_name will be used in following contents, --name is optional
pf run create --file run.yml --stream --name $run_name
```
You can also skip providing `column-mapping` if provided data has same column name as the flow.
Reference [here](https://aka.ms/pf/column-mapping) for default behavior when `column-mapping` not provided in CLI.
```bash
# list run
pf run list
# show run
pf run show --name $run_name
# show run outputs
pf run show-details --name $run_name
```
### 5. Run with classification evaluation flow
create `evaluation` run:
```bash
# (Optional) save previous run name into variable, and create a new random run name for further use
prev_run_name=$run_name
run_name="classification_accuracy_"$(openssl rand -hex 12)
# create run using command line args
pf run create --flow ../../evaluation/eval-classification-accuracy --data ./data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run $prev_run_name --stream
# create run using yaml file, --name is optional
pf run create --file run_evaluation.yml --run $prev_run_name --stream --name $run_name
```
```bash
pf run show-details --name $run_name
pf run show-metrics --name $run_name
pf run visualize --name $run_name
```
### 6. Submit run to cloud
```bash
# set default workspace
az account set -s <your_subscription_id>
az configure --defaults group=<your_resource_group_name> workspace=<your_workspace_name>
# create run
pfazure run create --flow . --data ./data.jsonl --column-mapping url='${data.url}' --stream
# (Optional) create a new random run name for further use
run_name="web_classification_"$(openssl rand -hex 12)
# create run using yaml file, --name is optional
pfazure run create --file run.yml --name $run_name
pfazure run stream --name $run_name
pfazure run show-details --name $run_name
pfazure run show-metrics --name $run_name
# (Optional) save previous run name into variable, and create a new random run name for further use
prev_run_name=$run_name
run_name="classification_accuracy_"$(openssl rand -hex 12)
# create evaluation run, --name is optional
pfazure run create --flow ../../evaluation/eval-classification-accuracy --data ./data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run $prev_run_name
pfazure run create --file run_evaluation.yml --run $prev_run_name --stream --name $run_name
pfazure run stream --name $run_name
pfazure run show --name $run_name
pfazure run show-details --name $run_name
pfazure run show-metrics --name $run_name
pfazure run visualize --name $run_name
``` | promptflow/examples/flows/standard/web-classification/README.md/0 | {
"file_path": "promptflow/examples/flows/standard/web-classification/README.md",
"repo_id": "promptflow",
"token_count": 1419
} | 21 |
my_tool_package.tools.tool_with_custom_llm_type.my_tool:
name: My Custom LLM Tool
description: This is a tool to demonstrate how to customize an LLM tool with a PromptTemplate.
type: custom_llm
module: my_tool_package.tools.tool_with_custom_llm_type
function: my_tool
inputs:
connection:
type:
- CustomConnection
| promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_custom_llm_type.yaml/0 | {
"file_path": "promptflow/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_custom_llm_type.yaml",
"repo_id": "promptflow",
"token_count": 121
} | 22 |
# Basic flow with package tool using cascading inputs
This is a flow demonstrating the use of a tool with cascading inputs, which is frequently used in situations where the selection in one input field determines what subsequent inputs should be shown,
and it helps in creating a more efficient, user-friendly, and error-free input process.
Tools used in this flow:
- `python` Tool
Connections used in this flow:
- None
## Prerequisites
Install promptflow sdk and other dependencies:
```bash
pip install -r requirements.txt
```
## Run flow
- Test flow
```bash
pf flow test --flow .
``` | promptflow/examples/tools/use-cases/cascading-inputs-tool-showcase/README.md/0 | {
"file_path": "promptflow/examples/tools/use-cases/cascading-inputs-tool-showcase/README.md",
"repo_id": "promptflow",
"token_count": 152
} | 23 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
website_name:
type: string
default: Microsoft
user_name:
type: string
default: ""
outputs:
output:
type: string
reference: ${my_custom_llm_tool.output}
nodes:
- name: my_custom_llm_tool
type: custom_llm
source:
type: package_with_prompt
tool: my_tool_package.tools.tool_with_custom_llm_type.my_tool
path: prompt_template.jinja2
inputs:
connection: basic_custom_connection
website_name: ${inputs.website_name}
user_name: ${inputs.user_name}
| promptflow/examples/tools/use-cases/custom_llm_tool_showcase/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/tools/use-cases/custom_llm_tool_showcase/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 238
} | 24 |
<#
.DESCRIPTION
Script to deploy promptflow to Azure App Service.

.PARAMETER path
The folder path to be deployed
.PARAMETER image_tag
The container image tag.
.PARAMETER registry
The container registry name, for example 'xx.azurecr.io'.
.PARAMETER name
The app name to produce a unique FQDN as AppName.azurewebsites.net.
.PARAMETER location
The app location, default to 'eastus'.
.PARAMETER sku
The app sku, default to 'F1'(free).
.PARAMETER resource_group
The app resource group.
.PARAMETER subscription
The app subscription, default using az account subscription.
.PARAMETER verbose
verbose mode.

.EXAMPLE
PS> .\deploy.ps1 -Path <folder-path> -Name my_app_23d8m -i <image_tag> -r <registry> -n <app_name> -g <resource_group>

.EXAMPLE
PS> .\deploy.ps1 -Path <folder-path> -Name my_app_23d8m -i <image_tag> -r <registry> -n <app_name> -g <resource_group> -Subscription "xxxx-xxxx-xxxx-xxxx-xxxx" -Verbose
#>
[CmdletBinding()]
param(
    # Folder containing the flow build artifacts to deploy.
    [string]$Path,
    [Alias("i", "image_tag")][string]$ImageTag,
    [Alias("r")][string]$Registry,
    [Alias("n")][string]$Name,
    # Azure region for the service plan and web app.
    [Alias("l")][string]$Location = "eastus",
    # App Service plan pricing tier; F1 is the free tier.
    [string]$Sku = "F1",
    [Alias("g", "resource_group")][string]$ResourceGroup,
    [string]$Subscription
)
####################### Validate args ############################
$ErrorActionPreference = "Stop"
# fail if image_tag not provided
if (!$ImageTag) {
    Write-Host "***************************"
    Write-Host "* Error: image_tag is required.*"
    Write-Host "***************************"
    exit 1
}
# if image_tag carries no explicit version (no ':'), append a timestamp tag
if (!$ImageTag.Contains(":")) {
    $Version = "v$(Get-Date -Format 'yyyyMMdd-HHmmss')"
    # Assign back to $ImageTag directly; the previous lowercase $image_tag
    # only worked because PowerShell variable names are case-insensitive.
    $ImageTag = "${ImageTag}:${Version}"
}
Write-Host "image_tag: $ImageTag"
# fail if Registry not provided (exit 1 so callers see a failure code)
if (!$Registry) {
    Write-Host "***************************"
    Write-Host "* Error: registry is required.*"
    Write-Host "***************************"
    exit 1
}
# fail if name not provided
if (!$Name) {
    Write-Host "***************************"
    Write-Host "* Error: name is required.*"
    Write-Host "***************************"
    exit 1
}
# fail if resource_group not provided
if (!$ResourceGroup) {
    Write-Host "***************************"
    Write-Host "* Error: resource_group is required.*"
    Write-Host "***************************"
    exit 1
}
# fail if Path not provided
if (!$Path) {
    Write-Host "***************************"
    Write-Host "* Error: Path is required.*"
    Write-Host "***************************"
    exit 1
}
####################### Build and push image ############################
# Build the container image from the deployment folder, then push it either
# to Azure Container Registry (logging in with `az acr login`) or to a
# generic registry (assumes the user is already logged in with docker).
Write-Host "Change working directory to $Path"
cd $Path
docker build -t "$ImageTag" .
if ($Registry.Contains("azurecr.io")) {
    Write-Host "Trying to login to $Registry..."
    az acr login -n "$Registry"
    $AcrImageTag = $Registry + "/" + $ImageTag
    Write-Host "ACR image tag: $AcrImageTag"
    docker tag "$ImageTag" "$AcrImageTag"
    $ImageTag = $AcrImageTag
}
else {
    # NOTE(review): "\n" is printed literally by Write-Host; the PowerShell
    # newline escape is `n — confirm whether the literal output is intended.
    Write-Host "***************************************************\n"
    Write-Host "* WARN: Make sure you have docker account login!!!*\n"
    Write-Host "***************************************************\n"
    $DockerImageTag = $Registry + "/" + $ImageTag
    Write-Host "Docker image tag: $DockerImageTag"
    docker tag "$ImageTag" "$DockerImageTag"
    $ImageTag = $DockerImageTag
}
Write-Host "Start pushing image...$ImageTag"
docker push "$ImageTag"
####################### Create and config app ############################
# Append the global az CLI options (subscription, --debug) to a command string.
function Append-To-Command {
    param (
        [string] $Command
    )
    if ($Subscription) {
        $Command = "$Command --subscription $Subscription"
    }
    if ($VerbosePreference -eq "Continue") {
        $Command="$Command --debug"
    }
    # Echo the final command for traceability, then return it to the caller.
    Write-Host "$Command"
    return $Command
}
# Run an az CLI command string; abort the whole script if it fails.
function Invoke-Expression-And-Check{
    param (
        [string]$Command
    )
    $Command=$(Append-To-Command "$Command")
    Invoke-Expression $Command
    # NOTE(review): "-gt 0" does not catch negative exit codes; "-ne 0" would
    # be stricter — confirm whether az can return negative codes here.
    if ($LASTEXITCODE -gt 0) {
        exit $LASTEXITCODE
    }
}
# Check and create resource group if not exist
$Result = (az group exists --name $ResourceGroup)
if ($Result -eq "false") {
    Write-Host "Creating resource group...$ResourceGroup"
    $Command="az group create --name $ResourceGroup -l $Location"
    Invoke-Expression-And-Check "$Command"
}
# Create service plan (Linux plan, since the app runs a container image)
$ServicePlanName = $Name + "_service_plan"
Write-Host "Creating service plan...$ServicePlanName"
$Command="az appservice plan create --name $ServicePlanName --sku $Sku --location $location --is-linux -g $ResourceGroup"
Invoke-Expression-And-Check "$Command"
# Create the web app from the pushed container image
Write-Host "Creating app...$Name"
$Command="az webapp create --name $Name -p $ServicePlanName --deployment-container-image-name $ImageTag --startup-file 'bash start.sh' -g $ResourceGroup"
Invoke-Expression-And-Check "$Command"
# Config environment variable (settings.json supplies the app settings payload)
Write-Host "Config app...$Name"
$Command="az webapp config appsettings set -g $ResourceGroup --name $Name --settings USER_AGENT=promptflow-appservice ('@settings.json')"
Invoke-Expression-And-Check "$Command"
# Final pointers for the user to finish configuration and find logs.
Write-Host "Please go to https://portal.azure.com/ to config environment variables and restart the app: $Name at (Settings>Configuration) or (Settings>Environment variables)"
Write-Host "Reach deployment logs at (Deployment>Deployment Central) and app logs at (Monitoring>Log stream)"
Write-Host "Reach advanced deployment tools at https://$Name.scm.azurewebsites.net/"
Write-Host "Reach more details about app service at https://learn.microsoft.com/en-us/azure/app-service/"
"file_path": "promptflow/examples/tutorials/flow-deploy/azure-app-service/deploy.ps1",
"repo_id": "promptflow",
"token_count": 1878
} | 25 |
<PoliCheckExclusions>
<!-- All strings must be UPPER CASE -->
<!--index-xxx.js is an auto-generated javascript file - skipped given it's not expected to be readable -->
<Exclusion Type="FileName">SRC\PROMPTFLOW\PROMPTFLOW\_SDK\_SERVING\STATIC\INDEX.JS</Exclusion>
</PoliCheckExclusions>
| promptflow/scripts/compliance-check/user_exclusion.xml/0 | {
"file_path": "promptflow/scripts/compliance-check/user_exclusion.xml",
"repo_id": "promptflow",
"token_count": 99
} | 26 |
# Building the Windows MSI Installer
This document provides instructions on creating the MSI installer.
## Option1: Building with Github Actions
Trigger the [workflow](https://github.com/microsoft/promptflow/actions/workflows/build_msi_installer.yml) manually.
## Option2: Local Building
### Prerequisites
1. Turn on the '.NET Framework 3.5' Windows Feature (required for WIX Toolset).
2. Install 'Microsoft Build Tools 2015'.
https://www.microsoft.com/download/details.aspx?id=48159
3. You need to have curl.exe, unzip.exe and msbuild.exe available under PATH.
4. Install 'WIX Toolset build tools' following the instructions below.
- Enter the directory where the README is located (`cd scripts/installer/windows`), `mkdir wix` and `cd wix`.
- `curl --output wix-archive.zip https://azurecliprod.blob.core.windows.net/msi/wix310-binaries-mirror.zip`
- `unzip wix-archive.zip` and `del wix-archive.zip`
5. We recommend creating a clean virtual Python environment and installing all dependencies using src/promptflow/setup.py.
- `python -m venv venv`
- `venv\Scripts\activate`
- `pip install promptflow[azure,executable,pfs] promptflow-tools`
### Building
1. Update the version number `$(env.CLI_VERSION)` and `$(env.FILE_VERSION)` in `product.wxs`, `promptflow.wixproj` and `version_info.txt`.
2. `cd scripts/installer/windows/scripts` and run `pyinstaller promptflow.spec`.
3. `cd scripts/installer/windows` and Run `msbuild /t:rebuild /p:Configuration=Release /p:Platform=x64 promptflow.wixproj`.
4. The unsigned MSI will be in the `scripts/installer/windows/out` folder.
## Notes
- If you encounter "Access is denied" error when running promptflow. Please follow the [link](https://learn.microsoft.com/en-us/microsoft-365/security/defender-endpoint/attack-surface-reduction-rules-deployment-implement?view=o365-worldwide#customize-attack-surface-reduction-rules) to add the executable to the Windows Defender Attack Surface Reduction (ASR) rule. | promptflow/scripts/installer/windows/README.md/0 | {
"file_path": "promptflow/scripts/installer/windows/README.md",
"repo_id": "promptflow",
"token_count": 610
} | 27 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# flake8: noqa
# This file is part of scripts\generate_json_schema.py in sdk-cli-v2, which is used to generate json schema
# To use this script, run `python <this_file>` in promptflow env,
# and the json schema will be generated in the same folder.
from inspect import isclass
import json
from azure.ai.ml._schema import ExperimentalField
from promptflow._sdk.schemas._base import YamlFileSchema
from promptflow._sdk.schemas._fields import UnionField
from marshmallow import Schema, fields, missing
from marshmallow.class_registry import get_class
from marshmallow_jsonschema import JSONSchema
class PatchedJSONSchema(JSONSchema):
    """JSONSchema subclass patched for promptflow schema generation.

    Differences from stock ``marshmallow_jsonschema.JSONSchema``:
    - ``required``/``properties`` are computed by the methods below so a
      field's display name can come from ``metadata["name"]`` or ``data_key``;
    - ``Dict``/``Raw`` fields, ``UnionField``, ``ExperimentalField`` and
      ``Constant`` fields get dedicated JSON-schema translations;
    - property ordering can follow declaration order (``props_ordered``).
    """

    # Route "required" and "properties" through the custom getters below.
    required = fields.Method("get_required")
    properties = fields.Method("get_properties")

    def __init__(self, *args, **kwargs):
        """Setup internal cache of nested fields, to prevent recursion.

        :param bool props_ordered: if `True`, the order of properties is kept
            as declared in the schema class; otherwise properties are sorted.
            Default is `False`.

        Note: for the marshmallow schema, ordering of fields must also be
        enabled (via `class Meta`, attribute `ordered`).
        """
        self._nested_schema_classes = {}
        self.nested = kwargs.pop("nested", False)
        self.props_ordered = kwargs.pop("props_ordered", False)
        setattr(self.opts, "ordered", self.props_ordered)
        super().__init__(*args, **kwargs)

    # cspell: ignore pytype
    def _from_python_type(self, obj, field, pytype):
        """Translate a marshmallow field into a JSON-schema type mapping."""
        metadata = field.metadata.get("metadata", {})
        metadata.update(field.metadata)
        # This is in the upcoming release of marshmallow-jsonschema, but not available yet
        if isinstance(field, fields.Dict):
            # Dict fields map to {"type": "object"}; additionalProperties
            # describes the value field's schema (empty when untyped).
            values = metadata.get("values", None) or field.value_field
            json_schema = {"title": field.attribute or field.data_key or field.name}
            json_schema["type"] = "object"
            if values:
                values.parent = field
                json_schema["additionalProperties"] = self._get_schema_for_field(obj, values) if values else {}
            return json_schema
        if isinstance(field, fields.Raw):
            # Raw fields accept anything: emit only a title, no type constraint.
            json_schema = {"title": field.attribute or field.data_key or field.name}
            return json_schema
        return super()._from_python_type(obj, field, pytype)

    def _get_schema_for_field(self, obj, field):
        """Get schema and validators for field."""
        # Explicit type mappings (attribute or metadata) take precedence over
        # the built-in per-field-type translation.
        if hasattr(field, "_jsonschema_type_mapping"):
            schema = field._jsonschema_type_mapping()  # pylint: disable=protected-access
        elif "_jsonschema_type_mapping" in field.metadata:
            schema = field.metadata["_jsonschema_type_mapping"]
        else:
            if isinstance(field, UnionField):
                schema = self._get_schema_for_union_field(obj, field)
            elif isinstance(field, ExperimentalField):
                # Experimental wrapper: document the wrapped field instead.
                schema = self._get_schema_for_field(obj, field.experimental_field)
            elif isinstance(field, fields.Constant):
                schema = {"const": field.constant}
            else:
                schema = super()._get_schema_for_field(obj, field)
        if field.data_key:
            schema["title"] = field.data_key
        return schema

    def _get_schema_for_union_field(self, obj, field):
        """Build a oneOf/anyOf schema covering each variant of a UnionField."""
        has_yaml_option = False
        schemas = []
        for field_item in field._union_fields:  # pylint: disable=protected-access
            if isinstance(field_item, fields.Nested) and isinstance(field_item.schema, YamlFileSchema):
                has_yaml_option = True
            schemas.append(self._get_schema_for_field(obj, field_item))
        if has_yaml_option:
            # A nested YAML-file schema can also be referenced as "file:<path>".
            schemas.append({"type": "string", "pattern": "^file:.*"})
        if field.allow_none:
            schemas.append({"type": "null"})
        # Strict unions require exactly one variant to match (oneOf).
        if field.is_strict:
            schema = {"oneOf": schemas}
        else:
            schema = {"anyOf": schemas}
        # This happens in the super() call to get_schema, doing here to allow for adding
        # descriptions and other schema attributes from marshmallow metadata
        metadata = field.metadata.get("metadata", {})
        for md_key, md_val in metadata.items():
            if md_key in ("metadata", "name"):
                continue
            schema[md_key] = md_val
        return schema

    def _from_nested_schema(self, obj, field):
        """patch in context for nested field"""
        # Resolve string references through marshmallow's class registry and
        # instantiate the nested schema with the parent's context propagated.
        if isinstance(field.nested, (str, bytes)):
            nested = get_class(field.nested)
        else:
            nested = field.nested
        if isclass(nested) and issubclass(nested, Schema):
            only = field.only
            exclude = field.exclude
            context = getattr(field.parent, "context", {})
            field.nested = nested(only=only, exclude=exclude, context=context)
        return super()._from_nested_schema(obj, field)

    def get_properties(self, obj):
        """Fill out properties field."""
        properties = self.dict_class()
        if self.props_ordered:
            fields_items_sequence = obj.fields.items()
        else:
            fields_items_sequence = sorted(obj.fields.items())
        for _, field in fields_items_sequence:
            schema = self._get_schema_for_field(obj, field)
            # Prefer an explicit metadata "name", then the serialized data_key,
            # falling back to the attribute name.
            properties[field.metadata.get("name") or field.data_key or field.name] = schema
        return properties

    def get_required(self, obj):
        """Fill out required field."""
        required = []
        for _, field in sorted(obj.fields.items()):
            if field.required:
                required.append(field.metadata.get("name") or field.data_key or field.name)
        return required or missing
from promptflow._sdk.schemas._connection import AzureOpenAIConnectionSchema, OpenAIConnectionSchema, \
QdrantConnectionSchema, CognitiveSearchConnectionSchema, SerpConnectionSchema, AzureContentSafetyConnectionSchema, \
FormRecognizerConnectionSchema, CustomConnectionSchema, WeaviateConnectionSchema
from promptflow._sdk.schemas._run import RunSchema
from promptflow._sdk.schemas._flow import FlowSchema, EagerFlowSchema
if __name__ == "__main__":
    # Schemas to export; each becomes one alternative in the "oneOf" union.
    cls_list = [FlowSchema, EagerFlowSchema]
    schema_list = []
    for cls in cls_list:
        # Dump each schema class through the patched generator; the JSON-schema
        # body for the class lives under "definitions/<ClassName>".
        target_schema = PatchedJSONSchema().dump(cls(context={"base_path": "./"}))
        schema_list.append(target_schema["definitions"][cls.__name__])
    # A flow document must match exactly one of the generated schemas.
    schema = {
        "type": "object",
        "oneOf": schema_list,
    }
    with open("Flow.schema.json", "w") as f:
        f.write(json.dumps(schema, indent=4))
"file_path": "promptflow/scripts/json_schema/gen_json_schema.py",
"repo_id": "promptflow",
"token_count": 2887
} | 28 |
- name: {{ step_name }}
working-directory: {{ working_dir }}
run: |
if [[ -e .env ]]; then
pf connection create --file .env --name {{ connection_name }}
fi
if [[ -e azure_openai.yml ]]; then
pf connection create --file azure_openai.yml --name {{ connection_name }}
fi
pf connection list
| promptflow/scripts/readme/ghactions_driver/workflow_steps/step_env_create_aoai.yml.jinja2/0 | {
"file_path": "promptflow/scripts/readme/ghactions_driver/workflow_steps/step_env_create_aoai.yml.jinja2",
"repo_id": "promptflow",
"token_count": 123
} | 29 |
import os
import glob
import argparse
from pathlib import Path
import ntpath
import re
import hashlib
import json
from jinja2 import Environment, FileSystemLoader
from ghactions_driver.readme_step import ReadmeStepsManage
from ghactions_driver.resource_resolver import resolve_tutorial_resource
from ghactions_driver.telemetry_obj import Telemetry
def format_ipynb(notebooks):
    """Run the black-nb code formatter (clearing outputs) on each notebook."""
    for notebook_path in notebooks:
        command = f"black-nb --clear-output {notebook_path}"
        os.system(command)
def _get_paths(paths_list):
"""
Convert the path list to unix format.
:param paths_list: The input path list.
:returns: The same list with unix-like paths.
"""
paths_list.sort()
if ntpath.sep == os.path.sep:
return [pth.replace(ntpath.sep, "/") for pth in paths_list]
return paths_list
def write_notebook_workflow(notebook, name, output_telemetry=Telemetry()):
    """Render and write the GitHub Actions workflow for one notebook.

    :param notebook: repo-relative notebook path using "/" separators.
    :param name: notebook file name without extension (display name).
    :param output_telemetry: Telemetry object populated in place with the
        derived workflow metadata.
        NOTE(review): a mutable default (Telemetry()) is shared across calls
        that omit this argument — confirm callers always pass their own.
    """
    # Derive the workflow name: drop path parts carrying no information
    # ("tutorials", "examples", the "ipynb" extension), strip dashes, and
    # prefix with "samples".
    temp_name_list = re.split(r"/|\.", notebook)
    temp_name_list = [
        x
        for x in temp_name_list
        if x != "tutorials" and x != "examples" and x != "ipynb"
    ]
    temp_name_list = [x.replace("-", "") for x in temp_name_list]
    workflow_name = "_".join(["samples"] + temp_name_list)
    # Workflow files live under .github/workflows at the repo root.
    place_to_write = (
        Path(ReadmeStepsManage.git_base_dir())
        / ".github"
        / "workflows"
        / f"{workflow_name}.yml"
    )
    gh_working_dir = "/".join(notebook.split("/")[:-1])
    env = Environment(
        loader=FileSystemLoader("./scripts/readme/ghactions_driver/workflow_templates")
    )
    template = env.get_template("basic_workflow.yml.jinja2")
    # Schedule notebooks at different times to reduce maximum quota usage.
    # The sha512 hash makes the schedule deterministic per workflow name.
    name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
    schedule_minute = name_hash % 60
    schedule_hour = (name_hash // 60) % 4 + 19  # 19-22 UTC
    # Decide which repo paths should trigger this workflow on change.
    if "tutorials" in gh_working_dir:
        notebook_path = Path(ReadmeStepsManage.git_base_dir()) / str(notebook)
        path_filter = resolve_tutorial_resource(workflow_name, notebook_path.resolve())
    elif "samples_configuration" in workflow_name:
        # exception, samples configuration is very simple and not related to other prompt flow examples
        path_filter = (
            "[ examples/configuration.ipynb, .github/workflows/samples_configuration.yml ]"
        )
    else:
        path_filter = f"[ {gh_working_dir}/**, examples/*requirements.txt, .github/workflows/{workflow_name}.yml ]"
    # these workflows require config.json to init PF/ML client
    workflows_require_config_json = [
        "configuration",
        "flowinpipeline",
        "quickstartazure",
        "cloudrunmanagement",
    ]
    # Some notebooks need a specialized workflow template instead of the basic one.
    if any(keyword in workflow_name for keyword in workflows_require_config_json):
        template = env.get_template("workflow_config_json.yml.jinja2")
    elif "chatwithpdf" in workflow_name:
        template = env.get_template("pdf_workflow.yml.jinja2")
    elif "flowasfunction" in workflow_name:
        template = env.get_template("flow_as_function.yml.jinja2")
    content = template.render(
        {
            "workflow_name": workflow_name,
            "ci_name": "samples_notebook_ci",
            "name": name,
            "gh_working_dir": gh_working_dir,
            "path_filter": path_filter,
            "crontab": f"{schedule_minute} {schedule_hour} * * *",
            "crontab_comment": f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT",
        }
    )
    # To customize workflow, add new steps in steps.py
    # make another function for special cases.
    with open(place_to_write.resolve(), "w") as f:
        f.write(content)
    print(f"Write workflow: {place_to_write.resolve()}")
    # Record what was generated for telemetry/reporting.
    output_telemetry.workflow_name = workflow_name
    output_telemetry.name = name
    output_telemetry.gh_working_dir = gh_working_dir
    output_telemetry.path_filter = path_filter
def write_workflows(notebooks, output_telemetries=None):
    """Generate a GitHub Actions workflow file for every notebook.

    :param notebooks: iterable of repo-relative notebook paths.
    :param output_telemetries: optional list that collects one Telemetry
        object per notebook. A fresh list is created when omitted; the old
        signature used a mutable default (``[]``) that silently accumulated
        entries across calls.
    """
    if output_telemetries is None:
        output_telemetries = []
    # process notebooks
    for notebook in notebooks:
        # get notebook name
        output_telemetry = Telemetry()
        nb_path = Path(notebook)
        name, _ = os.path.splitext(nb_path.parts[-1])
        # write workflow file
        write_notebook_workflow(notebook, name, output_telemetry)
        output_telemetry.notebook = nb_path
        output_telemetries.append(output_telemetry)
def local_filter(callback, array):
    """Return the items of *array* for which callback(item, index, array) is truthy."""
    return [
        item
        for index, item in enumerate(array)
        if callback(item, index, array)
    ]
def no_readme_generation_filter(item, index, array) -> bool:
    """
    Set each ipynb metadata no_readme_generation to "true" to skip readme generation
    """
    try:
        # Notebooks named *test.ipynb are never documented.
        if item.endswith("test.ipynb"):
            return False
        # read in notebook
        with open(item, "r", encoding="utf-8") as f:
            notebook = json.load(f)
        try:
            flag = notebook["metadata"]["no_readme_generation"]
            # flag == "true" means skip generation; any other non-None value
            # still generates. (A JSON null falls through and yields None,
            # which the caller treats the same as False.)
            if flag is not None:
                return flag != "true"
        except Exception:
            return True  # generate readme
    except Exception:
        return False  # not generate readme
def main(input_glob, output_files=None, check=False):
    """Collect notebooks matching *input_glob* and emit their CI workflows.

    Args:
        input_glob: list of glob patterns, e.g. ["examples/**/*.ipynb"].
        output_files: optional list that collects Telemetry records per
            notebook. Defaults to a fresh list per call; the previous mutable
            default ([]) was shared across calls.
        check: when True, skip re-formatting the notebooks.
    """
    # BUGFIX: mutable default argument replaced with a None sentinel.
    if output_files is None:
        output_files = []
    # get list of workflows
    notebooks = _get_paths(
        [j for i in [glob.glob(p, recursive=True) for p in input_glob] for j in i]
    )
    # check each workflow, get metadata; drop notebooks that opt out of README generation
    notebooks = local_filter(no_readme_generation_filter, notebooks)
    # format code
    if not check:
        format_ipynb(notebooks)
    # write workflows
    write_workflows(notebooks, output_files)
# run functions
# CLI entry point: generate GitHub workflow files for the notebooks matched
# by the provided glob pattern(s).
if __name__ == "__main__":
    # setup argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-g", "--input-glob", nargs="+", help="Input glob example 'examples/**/*.ipynb'"
    )
    args = parser.parse_args()
    # call main
    main(input_glob=args.input_glob)
| promptflow/scripts/readme/workflow_generator.py/0 | {
"file_path": "promptflow/scripts/readme/workflow_generator.py",
"repo_id": "promptflow",
"token_count": 2474
} | 30 |
# Jinja2 template: rendered into a unit-test module for a generated tool
# package. {{ package_name }}, {{ tool_name }}, {{ class_name }} and
# {{ function_name }} are substituted by the tool-package scaffolding.
import pytest
import unittest
from {{ package_name }}.tools.{{ tool_name }} import {{ class_name }}
# Fixture: the URL passed to the tool provider's constructor.
@pytest.fixture
def my_url() -> str:
    my_url = "https://www.bing.com"
    return my_url
# Fixture: the tool provider instance under test, built from the URL fixture.
@pytest.fixture
def my_tool_provider(my_url) -> {{ class_name }}:
    my_tool_provider = {{ class_name }}(my_url)
    return my_tool_provider
class TestTool:
    # Smoke test: asserts the generated tool returns the expected greeting.
    def test_{{ tool_name }}(self, my_tool_provider):
        result = my_tool_provider.{{ function_name }}(query="Microsoft")
        assert result == "Hello Microsoft"
# Run the unit tests
if __name__ == "__main__":
    unittest.main()
| promptflow/scripts/tool/templates/test_tool2.py.j2/0 | {
"file_path": "promptflow/scripts/tool/templates/test_tool2.py.j2",
"repo_id": "promptflow",
"token_count": 231
} | 31 |
# Release History
## 1.0.0 (2023.11.30)
### Features Added
- Support openai 1.x in promptflow-tools
- Add new tool "OpenAI GPT-4V"
| promptflow/src/promptflow-tools/CHANGELOG.md/0 | {
"file_path": "promptflow/src/promptflow-tools/CHANGELOG.md",
"repo_id": "promptflow",
"token_count": 52
} | 32 |
from typing import Union

try:
    from openai import OpenAI as OpenAIClient
except Exception:
    raise Exception(
        "Please upgrade your OpenAI package to version 1.0.0 or later using the command: pip install --upgrade openai.")

from promptflow.connections import OpenAIConnection
from promptflow.contracts.types import PromptTemplate
from promptflow._internal import ToolProvider, tool
from promptflow.tools.common import render_jinja_template, handle_openai_error, \
    parse_chat, post_process_chat_api_response, preprocess_template_string, \
    find_referenced_image_set, convert_to_chat_list, normalize_connection_config
class OpenAI(ToolProvider):
    """Prompt flow tool provider wrapping the OpenAI chat completions API
    (defaulting to the GPT-4 vision preview model) behind an
    ``OpenAIConnection``."""

    def __init__(self, connection: OpenAIConnection):
        super().__init__()
        self._connection_dict = normalize_connection_config(connection)
        self._client = OpenAIClient(**self._connection_dict)

    @tool(streaming_option_parameter="stream")
    @handle_openai_error()
    def chat(
        self,
        prompt: PromptTemplate,
        model: str = "gpt-4-vision-preview",
        temperature: float = 1.0,
        top_p: float = 1.0,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
        stream: bool = False,
        stop: list = None,
        max_tokens: int = None,
        presence_penalty: float = 0,
        frequency_penalty: float = 0,
        **kwargs,
    ) -> Union[str, dict]:
        # BUGFIX: the return annotation was the list literal ``[str, dict]``,
        # which is not a valid type hint; use ``Union[str, dict]`` instead.
        #
        # keep_trailing_newline=True keeps the last \n in the prompt to avoid
        # converting "user:\t\n" to "user:".
        prompt = preprocess_template_string(prompt)
        referenced_images = find_referenced_image_set(kwargs)

        # convert list type into ChatInputList type
        converted_kwargs = convert_to_chat_list(kwargs)
        chat_str = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **converted_kwargs)
        messages = parse_chat(chat_str, list(referenced_images))
        params = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "top_p": top_p,
            "n": 1,
            "stream": stream,
            "presence_penalty": presence_penalty,
            "frequency_penalty": frequency_penalty,
        }

        # Only forward optional parameters the caller actually set (e.g. the
        # API rejects an empty stop list).
        if stop:
            params["stop"] = stop
        if max_tokens is not None:
            params["max_tokens"] = max_tokens

        completion = self._client.chat.completions.create(**params)
        return post_process_chat_api_response(completion, stream, None)
| promptflow/src/promptflow-tools/promptflow/tools/openai_gpt4v.py/0 | {
"file_path": "promptflow/src/promptflow-tools/promptflow/tools/openai_gpt4v.py",
"repo_id": "promptflow",
"token_count": 1021
} | 33 |
from unittest.mock import patch
import pytest
import json
from promptflow.connections import AzureOpenAIConnection
from promptflow.tools.aoai import chat, completion
from promptflow.tools.exception import WrappedOpenAIError
from tests.utils import AttrDict
@pytest.mark.usefixtures("use_secrets_config_file")
class TestAOAI:
    """Tests for the Azure OpenAI completion and chat tools.

    Most tests call the live service through the ``aoai_provider`` /
    ``azure_open_ai_connection`` fixtures; ``test_aoai_parameters`` is the
    exception and mocks the OpenAI client instead.
    """
    def test_aoai_completion(self, aoai_provider):
        prompt_template = "please complete this sentence: world war II "
        # test whether tool can handle param "stop" with value empty list
        # as openai raises "[] is not valid under any of the given schemas - 'stop'"
        aoai_provider.completion(
            prompt=prompt_template, deployment_name="gpt-35-turbo-instruct", stop=[], logit_bias={}
        )
    def test_aoai_stream_completion(self, aoai_provider):
        prompt_template = "please complete this sentence: world war II "
        # test whether tool can handle param "stop" with value empty list in stream mode
        # as openai raises "[] is not valid under any of the given schemas - 'stop'"
        aoai_provider.completion(
            prompt=prompt_template, deployment_name="gpt-35-turbo-instruct", stop=[], logit_bias={}, stream=True
        )
    def test_aoai_chat(self, aoai_provider, example_prompt_template, chat_history):
        result = aoai_provider.chat(
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo",
            max_tokens="32",
            temperature=0,
            user_input="Fill in more details about trend 2.",
            chat_history=chat_history,
        )
        assert "additional details" in result.lower()
    # NOTE(review): max_tokens="inF" below is not a numeric value; presumably
    # this exercises the tool's lenient handling of the parameter — confirm
    # this is intentional rather than a typo.
    def test_aoai_chat_api(self, azure_open_ai_connection, example_prompt_template, chat_history):
        result = chat(
            connection=azure_open_ai_connection,
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo",
            max_tokens="inF",
            temperature=0,
            user_input="Write a slogan for product X",
            chat_history=chat_history,
        )
        assert "Product X".lower() in result.lower()
    @pytest.mark.parametrize(
        "function_call",
        [
            "auto",
            {"name": "get_current_weather"},
        ],
    )
    def test_aoai_chat_with_function(
            self, azure_open_ai_connection, example_prompt_template, chat_history, functions, function_call):
        # Covers both function_call modes: model-chosen ("auto") and forced by name.
        result = chat(
            connection=azure_open_ai_connection,
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo",
            max_tokens="inF",
            temperature=0,
            user_input="What is the weather in Boston?",
            chat_history=chat_history,
            functions=functions,
            function_call=function_call
        )
        assert "function_call" in result
        assert result["function_call"]["name"] == "get_current_weather"
    def test_aoai_chat_with_name_in_roles(
            self, azure_open_ai_connection, example_prompt_template_with_name_in_roles, chat_history, functions):
        # Prompt template includes named roles (e.g. a function-result message).
        result = chat(
            connection=azure_open_ai_connection,
            prompt=example_prompt_template_with_name_in_roles,
            deployment_name="gpt-35-turbo",
            max_tokens="inF",
            temperature=0,
            functions=functions,
            name="get_location",
            result=json.dumps({"location": "Austin"}),
            question="What is the weather in Boston?",
            prev_question="Where is Boston?"
        )
        assert "function_call" in result
        assert result["function_call"]["name"] == "get_current_weather"
    def test_aoai_chat_message_with_no_content(self, aoai_provider):
        # missing colon after role name. Sometimes following prompt may result in empty content.
        prompt = (
            "user:\n what is your name\nassistant\nAs an AI language model developed by"
            " OpenAI, I do not have a name. You can call me OpenAI or AI assistant. "
            "How can I assist you today?"
        )
        # assert chat tool can handle.
        aoai_provider.chat(prompt=prompt, deployment_name="gpt-35-turbo")
        # empty content after role name:\n
        prompt = "user:\n"
        aoai_provider.chat(prompt=prompt, deployment_name="gpt-35-turbo")
    def test_aoai_stream_chat(self, aoai_provider, example_prompt_template, chat_history):
        result = aoai_provider.chat(
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo",
            max_tokens="32",
            temperature=0,
            user_input="Fill in more details about trend 2.",
            chat_history=chat_history,
            stream=True,
        )
        # Drain the generator returned in stream mode and concatenate chunks.
        answer = ""
        while True:
            try:
                answer += next(result)
            except Exception:
                break
        assert "additional details" in answer.lower()
    @pytest.mark.parametrize(
        "params, expected",
        [
            ({"stop": [], "logit_bias": {}}, {"stop": None}),
            ({"stop": ["</i>"], "logit_bias": {"16": 100, "17": 100}}, {}),
        ],
    )
    def test_aoai_parameters(self, params, expected):
        # Unit-style test: patches openai.resources.Completions.create so the
        # tool's parameter normalization (e.g. empty stop -> None) can be
        # asserted without calling the live service.
        for k, v in params.items():
            if k not in expected:
                expected[k] = v
        deployment_name = "dummy"
        conn_dict = {"api_key": "dummy", "api_base": "base", "api_version": "dummy_ver", "api_type": "azure"}
        conn = AzureOpenAIConnection(**conn_dict)
        def mock_completion(self, **kwargs):
            assert kwargs["model"] == deployment_name
            for k, v in expected.items():
                assert kwargs[k] == v, f"Expect {k} to be {v}, but got {kwargs[k]}"
            text = kwargs["prompt"]
            return AttrDict({"choices": [AttrDict({"text": text})]})
        with patch("openai.resources.Completions.create", new=mock_completion):
            prompt = "dummy_prompt"
            result = completion(connection=conn, prompt=prompt, deployment_name=deployment_name, **params)
            assert result == prompt
    def test_aoai_chat_with_response_format(
            self,
            azure_open_ai_connection,
            example_prompt_template,
            chat_history):
        result = chat(
            connection=azure_open_ai_connection,
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo-1106",
            temperature=0,
            user_input="Write a slogan for product X, please response with json.",
            chat_history=chat_history,
            response_format={"type": "json_object"}
        )
        assert "x:".lower() in result.lower()
    @pytest.mark.parametrize(
        "response_format, user_input, error_message, error_codes, exception",
        [
            ({"type": "json"}, "Write a slogan for product X, please response with json.",
             "\'json\' is not one of [\'json_object\', \'text\']", "UserError/OpenAIError/BadRequestError",
             WrappedOpenAIError),
            ({"type": "json_object"}, "Write a slogan for product X",
             "\'messages\' must contain the word \'json\' in some form", "UserError/OpenAIError/BadRequestError",
             WrappedOpenAIError),
            ({"types": "json_object"}, "Write a slogan for product X",
             "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}",
             "UserError/OpenAIError/BadRequestError",
             WrappedOpenAIError)
        ]
    )
    def test_aoai_chat_with_invalid_response_format(
            self,
            azure_open_ai_connection,
            example_prompt_template,
            chat_history,
            response_format,
            user_input,
            error_message,
            error_codes,
            exception
    ):
        # Each invalid response_format / prompt combination must surface as a
        # WrappedOpenAIError with the expected message and error-code chain.
        with pytest.raises(exception) as exc_info:
            chat(
                connection=azure_open_ai_connection,
                prompt=example_prompt_template,
                deployment_name="gpt-35-turbo-1106",
                temperature=0,
                user_input=user_input,
                chat_history=chat_history,
                response_format=response_format
            )
        assert error_message in exc_info.value.message
        assert exc_info.value.error_codes == error_codes.split("/")
    def test_aoai_chat_with_not_support_response_format_json_mode_model(
            self,
            azure_open_ai_connection,
            example_prompt_template,
            chat_history
    ):
        # gpt-35-turbo (non-1106) does not support JSON mode; expect an error.
        with pytest.raises(WrappedOpenAIError) as exc_info:
            chat(
                connection=azure_open_ai_connection,
                prompt=example_prompt_template,
                deployment_name="gpt-35-turbo",
                temperature=0,
                user_input="Write a slogan for product X, please response with json.",
                chat_history=chat_history,
                response_format={"type": "json_object"}
            )
        error_message = "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}."
        assert error_message in exc_info.value.message
        assert exc_info.value.error_codes == "UserError/OpenAIError/BadRequestError".split("/")
    def test_aoai_chat_with_response_format_text_mode(
            self,
            azure_open_ai_connection,
            example_prompt_template,
            chat_history
    ):
        result = chat(
            connection=azure_open_ai_connection,
            prompt=example_prompt_template,
            deployment_name="gpt-35-turbo",
            temperature=0,
            user_input="Write a slogan for product X.",
            chat_history=chat_history,
            response_format={"type": "text"}
        )
        assert "Product X".lower() in result.lower()
| promptflow/src/promptflow-tools/tests/test_aoai.py/0 | {
"file_path": "promptflow/src/promptflow-tools/tests/test_aoai.py",
"repo_id": "promptflow",
"token_count": 4560
} | 34 |
DEFAULT_SUBSCRIPTION_ID="your-subscription-id"
DEFAULT_RESOURCE_GROUP_NAME="your-resource-group-name"
DEFAULT_WORKSPACE_NAME="your-workspace-name"
DEFAULT_RUNTIME_NAME="test-runtime-ci"
PROMPT_FLOW_TEST_MODE="replay"
| promptflow/src/promptflow/.env.example/0 | {
"file_path": "promptflow/src/promptflow/.env.example",
"repo_id": "promptflow",
"token_count": 85
} | 35 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import argparse
import importlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import webbrowser
from pathlib import Path
from promptflow._cli._params import (
add_param_config,
add_param_entry,
add_param_environment_variables,
add_param_flow_display_name,
add_param_function,
add_param_inputs,
add_param_prompt_template,
add_param_source,
add_param_yes,
add_parser_build,
base_params,
)
from promptflow._cli._pf._init_entry_generators import (
AzureOpenAIConnectionGenerator,
ChatFlowDAGGenerator,
FlowDAGGenerator,
OpenAIConnectionGenerator,
StreamlitFileReplicator,
ToolMetaGenerator,
ToolPyGenerator,
copy_extra_files,
)
from promptflow._cli._pf._run import exception_handler
from promptflow._cli._utils import _copy_to_flow, activate_action, confirm, inject_sys_path, list_of_dict_to_dict
from promptflow._constants import LANGUAGE_KEY, FlowLanguage
from promptflow._sdk._constants import PROMPT_FLOW_DIR_NAME, ConnectionProvider
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.operations._flow_operations import FlowOperations
from promptflow._utils.logger_utils import get_cli_sdk_logger
from promptflow.exceptions import ErrorTarget, UserErrorException
DEFAULT_CONNECTION = "open_ai_connection"
DEFAULT_DEPLOYMENT = "gpt-35-turbo"
logger = get_cli_sdk_logger()
def add_flow_parser(subparsers):
    """Register the ``pf flow`` command group and all of its sub-commands."""
    flow_parser = subparsers.add_parser(
        "flow",
        description="Manage flows for promptflow.",
        help="pf flow",
    )
    flow_subparsers = flow_parser.add_subparsers()
    # Registration order controls the order sub-commands appear in --help.
    add_parser_init_flow(flow_subparsers)
    add_parser_test_flow(flow_subparsers)
    add_parser_serve_flow(flow_subparsers)
    add_parser_build(flow_subparsers, "flow")
    add_parser_validate_flow(flow_subparsers)
    flow_parser.set_defaults(action="flow")
def dispatch_flow_commands(args: argparse.Namespace):
    """Route a parsed ``pf flow`` invocation to its sub-command handler."""
    handlers = {
        "init": init_flow,
        "test": test_flow,
        "serve": serve_flow,
        "build": build_flow,
        "validate": validate_flow,
    }
    handler = handlers.get(args.sub_action)
    if handler is not None:
        handler(args)
def add_parser_init_flow(subparsers):
    """Add flow create parser to the pf flow subparsers."""
    epilog = """
Examples:
# Creating a flow folder with code/prompts and yaml definitions of the flow:
pf flow init --flow my-awesome-flow
# Creating an eval prompt flow:
pf flow init --flow my-awesome-flow --type evaluation
# Creating a flow in existing folder
pf flow init --flow intent_copilot --entry intent.py --function extract_intent --prompt-template prompt_template=tpl.jinja2
""" # noqa: E501
    add_param_type = lambda parser: parser.add_argument( # noqa: E731
        "--type",
        type=str,
        choices=["standard", "evaluation", "chat"],
        help="The initialized flow type.",
        default="standard",
    )
    # --connection/--deployment are hidden from --help (argparse.SUPPRESS);
    # they are consumed by init_flow when scaffolding a chat flow.
    add_param_connection = lambda parser: parser.add_argument( # noqa: E731
        "--connection", type=str, help=argparse.SUPPRESS
    )
    add_param_deployment = lambda parser: parser.add_argument( # noqa: E731
        "--deployment", type=str, help=argparse.SUPPRESS
    )
    add_params = [
        add_param_type,
        add_param_yes,
        add_param_flow_display_name,
        add_param_entry,
        add_param_function,
        add_param_prompt_template,
        add_param_connection,
        add_param_deployment,
    ] + base_params
    activate_action(
        name="init",
        description="Creating a flow folder with code/prompts and yaml definitions of the flow.",
        epilog=epilog,
        add_params=add_params,
        subparsers=subparsers,
        help_message="Initialize a prompt flow directory.",
        action_param_name="sub_action",
    )
def add_parser_serve_flow(subparsers):
    """Add flow serve parser to the pf flow subparsers."""
    epilog = """
Examples:
# Serve flow as an endpoint:
pf flow serve --source <path_to_flow>
# Serve flow as an endpoint with specific port and host:
pf flow serve --source <path_to_flow> --port 8080 --host localhost --environment-variables key1="`${my_connection.api_key}" key2="value2"
# Serve flow without opening browser:
pf flow serve --source <path_to_flow> --skip-open-browser
""" # noqa: E501

    def add_param_port(parser):
        parser.add_argument("--port", type=int, default=8080, help="The port on which endpoint to run.")

    def add_param_host(parser):
        parser.add_argument("--host", type=str, default="localhost", help="The host of endpoint.")

    def add_param_static_folder(parser):
        # Hidden option (argparse.SUPPRESS).
        parser.add_argument("--static_folder", type=str, help=argparse.SUPPRESS)

    def add_param_skip_browser(parser):
        parser.add_argument(
            "--skip-open-browser", action="store_true", default=False, help="Skip open browser for flow serving."
        )

    activate_action(
        name="serve",
        description="Serving a flow as an endpoint.",
        epilog=epilog,
        add_params=[
            add_param_source,
            add_param_port,
            add_param_host,
            add_param_static_folder,
            add_param_environment_variables,
            add_param_config,
            add_param_skip_browser,
        ]
        + base_params,
        subparsers=subparsers,
        help_message="Serving a flow as an endpoint.",
        action_param_name="sub_action",
    )
def add_parser_validate_flow(subparsers):
    """Add flow validate parser to the pf flow subparsers."""
    epilog = """
Examples:
# Validate flow
pf flow validate --source <path_to_flow>
""" # noqa: E501
    params = [add_param_source] + base_params
    activate_action(
        name="validate",
        description="Validate a flow and generate flow.tools.json for the flow.",
        epilog=epilog,
        add_params=params,
        subparsers=subparsers,
        help_message="Validate a flow. Will raise error if the flow is not valid.",
        action_param_name="sub_action",
    )
def add_parser_test_flow(subparsers):
    """Add flow test parser to the pf flow subparsers."""
    epilog = """
Examples:
# Test the flow:
pf flow test --flow my-awesome-flow
# Test the flow with inputs:
pf flow test --flow my-awesome-flow --inputs key1=val1 key2=val2
# Test the flow with specified variant node:
pf flow test --flow my-awesome-flow --variant ${node_name.variant_name}
# Test the single node in the flow:
pf flow test --flow my-awesome-flow --node node_name
# Chat in the flow:
pf flow test --flow my-awesome-flow --node node_name --interactive
""" # noqa: E501
    add_param_flow = lambda parser: parser.add_argument( # noqa: E731
        "--flow", type=str, required=True, help="the flow directory to test."
    )
    add_param_node = lambda parser: parser.add_argument( # noqa: E731
        "--node", type=str, help="the node name in the flow need to be tested."
    )
    add_param_variant = lambda parser: parser.add_argument( # noqa: E731
        "--variant", type=str, help="Node & variant name in format of ${node_name.variant_name}."
    )
    add_param_interactive = lambda parser: parser.add_argument( # noqa: E731
        "--interactive", action="store_true", help="start a interactive chat session for chat flow."
    )
    # The options below are hidden from --help (argparse.SUPPRESS); they are
    # consumed by test_flow: --multi-modal/--ui launch the streamlit chat UI,
    # --input loads flow inputs from a jsonl file, --detail is forwarded to
    # flows.test.
    add_param_multi_modal = lambda parser: parser.add_argument( # noqa: E731
        "--multi-modal", action="store_true", help=argparse.SUPPRESS
    )
    add_param_ui = lambda parser: parser.add_argument("--ui", action="store_true", help=argparse.SUPPRESS) # noqa: E731
    add_param_input = lambda parser: parser.add_argument("--input", type=str, help=argparse.SUPPRESS) # noqa: E731
    add_param_detail = lambda parser: parser.add_argument( # noqa: E731
        "--detail", type=str, default=None, required=False, help=argparse.SUPPRESS
    )
    add_params = [
        add_param_flow,
        add_param_node,
        add_param_variant,
        add_param_interactive,
        add_param_input,
        add_param_inputs,
        add_param_environment_variables,
        add_param_multi_modal,
        add_param_ui,
        add_param_config,
        add_param_detail,
    ] + base_params
    activate_action(
        name="test",
        description="Test the flow.",
        epilog=epilog,
        add_params=add_params,
        subparsers=subparsers,
        help_message="Test the prompt flow or flow node.",
        action_param_name="sub_action",
    )
def init_flow(args):
    """Entry for ``pf flow init``: scaffold around an existing entry file when
    --entry/--prompt-template are given, otherwise create an example flow."""
    if args.entry or args.prompt_template:
        print("Creating flow from existing folder...")
        prompt_tpl = {}
        for _dct in args.prompt_template or []:
            prompt_tpl.update(**_dct)
        _init_existing_flow(args.flow, args.entry, args.function, prompt_tpl)
    else:
        print("Creating flow from scratch...")
        _init_flow_by_template(args.flow, args.type, args.yes, args.connection, args.deployment)
def _init_existing_flow(flow_name, entry=None, function=None, prompt_params: dict = None):
    """Generate flow scaffolding (tool wrapper, meta and DAG files) around an
    existing Python function.

    Args:
        flow_name: directory of the existing project; must already exist.
        entry: path of the Python file that defines *function*.
        function: name of the function to expose as the flow's tool.
        prompt_params: mapping of template parameter name -> jinja2 file path.
    """
    flow_path = Path(flow_name).resolve()
    if not function:
        logger.error("--function must be specified when --entry is specified.")
        return
    if not flow_path.exists():
        logger.error(f"{flow_path.resolve()} must exist when --entry specified.")
        return
    print(f"Change working directory to .. {flow_path.resolve()}")
    os.chdir(flow_path)
    entry = Path(entry).resolve()
    if not entry.exists():
        logger.error(f"{entry} must exist.")
        return
    # Temporarily add the flow folder to sys.path so the entry module imports.
    with inject_sys_path(flow_path):
        # import function object
        function_obj = getattr(importlib.import_module(entry.stem), function)
    # Create tool.py
    tool_py = f"{function}_tool.py"
    python_tool = ToolPyGenerator(entry, function, function_obj)
    tools = ToolMetaGenerator(tool_py, function, function_obj, prompt_params)
    # Every prompt-template parameter must map to a function argument.
    python_tool_inputs = [arg.name for arg in python_tool.tool_arg_list]
    for tool_input in tools.prompt_params.keys():
        if tool_input not in python_tool_inputs:
            error = ValueError(f"Template parameter {tool_input} doesn't find in python function arguments.")
            raise UserErrorException(target=ErrorTarget.CONTROL_PLANE_SDK, message=str(error), error=error)
    python_tool.generate_to_file(tool_py)
    # Create .promptflow and flow.tools.json
    meta_dir = flow_path / PROMPT_FLOW_DIR_NAME
    meta_dir.mkdir(parents=True, exist_ok=True)
    tools.generate_to_file(meta_dir / "flow.tools.json")
    # Create flow.dag.yaml
    FlowDAGGenerator(tool_py, function, function_obj, prompt_params).generate_to_file("flow.dag.yaml")
    copy_extra_files(flow_path=flow_path, extra_files=["requirements.txt", ".gitignore"])
    print(f"Done. Generated flow in folder: {flow_path.resolve()}.")
def _init_chat_flow(flow_name, flow_path, connection=None, deployment=None):
    """Copy the bundled chat example flow into *flow_path* and wire it to an
    LLM connection (defaults: DEFAULT_CONNECTION / DEFAULT_DEPLOYMENT)."""
    from promptflow._sdk._configuration import Configuration
    example_flow_path = Path(__file__).parent.parent / "data" / "chat_flow" / "flow_files"
    for item in list(example_flow_path.iterdir()):
        _copy_to_flow(flow_path=flow_path, source_file=item)
    # Generate flow.dag.yaml to chat flow.
    connection = connection or DEFAULT_CONNECTION
    deployment = deployment or DEFAULT_DEPLOYMENT
    ChatFlowDAGGenerator(connection=connection, deployment=deployment).generate_to_file(flow_path / "flow.dag.yaml")
    # When customer not configure the remote connection provider, create connection yaml to chat flow.
    is_local_connection = Configuration.get_instance().get_connection_provider() == ConnectionProvider.LOCAL
    if is_local_connection:
        OpenAIConnectionGenerator(connection=connection).generate_to_file(flow_path / "openai.yaml")
        AzureOpenAIConnectionGenerator(connection=connection).generate_to_file(flow_path / "azure_openai.yaml")
    copy_extra_files(flow_path=flow_path, extra_files=["requirements.txt", ".gitignore"])
    print(f"Done. Created chat flow folder: {flow_path.resolve()}.")
    # Tell the user about the connection prerequisite: local users must create
    # it themselves; workspace users must ensure it exists remotely.
    if is_local_connection:
        print(
            f"The generated chat flow is requiring a connection named {connection}, "
            "please follow the steps in README.md to create if you haven't done that."
        )
    else:
        print(
            f"The generated chat flow is requiring a connection named {connection}, "
            "please ensure it exists in workspace."
        )
    flow_test_command = f"pf flow test --flow {flow_name} --interactive"
    print(f"You can execute this command to test the flow, {flow_test_command}")
def _init_standard_or_evaluation_flow(flow_name, flow_path, flow_type):
    """Copy the bundled standard/evaluation example flow into *flow_path*."""
    template_dir = Path(__file__).parent.parent / "data" / f"{flow_type}_flow"
    for template_file in list(template_dir.iterdir()):
        _copy_to_flow(flow_path=flow_path, source_file=template_file)
    copy_extra_files(flow_path=flow_path, extra_files=["requirements.txt", ".gitignore"])
    print(f"Done. Created {flow_type} flow folder: {flow_path.resolve()}.")
    sample_input = os.path.join(flow_name, 'data.jsonl')
    flow_test_command = f"pf flow test --flow {flow_name} --input {sample_input}"
    print(f"You can execute this command to test the flow, {flow_test_command}")
def _init_flow_by_template(flow_name, flow_type, overwrite=False, connection=None, deployment=None):
    """Create a new flow folder from the bundled template for *flow_type*.

    Prompts for confirmation (unless *overwrite* is set) when the target
    folder already exists.
    """
    flow_path = Path(flow_name)
    if flow_path.exists():
        # Refuse to scaffold over a regular file.
        if not flow_path.is_dir():
            logger.error(f"{flow_path.resolve()} is not a folder.")
            return
        proceed = confirm(
            "The flow folder already exists, do you want to create the flow in this existing folder?", overwrite
        )
        if not proceed:
            print("The 'pf init' command has been cancelled.")
            return
    flow_path.mkdir(parents=True, exist_ok=True)
    if flow_type == "chat":
        _init_chat_flow(flow_name=flow_name, flow_path=flow_path, connection=connection, deployment=deployment)
    else:
        _init_standard_or_evaluation_flow(flow_name=flow_name, flow_path=flow_path, flow_type=flow_type)
@exception_handler("Flow test")
def test_flow(args):
    """Entry for ``pf flow test``: run a flow (or a single node) locally.

    Dispatches to one of three modes based on the parsed args: a
    streamlit-based chat UI (--multi-modal/--ui), an interactive terminal chat
    session (--interactive), or a one-shot test whose result is printed.
    """
    from promptflow._sdk._load_functions import load_flow
    config = list_of_dict_to_dict(args.config)
    pf_client = PFClient(config=config)
    if args.environment_variables:
        environment_variables = list_of_dict_to_dict(args.environment_variables)
    else:
        environment_variables = {}
    inputs = {}
    if args.input:
        from promptflow._utils.load_data import load_data
        if args.input and not args.input.endswith(".jsonl"):
            error = ValueError("Only support jsonl file as input.")
            raise UserErrorException(
                target=ErrorTarget.CONTROL_PLANE_SDK,
                message=str(error),
                error=error,
            )
        # Only the first record of the jsonl file is used as flow inputs.
        inputs = load_data(local_path=args.input)[0]
    if args.inputs:
        # --inputs key=value pairs override values loaded from --input.
        inputs.update(list_of_dict_to_dict(args.inputs))
    if args.multi_modal or args.ui:
        with tempfile.TemporaryDirectory() as temp_dir:
            flow = load_flow(args.flow)
            # Replicate the bundled streamlit app files into the temp dir,
            # customized for this flow, then launch the chat UI from there.
            script_path = [
                os.path.join(temp_dir, "main.py"),
                os.path.join(temp_dir, "utils.py"),
                os.path.join(temp_dir, "logo.png"),
            ]
            for script in script_path:
                StreamlitFileReplicator(
                    flow_name=flow.display_name if flow.display_name else flow.name,
                    flow_dag_path=flow.flow_dag_path,
                ).generate_to_file(script)
            main_script_path = os.path.join(temp_dir, "main.py")
            pf_client.flows._chat_with_ui(script=main_script_path)
    else:
        if args.interactive:
            pf_client.flows._chat(
                flow=args.flow,
                inputs=inputs,
                environment_variables=environment_variables,
                variant=args.variant,
                show_step_output=args.verbose,
            )
        else:
            result = pf_client.flows.test(
                flow=args.flow,
                inputs=inputs,
                environment_variables=environment_variables,
                variant=args.variant,
                node=args.node,
                allow_generator_output=False,
                stream_output=False,
                dump_test_result=True,
                detail=args.detail,
            )
            # Print flow/node test result
            if isinstance(result, dict):
                print(json.dumps(result, indent=4, ensure_ascii=False))
            else:
                print(result)
def serve_flow(args):
    """Entry for ``pf flow serve``: dispatch to the Python or C# serving path."""
    from promptflow._sdk._load_functions import load_flow

    logger.info("Start serve model: %s", args.source)
    # Set environment variable for local test
    source = Path(args.source)
    logger.info(
        "Start promptflow server with port %s",
        args.port,
    )
    os.environ["PROMPTFLOW_PROJECT_PATH"] = source.absolute().as_posix()
    flow = load_flow(args.source)
    is_csharp_flow = flow.dag.get(LANGUAGE_KEY, FlowLanguage.Python) == FlowLanguage.CSharp
    if is_csharp_flow:
        serve_flow_csharp(args, source)
    else:
        serve_flow_python(args, source)
    logger.info("Promptflow app ended")
def serve_flow_csharp(args, source):
    """Serve a C# flow by launching the dotnet executor service in serving mode."""
    from promptflow.batch._csharp_executor_proxy import EXECUTOR_SERVICE_DLL
    try:
        # Change working directory to model dir
        logger.info(f"Change working directory to model dir {source}")
        os.chdir(source)
        command = [
            "dotnet",
            EXECUTOR_SERVICE_DLL,
            "--port",
            str(args.port),
            "--yaml_path",
            "flow.dag.yaml",
            "--assembly_folder",
            ".",
            "--connection_provider_url",
            "",
            "--log_path",
            "",
            "--serving",
        ]
        # Blocks until the service exits; stdout/stderr stream to this console.
        subprocess.run(command, stdout=sys.stdout, stderr=sys.stderr)
    except KeyboardInterrupt:
        # Ctrl+C is the expected way to stop serving; exit quietly.
        pass
def _resolve_python_flow_additional_includes(source) -> Path:
    """Resolve a flow's additional includes into a stable temporary copy.

    Returns *source* unchanged when the flow has no additional includes;
    otherwise returns a temporary directory containing the fully resolved
    flow.
    """
    # Resolve flow additional includes
    from promptflow import load_flow

    flow = load_flow(source)
    with FlowOperations._resolve_additional_includes(flow.path) as resolved_flow_path:
        if resolved_flow_path == flow.path:
            return source
        # Copy resolved flow to temp folder if additional includes exists
        # Note: DO NOT use resolved flow path directly, as when inner logic raise exception,
        # temp dir will fail due to file occupied by other process.
        # BUGFIX: tempfile.TemporaryDirectory().name created a directory whose
        # finalizer can delete it as soon as the object is garbage collected;
        # mkdtemp() creates a directory that persists until explicitly removed.
        temp_flow_path = Path(tempfile.mkdtemp())
        shutil.copytree(src=resolved_flow_path.parent, dst=temp_flow_path, dirs_exist_ok=True)
    return temp_flow_path
def serve_flow_python(args, source):
    """Serve a Python flow with the SDK's built-in serving app."""
    from promptflow._sdk._serving.app import create_app
    static_folder = args.static_folder
    if static_folder:
        static_folder = Path(static_folder).absolute().as_posix()
    config = list_of_dict_to_dict(args.config)
    # Materialize additional includes so the serving app sees a complete flow.
    source = _resolve_python_flow_additional_includes(source)
    os.environ["PROMPTFLOW_PROJECT_PATH"] = source.absolute().as_posix()
    logger.info(f"Change working directory to model dir {source}")
    os.chdir(source)
    app = create_app(
        static_folder=static_folder,
        environment_variables=list_of_dict_to_dict(args.environment_variables),
        config=config,
    )
    if not args.skip_open_browser:
        target = f"http://{args.host}:{args.port}"
        logger.info(f"Opening browser {target}...")
        webbrowser.open(target)
    # Debug is not supported for now as debug will rerun command, and we changed working directory.
    app.run(port=args.port, host=args.host)
def build_flow(args):
    """Entry for ``pf flow build``: export a flow as a deployable artifact.

    Supported invocations:
        i.   pf flow build --source <flow_folder> --output <output_folder> --variant <variant>
        ii.  pf flow build --source <flow_folder> --format docker --output <output_folder> --variant <variant>
        iii. pf flow build --source <flow_folder> --format executable --output <output_folder> --variant <variant>

    The selected variant is resolved and flow.dag.yaml updated by default.
    Keeping variants for continuous development
    (`pf flow build --source <flow_folder> --output <output_folder> --keep-variants`)
    is deferred until there is a specific customer request.

    Output structure:
        flow/
        .connections/
        Dockerfile|executable.exe
        ...
    """
    pf_client = PFClient()
    pf_client.flows.build(
        flow=args.source,
        output=args.output,
        format=args.format,
        variant=args.variant,
        flow_only=args.flow_only,
    )
    print(
        f"Exported flow to {Path(args.output).absolute().as_posix()}.\n"
        f"please check {Path(args.output).joinpath('README.md').absolute().as_posix()} "
        f"for how to use it."
    )
def validate_flow(args):
    """Validate a flow definition and terminate with a status code.

    Prints the validation result, then exits with 0 when validation passed
    and 1 otherwise.
    """
    # `exit()` is a site-module convenience meant for interactive sessions and
    # may be absent when Python runs with -S; `sys.exit` is the correct API.
    import sys

    pf_client = PFClient()
    validation_result = pf_client.flows.validate(
        flow=args.source,
    )
    print(repr(validation_result))
    sys.exit(0 if validation_result.passed else 1)
| promptflow/src/promptflow/promptflow/_cli/_pf/_flow.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/_pf/_flow.py",
"repo_id": "promptflow",
"token_count": 8800
} | 36 |
# Chat flow
Chat flow is designed for conversational application development, building upon the capabilities of standard flow and providing enhanced support for chat inputs/outputs and chat history management. With chat flow, you can easily create a chatbot that handles chat input and output.
## Create connection for LLM tool to use
You can follow these steps to create a connection required by a LLM tool.
Currently, there are two connection types supported by LLM tool: "AzureOpenAI" and "OpenAI". If you want to use "AzureOpenAI" connection type, you need to create an Azure OpenAI service first. Please refer to [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/cognitive-services/openai-service/) for more details. If you want to use "OpenAI" connection type, you need to create an OpenAI account first. Please refer to [OpenAI](https://platform.openai.com/) for more details.
```bash
# Override keys with --set to avoid yaml file changes
# Create open ai connection
pf connection create --file openai.yaml --set api_key=<your_api_key> --name open_ai_connection
# Create azure open ai connection
# pf connection create --file azure_openai.yaml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Note in [flow.dag.yaml](flow.dag.yaml) we are using connection named `open_ai_connection`.
```bash
# show registered connection
pf connection show --name open_ai_connection
```
Please refer to connections [document](https://promptflow.azurewebsites.net/community/local/manage-connections.html) and [example](https://github.com/microsoft/promptflow/tree/main/examples/connections) for more details.
## Develop a chat flow
The most important elements that differentiate a chat flow from a standard flow are **Chat Input**, **Chat History**, and **Chat Output**.
- **Chat Input**: Chat input refers to the messages or queries submitted by users to the chatbot. Effectively handling chat input is crucial for a successful conversation, as it involves understanding user intentions, extracting relevant information, and triggering appropriate responses.
- **Chat History**: Chat history is the record of all interactions between the user and the chatbot, including both user inputs and AI-generated outputs. Maintaining chat history is essential for keeping track of the conversation context and ensuring the AI can generate contextually relevant responses. Chat History is a special type of chat flow input, that stores chat messages in a structured format.
- **Chat Output**: Chat output refers to the AI-generated messages that are sent to the user in response to their inputs. Generating contextually appropriate and engaging chat outputs is vital for a positive user experience.
A chat flow can have multiple inputs, but Chat History and Chat Input are required inputs in chat flow.
## Interact with chat flow
Promptflow CLI provides a way to start an interactive chat session for a chat flow. You can use the command below to start an interactive chat session:
```
pf flow test --flow <flow_folder> --interactive
```
After executing this command, you can interact with the chat flow in the terminal. Press **Enter** to send your message to the chat flow, and quit with **ctrl+C**.
Promptflow CLI will distinguish the output of different roles by color, <span style="color:Green">User input</span>, <span style="color:Gold">Bot output</span>, <span style="color:Blue">Flow script output</span>, <span style="color:Cyan">Node output</span>.
> =========================================<br>
> Welcome to chat flow, <You-flow-name>.<br>
> Press Enter to send your message.<br>
> You can quit with ctrl+C.<br>
> =========================================<br>
> <span style="color:Green">User:</span> What types of container software there are<br>
> <span style="color:Gold">Bot:</span> There are several types of container software available, including:<br>
> 1. Docker: This is one of the most popular containerization software that allows developers to package their applications into containers and deploy them across different environments.<br>
> 2. Kubernetes: This is an open-source container orchestration platform that automates the deployment, scaling, and management of containerized applications.<br>
>
> <span style="color:Green">User:</span> What's the different between them<br>
> <span style="color:Gold">Bot:</span> The main difference between the various container software systems is their functionality and purpose. Here are some key differences between them:<br>
> 1. Docker is more focused on container packaging and deployment, while Kubernetes is more focused on container orchestration and management.<br>
> 2. Kubernetes: Kubernetes is a container orchestration tool that helps manage and deploy containers at scale. It automates the deployment, scaling, and management of containerized applications across multiple hosts.<br>
If you add `--verbose` to the pf command, the output of each step will be displayed.
> =========================================<br>
> Welcome to chat flow, Template Chat Flow.<br>
> Press Enter to send your message.<br>
> You can quit with ctrl+C.<br>
> =========================================<br>
> <span style="color:Green">User:</span> What types of container software there are<br>
> <span style="color:Cyan">chat:</span> There are several types of container software available, including:<br>
> 1. Docker: A popular container platform that is widely used in the industry.<br>
> 2. Kubernetes: A container orchestration tool that helps manage and deploy containers at scale.<br>
>
> <span style="color:Gold">Bot:</span> There are several types of container software available, including:<br>
> 1. Docker: A popular container platform that is widely used in the industry.<br>
> 2. Kubernetes: A container orchestration tool that helps manage and deploy containers at scale.<br>
>
> <span style="color:Green">User:</span> What's the different between them<br>
> <span style="color:Cyan">chat:</span> The main differences between container software are in their architecture, feature sets, and use cases. Here are some brief explanations of the differences between the examples I listed:<br>
> 1. Docker: Docker is a container platform that is widely used for building, packaging, and deploying containerized applications. It is known for its ease of use, portability, and large ecosystem of tools and services.<br>
> 2. Kubernetes: Kubernetes is a container orchestration tool that helps manage and deploy containers at scale. It automates the deployment, scaling, and management of containerized applications across multiple hosts.<br>
>
> <span style="color:Gold">Bot:</span> The main differences between container software are in their architecture, feature sets, and use cases. Here are some brief explanations of the differences between the examples I listed:<br>
> 1. Docker: Docker is a container platform that is widely used for building, packaging, and deploying containerized applications. It is known for its ease of use, portability, and large ecosystem of tools and services.<br>
> 2. Kubernetes: Kubernetes is a container orchestration tool that helps manage and deploy containers at scale. It automates the deployment, scaling, and management of containerized applications across multiple hosts.<br>
| promptflow/src/promptflow/promptflow/_cli/data/chat_flow/flow_files/README.md/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/chat_flow/flow_files/README.md",
"repo_id": "promptflow",
"token_count": 1766
} | 37 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
class GeneratorProxy:
    """Wrap a generator so every value it yields is also recorded.

    Iterating the proxy is transparent: values come straight from the wrapped
    generator, and each one is appended to an internal history that can be
    inspected afterwards via :attr:`items`.
    """

    def __init__(self, generator):
        self._generator = generator
        self._items = []

    def __iter__(self):
        # The proxy is its own iterator.
        return self

    def __next__(self):
        # Record the value before handing it to the caller; StopIteration from
        # the wrapped generator propagates unchanged.
        value = next(self._generator)
        self._items.append(value)
        return value

    @property
    def items(self):
        """All values yielded so far, in yield order."""
        return self._items


def generate_from_proxy(proxy: GeneratorProxy):
    """Yield every remaining item from *proxy*, recording them as a side effect."""
    for value in proxy:
        yield value
| promptflow/src/promptflow/promptflow/_core/generator_proxy.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_core/generator_proxy.py",
"repo_id": "promptflow",
"token_count": 215
} | 38 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow._sdk._constants import BULK_RUN_ERRORS
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException
class SDKError(UserErrorException):
    """SDK base class, target default is CONTROL_PLANE_SDK.

    Base for all user-facing errors raised by the local SDK; subclasses only
    specialize the semantics, the constructor contract is inherited unchanged.
    """

    def __init__(
        self,
        message="",
        message_format="",
        target: ErrorTarget = ErrorTarget.CONTROL_PLANE_SDK,
        module=None,
        **kwargs,
    ):
        # Forward everything to the shared exception base; only the default
        # target differs from the parent class.
        super().__init__(message=message, message_format=message_format, target=target, module=module, **kwargs)
class SDKInternalError(SystemErrorException):
    """SDK internal error.

    System-side counterpart of :class:`SDKError`: raised for failures that are
    not caused by user input.
    """

    def __init__(
        self,
        message="",
        message_format="",
        target: ErrorTarget = ErrorTarget.CONTROL_PLANE_SDK,
        module=None,
        **kwargs,
    ):
        # Same defaulted target as SDKError, but rooted in SystemErrorException.
        super().__init__(message=message, message_format=message_format, target=target, module=module, **kwargs)
# Concrete user-error subclasses; each only narrows the meaning of SDKError.
class RunExistsError(SDKError):
    """Exception raised when run already exists."""
    pass
class RunNotFoundError(SDKError):
    """Exception raised if run cannot be found."""
    pass
class InvalidRunStatusError(SDKError):
    """Exception raised if run status is invalid."""
    pass
class UnsecureConnectionError(SDKError):
    """Exception raised if connection is not secure."""
    pass
class DecryptConnectionError(SDKError):
    """Exception raised if connection decryption failed."""
    pass
class StoreConnectionEncryptionKeyError(SDKError):
    """Exception raised if no keyring backend is available to store the encryption key."""
    pass
class InvalidFlowError(SDKError):
    """Exception raised if flow definition is not legal."""
    pass
class ConnectionNotFoundError(SDKError):
    """Exception raised if connection is not found."""
    pass
class InvalidRunError(SDKError):
    """Exception raised if run name is not legal."""
    pass
class GenerateFlowToolsJsonError(SDKError):
    """Exception raised if flow tools json generation failed."""
    pass
class BulkRunException(SDKError):
    """Exception raised when bulk run failed.

    Aggregates per-line failures: the line errors are carried in
    ``additional_info`` and a summary is prepended to the message.
    """

    def __init__(self, *, message="", failed_lines, total_lines, errors, module: str = None, **kwargs):
        # failed_lines/total_lines are presumably ints but may be non-int
        # sentinels (the isinstance guard below suggests so) — TODO confirm.
        self.failed_lines = failed_lines
        self.total_lines = total_lines
        self._additional_info = {
            BULK_RUN_ERRORS: errors,
        }
        message = f"First error message is: {message}"
        # bulk run error is line error only when failed_lines > 0
        if isinstance(failed_lines, int) and isinstance(total_lines, int) and failed_lines > 0:
            message = f"Failed to run {failed_lines}/{total_lines} lines. " + message
        super().__init__(message=message, target=ErrorTarget.RUNTIME, module=module, **kwargs)

    @property
    def additional_info(self):
        """The recorded bulk-run line errors, exposed as additional info."""
        return self._additional_info
class RunOperationParameterError(SDKError):
    """Exception raised when a run operation receives invalid parameters (e.g. list run)."""
    pass
class RunOperationError(SDKError):
    """Exception raised when run operation failed."""
    pass
class FlowOperationError(SDKError):
    """Exception raised when flow operation failed."""
    pass
class ExperimentExistsError(SDKError):
    """Exception raised when experiment already exists."""
    pass
class ExperimentNotFoundError(SDKError):
    """Exception raised if experiment cannot be found."""
    pass
class ExperimentValidationError(SDKError):
    """Exception raised if experiment validation failed."""
    pass
class ExperimentValueError(SDKError):
    """Exception raised if an experiment value is invalid."""
    pass
class ExperimentHasCycle(SDKError):
    """Exception raised if the experiment node graph contains a circular dependency."""
    pass
class DownloadInternalError(SDKInternalError):
    """Exception raised on an internal error during download."""
    pass
class ExperimentCommandRunError(SDKError):
    """Exception raised if an experiment command node run failed."""
    pass
| promptflow/src/promptflow/promptflow/_sdk/_errors.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_errors.py",
"repo_id": "promptflow",
"token_count": 1364
} | 39 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import shlex
import subprocess
import sys
import tempfile
from dataclasses import asdict
from pathlib import Path
from flask import Response, jsonify, make_response, request
from promptflow._sdk._constants import FlowRunProperties, get_list_view_type
from promptflow._sdk._errors import RunNotFoundError
from promptflow._sdk._service import Namespace, Resource, fields
from promptflow._sdk._service.utils.utils import build_pfs_user_agent, get_client_from_request, make_response_no_content
from promptflow._sdk.entities import Run as RunEntity
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
from promptflow._utils.yaml_utils import dump_yaml
from promptflow.contracts._run_management import RunMetadata
# Flask-RESTX namespace hosting all run-management endpoints.
api = Namespace("Runs", description="Runs Management")
# Define update run request parsing
update_run_parser = api.parser()
update_run_parser.add_argument("display_name", type=str, location="form", required=False)
update_run_parser.add_argument("description", type=str, location="form", required=False)
# `tags` arrives as a JSON-encoded string and is json.loads-ed in Run.put.
update_run_parser.add_argument("tags", type=str, location="form", required=False)
# Define visualize request parsing
visualize_parser = api.parser()
visualize_parser.add_argument("html", type=str, location="form", required=False)
# Response model of run operation
dict_field = api.schema_model("RunDict", {"additionalProperties": True, "type": "object"})
list_field = api.schema_model("RunList", {"type": "array", "items": {"$ref": "#/definitions/RunDict"}})
@api.route("/")
class RunList(Resource):
@api.response(code=200, description="Runs", model=list_field)
@api.doc(description="List all runs")
def get(self):
# parse query parameters
max_results = request.args.get("max_results", default=50, type=int)
all_results = request.args.get("all_results", default=False, type=bool)
archived_only = request.args.get("archived_only", default=False, type=bool)
include_archived = request.args.get("include_archived", default=False, type=bool)
# align with CLI behavior
if all_results:
max_results = None
list_view_type = get_list_view_type(archived_only=archived_only, include_archived=include_archived)
runs = get_client_from_request().runs.list(max_results=max_results, list_view_type=list_view_type)
runs_dict = [run._to_dict() for run in runs]
return jsonify(runs_dict)
@api.route("/submit")
class RunSubmit(Resource):
@api.response(code=200, description="Submit run info", model=dict_field)
@api.doc(body=dict_field, description="Submit run")
def post(self):
run_dict = request.get_json(force=True)
run_name = run_dict.get("name", None)
if not run_name:
run = RunEntity(**run_dict)
run_name = run._generate_run_name()
run_dict["name"] = run_name
with tempfile.TemporaryDirectory() as temp_dir:
run_file = Path(temp_dir) / "batch_run.yaml"
with open(run_file, "w", encoding="utf-8") as f:
dump_yaml(run_dict, f)
cmd = [
"pf",
"run",
"create",
"--file",
str(run_file),
"--user-agent",
build_pfs_user_agent(),
]
if sys.executable.endswith("pfcli.exe"):
cmd = ["pfcli"] + cmd
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
stdout, _ = process.communicate()
if process.returncode == 0:
try:
run = get_client_from_request().runs._get(name=run_name)
return jsonify(run._to_dict())
except RunNotFoundError as e:
raise RunNotFoundError(
f"Failed to get the submitted run: {e}\n"
f"Used command: {' '.join(shlex.quote(arg) for arg in cmd)}\n"
f"Output: {stdout.decode('utf-8')}"
)
else:
raise Exception(f"Create batch run failed: {stdout.decode('utf-8')}")
@api.route("/<string:name>")
class Run(Resource):
@api.response(code=200, description="Update run info", model=dict_field)
@api.doc(parser=update_run_parser, description="Update run")
def put(self, name: str):
args = update_run_parser.parse_args()
tags = json.loads(args.tags) if args.tags else None
run = get_client_from_request().runs.update(
name=name, display_name=args.display_name, description=args.description, tags=tags
)
return jsonify(run._to_dict())
@api.response(code=200, description="Get run info", model=dict_field)
@api.doc(description="Get run")
def get(self, name: str):
run = get_client_from_request().runs.get(name=name)
return jsonify(run._to_dict())
@api.response(code=204, description="Delete run", model=dict_field)
@api.doc(description="Delete run")
def delete(self, name: str):
get_client_from_request().runs.delete(name=name)
return make_response_no_content()
@api.route("/<string:name>/childRuns")
class FlowChildRuns(Resource):
@api.response(code=200, description="Child runs", model=list_field)
@api.doc(description="Get child runs")
def get(self, name: str):
run = get_client_from_request().runs.get(name=name)
local_storage_op = LocalStorageOperations(run=run)
detail_dict = local_storage_op.load_detail()
return jsonify(detail_dict["flow_runs"])
@api.route("/<string:name>/nodeRuns/<string:node_name>")
class FlowNodeRuns(Resource):
@api.response(code=200, description="Node runs", model=list_field)
@api.doc(description="Get node runs info")
def get(self, name: str, node_name: str):
run = get_client_from_request().runs.get(name=name)
local_storage_op = LocalStorageOperations(run=run)
detail_dict = local_storage_op.load_detail()
node_runs = [item for item in detail_dict["node_runs"] if item["node"] == node_name]
return jsonify(node_runs)
@api.route("/<string:name>/metaData")
class MetaData(Resource):
@api.doc(description="Get metadata of run")
@api.response(code=200, description="Run metadata", model=dict_field)
def get(self, name: str):
run = get_client_from_request().runs.get(name=name)
local_storage_op = LocalStorageOperations(run=run)
metadata = RunMetadata(
name=run.name,
display_name=run.display_name,
create_time=run.created_on,
flow_path=run.properties[FlowRunProperties.FLOW_PATH],
output_path=run.properties[FlowRunProperties.OUTPUT_PATH],
tags=run.tags,
lineage=run.run,
metrics=local_storage_op.load_metrics(),
dag=local_storage_op.load_dag_as_string(),
flow_tools_json=local_storage_op.load_flow_tools_json(),
)
return jsonify(asdict(metadata))
@api.route("/<string:name>/logContent")
class LogContent(Resource):
@api.doc(description="Get run log content")
@api.response(code=200, description="Log content", model=fields.String)
def get(self, name: str):
run = get_client_from_request().runs.get(name=name)
local_storage_op = LocalStorageOperations(run=run)
log_content = local_storage_op.logger.get_logs()
return make_response(log_content)
@api.route("/<string:name>/metrics")
class Metrics(Resource):
@api.doc(description="Get run metrics")
@api.response(code=200, description="Run metrics", model=dict_field)
def get(self, name: str):
run = get_client_from_request().runs.get(name=name)
local_storage_op = LocalStorageOperations(run=run)
metrics = local_storage_op.load_metrics()
return jsonify(metrics)
@api.route("/<string:name>/visualize")
class VisualizeRun(Resource):
@api.doc(description="Visualize run")
@api.response(code=200, description="Visualize run", model=fields.String)
@api.produces(["text/html"])
def get(self, name: str):
with tempfile.TemporaryDirectory() as temp_dir:
from promptflow._sdk.operations import RunOperations
run_op: RunOperations = get_client_from_request().runs
html_path = Path(temp_dir) / "visualize_run.html"
# visualize operation may accept name in string
run_op.visualize(name, html_path=html_path)
with open(html_path, "r") as f:
return Response(f.read(), mimetype="text/html")
@api.route("/<string:name>/archive")
class ArchiveRun(Resource):
@api.doc(description="Archive run")
@api.response(code=200, description="Archived run", model=dict_field)
def get(self, name: str):
run = get_client_from_request().runs.archive(name=name)
return jsonify(run._to_dict())
@api.route("/<string:name>/restore")
class RestoreRun(Resource):
@api.doc(description="Restore run")
@api.response(code=200, description="Restored run", model=dict_field)
def get(self, name: str):
run = get_client_from_request().runs.restore(name=name)
return jsonify(run._to_dict())
| promptflow/src/promptflow/promptflow/_sdk/_service/apis/run.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_service/apis/run.py",
"repo_id": "promptflow",
"token_count": 3864
} | 40 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
import os
import re
from promptflow._sdk._serving._errors import InvalidConnectionData, MissingConnectionProvider
from promptflow._sdk._serving.extension.default_extension import AppExtension
from promptflow._sdk._serving.monitor.data_collector import FlowDataCollector
from promptflow._sdk._serving.monitor.flow_monitor import FlowMonitor
from promptflow._sdk._serving.monitor.metrics import MetricsRecorder
from promptflow._sdk._serving.utils import decode_dict, get_pf_serving_env, normalize_connection_name
from promptflow._utils.retry_utils import retry
from promptflow._version import VERSION
from promptflow.contracts.flow import Flow
# User agent reported by the serving app to downstream services.
USER_AGENT = f"promptflow-cloud-serving/{VERSION}"
# Captures the five components of an AzureML online-deployment ARM resource id:
# subscription, resource group, workspace, endpoint and deployment names.
AML_DEPLOYMENT_RESOURCE_ID_REGEX = "/subscriptions/(.*)/resourceGroups/(.*)/providers/Microsoft.MachineLearningServices/workspaces/(.*)/onlineEndpoints/(.*)/deployments/(.*)"  # noqa: E501
# Template for a workspace-scoped azureml connection-provider string.
AML_CONNECTION_PROVIDER_TEMPLATE = "azureml:/subscriptions/{}/resourceGroups/{}/providers/Microsoft.MachineLearningServices/workspaces/{}"  # noqa: E501
class AzureMLExtension(AppExtension):
    """AzureMLExtension is used to create extension for azureml serving.

    Resolves the flow project path, connections / connection provider, metric
    dimensions and the flow monitor from the AzureML deployment environment.
    """

    def __init__(self, logger, **kwargs):
        super().__init__(logger=logger, **kwargs)
        self.logger = logger
        # parse promptflow project path
        project_path: str = get_pf_serving_env("PROMPTFLOW_PROJECT_PATH")
        if not project_path:
            # NOTE(review): assumes AZUREML_MODEL_DIR contains exactly one model
            # root directory — listdir()[0] picks an arbitrary entry otherwise.
            model_dir = os.getenv("AZUREML_MODEL_DIR", ".")
            model_rootdir = os.listdir(model_dir)[0]
            self.model_name = model_rootdir
            project_path = os.path.join(model_dir, model_rootdir)
        self.model_root_path = project_path
        # mlflow support in base extension
        self.project_path = self._get_mlflow_project_path(project_path)
        # initialize connections or connection provider
        # TODO: to be deprecated, remove in next major version
        self.connections = self._get_env_connections_if_exist()
        self.endpoint_name: str = None
        self.deployment_name: str = None
        self.connection_provider = None
        self.credential = _get_managed_identity_credential_with_retry()
        # Only resolve a workspace connection provider when no connections were
        # supplied via the environment.
        if len(self.connections) == 0:
            self._initialize_connection_provider()
        # initialize metrics common dimensions if exist
        self.common_dimensions = {}
        if self.endpoint_name:
            self.common_dimensions["endpoint"] = self.endpoint_name
        if self.deployment_name:
            self.common_dimensions["deployment"] = self.deployment_name
        env_dimensions = self._get_common_dimensions_from_env()
        self.common_dimensions.update(env_dimensions)
        # initialize flow monitor
        data_collector = FlowDataCollector(self.logger)
        metrics_recorder = self._get_metrics_recorder()
        self.flow_monitor = FlowMonitor(
            self.logger, self.get_flow_name(), data_collector, metrics_recorder=metrics_recorder
        )

    def get_flow_project_path(self) -> str:
        """Absolute path of the (possibly mlflow-wrapped) flow project."""
        return self.project_path

    def get_flow_name(self) -> str:
        """Flow name derived from the model root directory name."""
        return os.path.basename(self.model_root_path)

    def get_connection_provider(self) -> str:
        """Resolved connection provider string, or None when env connections are used."""
        return self.connection_provider

    def get_blueprints(self):
        return self._get_default_blueprints()

    def get_flow_monitor(self) -> FlowMonitor:
        return self.flow_monitor

    def get_override_connections(self, flow: Flow) -> (dict, dict):
        """Collect per-connection overrides from environment variables.

        Returns (connection data overrides merged into self.connections,
        connection name overrides).
        """
        connection_names = flow.get_connection_names()
        connections = {}
        connections_name_overrides = {}
        for connection_name in connection_names:
            # replace " " with "_" in connection name
            normalized_name = normalize_connection_name(connection_name)
            if normalized_name in os.environ:
                override_conn = os.environ[normalized_name]
                data_override = False
                # try load connection as a json
                try:
                    # data override: env var holds the full connection payload
                    conn_data = json.loads(override_conn)
                    data_override = True
                except ValueError:
                    # name override: env var holds an alternate connection name
                    self.logger.debug(f"Connection value is not json, enable name override for {connection_name}.")
                    connections_name_overrides[connection_name] = override_conn
                if data_override:
                    try:
                        # try best to convert to connection, this is only for azureml deployment.
                        from promptflow.azure.operations._arm_connection_operations import ArmConnectionOperations

                        conn = ArmConnectionOperations._convert_to_connection_dict(connection_name, conn_data)
                        connections[connection_name] = conn
                    except Exception as e:
                        # NOTE(review): logger.warn is deprecated in favor of
                        # logger.warning.
                        self.logger.warn(f"Failed to convert connection data to connection: {e}")
                        raise InvalidConnectionData(connection_name)
        if len(connections_name_overrides) > 0:
            self.logger.info(f"Connection name overrides: {connections_name_overrides}")
        if len(connections) > 0:
            self.logger.info(f"Connections data overrides: {connections.keys()}")
        self.connections.update(connections)
        return self.connections, connections_name_overrides

    def raise_ex_on_invoker_initialization_failure(self, ex: Exception):
        """Whether an invoker init failure should be raised immediately."""
        from promptflow.azure.operations._arm_connection_operations import UserAuthenticationError

        # allow lazy authentication for UserAuthenticationError
        return not isinstance(ex, UserAuthenticationError)

    def get_user_agent(self) -> str:
        return USER_AGENT

    def get_metrics_common_dimensions(self):
        return self.common_dimensions

    def get_credential(self):
        return self.credential

    def _get_env_connections_if_exist(self):
        # For local test app connections will be set.
        connections = {}
        env_connections = get_pf_serving_env("PROMPTFLOW_ENCODED_CONNECTIONS")
        if env_connections:
            connections = decode_dict(env_connections)
        return connections

    def _get_metrics_recorder(self):
        """Build a MetricsRecorder backed by Azure Monitor, or None when unavailable."""
        # currently only support exporting it to azure monitor(application insights)
        # TODO: add support for dynamic loading thus user can customize their own exporter.
        custom_dimensions = self.get_metrics_common_dimensions()
        try:
            from azure.monitor.opentelemetry.exporter import AzureMonitorMetricExporter
            from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader

            # check whether azure monitor instrumentation key is set
            instrumentation_key = os.getenv("AML_APP_INSIGHTS_KEY") or os.getenv("APPINSIGHTS_INSTRUMENTATIONKEY")
            if instrumentation_key:
                self.logger.info("Initialize metrics recorder with azure monitor metrics exporter...")
                exporter = AzureMonitorMetricExporter(connection_string=f"InstrumentationKey={instrumentation_key}")
                reader = PeriodicExportingMetricReader(exporter=exporter, export_interval_millis=60000)
                return MetricsRecorder(self.logger, reader=reader, common_dimensions=custom_dimensions)
            else:
                self.logger.info("Azure monitor metrics exporter is not enabled, metrics will not be collected.")
        except ImportError:
            self.logger.warning("No metrics exporter module found, metrics will not be collected.")
        return None

    def _initialize_connection_provider(self):
        """Resolve the workspace connection provider from env/deployment settings."""
        # parse connection provider
        self.connection_provider = get_pf_serving_env("PROMPTFLOW_CONNECTION_PROVIDER")
        if not self.connection_provider:
            # Fall back to deployment configuration: first the PRT config
            # override, then the ARM deployment resource id.
            pf_override = os.getenv("PRT_CONFIG_OVERRIDE", None)
            if pf_override:
                env_conf = pf_override.split(",")
                env_conf_list = [setting.split("=") for setting in env_conf]
                settings = {setting[0]: setting[1] for setting in env_conf_list}
                self.subscription_id = settings.get("deployment.subscription_id", None)
                self.resource_group = settings.get("deployment.resource_group", None)
                self.workspace_name = settings.get("deployment.workspace_name", None)
                self.endpoint_name = settings.get("deployment.endpoint_name", None)
                self.deployment_name = settings.get("deployment.deployment_name", None)
            else:
                deploy_resource_id = os.getenv("AML_DEPLOYMENT_RESOURCE_ID", None)
                if deploy_resource_id:
                    # NOTE(review): re.match returns None for a malformed id,
                    # which would raise AttributeError on .groups() here.
                    match_result = re.match(AML_DEPLOYMENT_RESOURCE_ID_REGEX, deploy_resource_id)
                    if len(match_result.groups()) == 5:
                        self.subscription_id = match_result.group(1)
                        self.resource_group = match_result.group(2)
                        self.workspace_name = match_result.group(3)
                        self.endpoint_name = match_result.group(4)
                        self.deployment_name = match_result.group(5)
                else:
                    # raise exception if not found any valid connection provider setting
                    raise MissingConnectionProvider(
                        message="Missing connection provider, please check whether 'PROMPTFLOW_CONNECTION_PROVIDER' "
                        "is in your environment variable list."
                    )  # noqa: E501
            self.connection_provider = AML_CONNECTION_PROVIDER_TEMPLATE.format(
                self.subscription_id, self.resource_group, self.workspace_name
            )  # noqa: E501
def _get_managed_identity_credential_with_retry(**kwargs):
    """Return a ManagedIdentityCredential whose get_token retries on any exception."""
    from azure.identity import ManagedIdentityCredential

    class ManagedIdentityCredentialWithRetry(ManagedIdentityCredential):
        # Wrap token acquisition with the shared retry decorator so transient
        # IMDS failures do not immediately fail a request.
        @retry(Exception)
        def get_token(self, *scopes, **kwargs):
            return super().get_token(*scopes, **kwargs)

    return ManagedIdentityCredentialWithRetry(**kwargs)
| promptflow/src/promptflow/promptflow/_sdk/_serving/extension/azureml_extension.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_serving/extension/azureml_extension.py",
"repo_id": "promptflow",
"token_count": 4282
} | 41 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import os
import subprocess
from datetime import datetime
from pathlib import Path
from typing import Dict
from promptflow._sdk._configuration import Configuration
from promptflow._sdk._constants import ExperimentNodeType, ExperimentStatus, FlowRunProperties, RunTypes
from promptflow._sdk._errors import ExperimentCommandRunError, ExperimentHasCycle, ExperimentValueError
from promptflow._sdk._submitter import RunSubmitter
from promptflow._sdk._submitter.utils import SubmitterHelper
from promptflow._sdk.entities import Run
from promptflow._sdk.entities._experiment import Experiment
from promptflow._sdk.operations import RunOperations
from promptflow._sdk.operations._experiment_operations import ExperimentOperations
from promptflow._sdk.operations._local_storage_operations import LocalStorageOperations
from promptflow._utils.logger_utils import LoggerFactory
from promptflow.contracts.run_info import Status
from promptflow.contracts.run_mode import RunMode
from promptflow.exceptions import UserErrorException
logger = LoggerFactory.get_logger(name=__name__)
class ExperimentOrchestrator:
"""Experiment orchestrator, responsible for experiment running."""
    def __init__(self, run_operations: RunOperations, experiment_operations: ExperimentOperations):
        # Operations used to persist runs and experiment state, plus the two
        # submitters that execute flow nodes and command nodes respectively.
        self.run_operations = run_operations
        self.experiment_operations = experiment_operations
        self.run_submitter = ExperimentRunSubmitter(run_operations)
        self.command_submitter = ExperimentCommandSubmitter(run_operations)
    def start(self, experiment: Experiment, **kwargs):
        """Start an experiment.

        Runs the experiment's nodes in dependency order, persisting state
        after each node; the experiment is always marked TERMINATED at the end.

        :param experiment: Experiment to start.
        :type experiment: ~promptflow.entities.Experiment
        :param kwargs: Keyword arguments.
        :type kwargs: Any
        """
        # Start experiment
        logger.info(f"Starting experiment {experiment.name}.")
        experiment.status = ExperimentStatus.IN_PROGRESS
        experiment.last_start_time = datetime.utcnow().isoformat()
        experiment.last_end_time = None
        self.experiment_operations.create_or_update(experiment)
        # Ensure nodes order (topological sort; raises on circular dependency)
        resolved_nodes = self._ensure_nodes_order(experiment.nodes)
        # Run nodes
        run_dict = {}
        try:
            for node in resolved_nodes:
                logger.info(f"Running node {node.name}.")
                run = self._run_node(node, experiment, run_dict)
                # Update node run to experiment so progress survives failures.
                experiment._append_node_run(node.name, run)
                self.experiment_operations.create_or_update(experiment)
                run_dict[node.name] = run
                logger.info(f"Node {node.name} run {run.name} completed, outputs to {run._output_path}.")
        except Exception as e:
            # Deliberate best-effort: a node failure is logged, not re-raised,
            # so the experiment can still be finalized below.
            logger.error(f"Running experiment {experiment.name} failed with error {e}.")
        finally:
            # End experiment
            logger.info(f"Terminating experiment {experiment.name}.")
            experiment.status = ExperimentStatus.TERMINATED
            experiment.last_end_time = datetime.utcnow().isoformat()
            # NOTE(review): returning from `finally` swallows any in-flight
            # exception (e.g. KeyboardInterrupt) — confirm this is intended.
            return self.experiment_operations.create_or_update(experiment)
def _ensure_nodes_order(self, nodes):
# Perform topological sort to ensure nodes order
resolved_nodes = []
def _prepare_edges(node):
node_names = set()
for input_value in node.inputs.values():
if not isinstance(input_value, str):
continue
if (
input_value.startswith("${")
and not input_value.startswith("${data.")
and not input_value.startswith("${inputs.")
):
referenced_node_name = input_value.split(".")[0].replace("${", "")
node_names.add(referenced_node_name)
return node_names
edges = {node.name: _prepare_edges(node) for node in nodes}
logger.debug(f"Experiment nodes edges: {edges!r}")
while len(resolved_nodes) != len(nodes):
action = False
for node in nodes:
if node.name not in edges:
continue
if len(edges[node.name]) != 0:
continue
action = True
resolved_nodes.append(node)
del edges[node.name]
for referenced_nodes in edges.values():
referenced_nodes.discard(node.name)
break
if not action:
raise ExperimentHasCycle(f"Experiment has circular dependency {edges!r}")
logger.debug(f"Experiment nodes resolved order: {[node.name for node in resolved_nodes]}")
return resolved_nodes
def _run_node(self, node, experiment, run_dict) -> Run:
if node.type == ExperimentNodeType.FLOW:
return self._run_flow_node(node, experiment, run_dict)
elif node.type == ExperimentNodeType.COMMAND:
return self._run_command_node(node, experiment, run_dict)
raise ExperimentValueError(f"Unknown experiment node {node.name!r} type {node.type!r}")
def _run_flow_node(self, node, experiment, run_dict):
run_output_path = (Path(experiment._output_dir) / "runs" / node.name).resolve().absolute().as_posix()
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
run = ExperimentRun(
node_name=node.name,
experiment=experiment,
experiment_runs=run_dict,
# Use node name as prefix for run name?
name=f"{node.name}_attempt{timestamp}",
display_name=node.display_name or node.name,
column_mapping=node.inputs,
variant=node.variant,
flow=node.path,
connections=node.connections,
environment_variables=node.environment_variables,
# Config run output path to experiment output folder
config=Configuration(overrides={Configuration.RUN_OUTPUT_PATH: run_output_path}),
)
logger.debug(f"Creating run {run.name}")
return self.run_submitter.submit(run)
def _run_command_node(self, node, experiment, run_dict):
run_output_path = (Path(experiment._output_dir) / "runs" / node.name).resolve().absolute().as_posix()
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
run = ExperimentRun(
type=RunTypes.COMMAND,
node_name=node.name,
experiment=experiment,
experiment_runs=run_dict,
name=f"{node.name}_attempt{timestamp}",
display_name=node.display_name or node.name,
column_mapping=node.inputs,
# Use command code path as flow path
flow=node.code,
outputs=node.outputs,
command=node.command,
environment_variables=node.environment_variables,
config=Configuration(overrides={Configuration.RUN_OUTPUT_PATH: run_output_path}),
)
logger.debug(f"Creating run {run.name}")
return self.command_submitter.submit(run)
class ExperimentRun(Run):
    """Experiment run, includes experiment running context, like data, inputs and runs."""

    def __init__(self, experiment, node_name, experiment_runs: Dict[str, "ExperimentRun"], **kwargs):
        self.node_name = node_name
        self.experiment = experiment
        # Index experiment data/inputs by name for quick binding resolution.
        self.experiment_data = {data.name: data for data in experiment.data}
        self.experiment_inputs = {input.name: input for input in experiment.inputs}
        self.experiment_runs = experiment_runs
        super().__init__(**kwargs)
        # Must run after super().__init__ since it rewrites self.column_mapping.
        self._resolve_column_mapping()

    def _resolve_column_mapping(self):
        """Resolve column mapping with experiment inputs to constant values.

        :raises ExperimentValueError: when a ``${inputs.*}`` binding references
            an experiment input that does not exist.
        """
        logger.info(f"Start resolve node {self.node_name!r} column mapping.")
        resolved_mapping = {}
        for name, value in self.column_mapping.items():
            if not isinstance(value, str) or not value.startswith("${inputs."):
                resolved_mapping[name] = value
                continue
            # "${inputs.foo}" -> replace with the experiment input's default value.
            input_name = value.split(".")[1].replace("}", "")
            if input_name not in self.experiment_inputs:
                raise ExperimentValueError(
                    f"Node {self.node_name!r} inputs {value!r} related experiment input {input_name!r} not found."
                )
            resolved_mapping[name] = self.experiment_inputs[input_name].default
        logger.debug(f"Resolved node {self.node_name!r} column mapping {resolved_mapping}.")
        self.column_mapping = resolved_mapping

    def _get_referenced_data_and_run(self) -> tuple:
        """Get the node referenced data and runs. Format: {name: ExperimentData/ExperimentRun}

        :raises ExperimentValueError: when a binding references unknown
            experiment data or an unknown upstream run.
        """
        data, run = {}, {}
        inputs_mapping = self.column_mapping
        for value in inputs_mapping.values():
            if not isinstance(value, str):
                continue
            if value.startswith("${data."):
                name = value.split(".")[1].replace("}", "")
                if name not in self.experiment_data:
                    raise ExperimentValueError(
                        f"Node {self.display_name!r} inputs {value!r} related experiment data {name!r} not found."
                    )
                data[name] = self.experiment_data[name]
            elif value.startswith("${"):
                # Any other "${...}" binding refers to an upstream node run.
                name = value.split(".")[0].replace("${", "")
                if name not in self.experiment_runs:
                    raise ExperimentValueError(
                        f"Node {self.display_name!r} inputs {value!r} related experiment run {name!r} not found."
                    )
                run[name] = self.experiment_runs[name]
        return data, run
class ExperimentRunSubmitterHelper:
    """Shared helpers for resolving run-to-run input bindings."""

    @staticmethod
    def resolve_binding_from_run(run_name, run, run_operations) -> dict:
        """Return the valid binding dict based on a run."""
        # to align with cloud behavior, run.inputs should refer to original data
        binding_dict = {f"{run_name}.inputs": run_operations._get_data_path(run)}
        if run._outputs:
            # Command node run: expose each named output individually.
            for output_name, output_path in run._outputs.items():
                binding_dict[f"{run_name}.outputs.{output_name}"] = output_path
        else:
            # Flow node run: a single aggregated outputs folder.
            binding_dict[f"{run_name}.outputs"] = run_operations._get_outputs_path(run)
        logger.debug(f"Resolved node {run_name} binding inputs {binding_dict}.")
        return binding_dict
class ExperimentRunSubmitter(RunSubmitter):
    """Experiment run submitter, override some function from RunSubmitter as experiment run could be different."""

    @classmethod
    def _validate_inputs(cls, run: Run):
        # Do not validate run/data field, as we will resolve them in _resolve_input_dirs.
        return

    def _resolve_input_dirs(self, run: ExperimentRun):
        """Map the node's data/run references to local input directories.

        :raises ExperimentValueError: when more than one data input or more
            than one run input is referenced (only one of each is supported).
        """
        logger.info("Start resolve node %s input dirs.", run.node_name)
        logger.debug(f"Experiment context: {run.experiment_data}, {run.experiment_runs}, inputs: {run.column_mapping}")
        # Get the node referenced data and run
        referenced_data, referenced_run = run._get_referenced_data_and_run()
        if len(referenced_data) > 1:
            raise ExperimentValueError(
                f"Experiment flow node {run.node_name!r} has multiple data inputs {referenced_data}, "
                "only 1 is expected."
            )
        if len(referenced_run) > 1:
            raise ExperimentValueError(
                f"Experiment flow node {run.node_name!r} has multiple run inputs {referenced_run}, "
                "only 1 is expected."
            )
        (data_name, data_obj) = next(iter(referenced_data.items())) if referenced_data else (None, None)
        (run_name, run_obj) = next(iter(referenced_run.items())) if referenced_run else (None, None)
        logger.debug(f"Resolve node {run.node_name} referenced data {data_name!r}, run {run_name!r}.")
        # Build inputs from experiment data and run
        result = {}
        if data_obj:
            result.update({f"data.{data_name}": data_obj.path})
        if run_obj:
            result.update(ExperimentRunSubmitterHelper.resolve_binding_from_run(run_name, run_obj, self.run_operations))
        # Normalize to absolute paths; drop unresolved (None) entries.
        result = {k: str(Path(v).resolve()) for k, v in result.items() if v is not None}
        logger.debug(f"Resolved node {run.node_name} input dirs {result}.")
        return result
class ExperimentCommandSubmitter:
    """Experiment command submitter, responsible for experiment command running."""

    def __init__(self, run_operations: RunOperations):
        self.run_operations = run_operations

    def submit(self, run: ExperimentRun, **kwargs):
        """Submit an experiment command run.

        :param run: Experiment command to submit.
        :type run: ~promptflow.entities.Run
        """
        # Local storage captures the command's stdout/stderr log.
        local_storage = LocalStorageOperations(run, run_mode=RunMode.SingleNode)
        self._submit_command_run(run=run, local_storage=local_storage)
        # Re-fetch so the returned run reflects the final persisted status.
        return self.run_operations.get(name=run.name)

    def _resolve_inputs(self, run: ExperimentRun):
        """Resolve binding inputs to constant values.

        Only full bindings (the whole value is one ``${...}`` expression) are
        supported; partial bindings are passed through with a warning.
        """
        # e.g. "input_path": "${data.my_data}" -> "${inputs.input_path}": "real_data_path"
        logger.info("Start resolve node %s inputs.", run.node_name)
        data, runs = run._get_referenced_data_and_run()
        # prepare "${data.my_data}": real_data_path
        binding_dict = {"${data.%s}" % name: val.path for name, val in data.items()}
        # prepare "${run.outputs}": run_outputs_path, "${run.inputs}": run_inputs_path
        for name, val in runs.items():
            binding_dict.update(
                {
                    "${%s}" % k: v
                    for k, v in ExperimentRunSubmitterHelper.resolve_binding_from_run(
                        name, val, self.run_operations
                    ).items()
                }
            )
        logger.debug(f"Resolved node {run.node_name} binding inputs {binding_dict}.")
        # resolve inputs
        resolved_inputs = {}
        for name, value in run.column_mapping.items():
            if not isinstance(value, str) or not value.startswith("${"):
                resolved_inputs[name] = value
                continue
            # my_input: "${run.outputs}" -> my_input: run_outputs_path
            if value in binding_dict:
                resolved_inputs[name] = binding_dict[value]
                continue
            logger.warning(
                f"Possibly invalid partial input value binding {value!r} found for node {run.node_name!r}. "
                "Only full binding is supported for command node. For example: ${data.my_data}, ${main_node.outputs}."
            )
            resolved_inputs[name] = value
        logger.debug(f"Resolved node {run.node_name} inputs {resolved_inputs}.")
        return resolved_inputs

    def _resolve_outputs(self, run: ExperimentRun):
        """Resolve outputs to real path.

        Unset outputs get a default folder created under the run's output
        path; the run's ``_outputs`` mapping is updated in place.
        """
        # e.g. "output_path": "${outputs.my_output}" -> "${outputs.output_path}": "real_output_path"
        logger.info("Start resolve node %s outputs.", run.node_name)
        # resolve outputs
        resolved_outputs = {}
        for name, value in run._outputs.items():
            # Set default output path if user doesn't set it
            if not value:
                # Create default output path if user doesn't set it
                value = run._output_path / name
                value.mkdir(parents=True, exist_ok=True)
                value = value.resolve().absolute().as_posix()
                # Update default to run
                run._outputs[name] = value
            # Note: We will do nothing if user config the value, as we don't know it's a file or folder
            resolved_outputs[name] = value
        logger.debug(f"Resolved node {run.node_name} outputs {resolved_outputs}.")
        return resolved_outputs

    def _resolve_command(self, run: ExperimentRun, inputs: dict, outputs: dict):
        """Resolve command to real command.

        Substitutes ``${inputs.<name>}`` and ``${outputs.<name>}`` placeholders
        in the node's command string with the resolved values.
        """
        logger.info("Start resolve node %s command.", run.node_name)
        # resolve command
        resolved_command = run._command
        # replace inputs
        for name, value in inputs.items():
            resolved_command = resolved_command.replace(f"${{inputs.{name}}}", str(value))
        # replace outputs
        for name, value in outputs.items():
            resolved_command = resolved_command.replace(f"${{outputs.{name}}}", str(value))
        logger.debug(f"Resolved node {run.node_name} command {resolved_command}.")
        if "${" in resolved_command:
            # Leftover "${" means a placeholder had no matching input/output.
            logger.warning(
                f"Possibly unresolved command value binding found for node {run.node_name!r}. "
                f"Resolved command: {resolved_command}. Please check your command again."
            )
        return resolved_command

    def _submit_command_run(self, run: ExperimentRun, local_storage: LocalStorageOperations) -> dict:
        """Execute the resolved command and persist the run's final status."""
        # resolve environment variables
        SubmitterHelper.resolve_environment_variables(environment_variables=run.environment_variables)
        SubmitterHelper.init_env(environment_variables=run.environment_variables)
        # resolve inputs & outputs for command preparing
        # e.g. input_path: ${data.my_data} -> ${inputs.input_path}: real_data_path
        inputs = self._resolve_inputs(run)
        outputs = self._resolve_outputs(run)
        # replace to command
        command = self._resolve_command(run, inputs, outputs)
        # execute command
        status = Status.Failed.value
        # create run to db when fully prepared to run in executor, otherwise won't create it
        run._dump()  # pylint: disable=protected-access
        try:
            return_code = ExperimentCommandExecutor.run(command=command, cwd=run.flow, local_storage=local_storage)
            if return_code != 0:
                raise ExperimentCommandRunError(
                    f"Run {run.name} failed with return code {return_code}, "
                    f"please check out {run.properties[FlowRunProperties.OUTPUT_PATH]} for more details."
                )
            status = Status.Completed.value
        except Exception as e:
            # when run failed in executor, store the exception in result and dump to file
            logger.warning(f"Run {run.name} failed when executing in executor with exception {e}.")
            # for user error, swallow stack trace and return failed run since user don't need the stack trace
            if not isinstance(e, UserErrorException):
                # for other errors, raise it to user to help debug root cause.
                raise e
        finally:
            # Always record the terminal status, even on re-raise.
            self.run_operations.update(
                name=run.name,
                status=status,
                end_time=datetime.now(),
            )
class ExperimentCommandExecutor:
    """Experiment command executor, responsible for experiment command running."""

    @staticmethod
    def run(command: str, cwd: str, local_storage: LocalStorageOperations):
        """Start a subprocess to run the command"""
        log_path = local_storage.logger.file_path
        logger.info(f"Start running command {command}, log path: {log_path}.")
        # Both stdout and stderr go to the run's log file.
        with open(log_path, "w") as log_file:
            child = subprocess.Popen(
                command,
                stdout=log_file,
                stderr=log_file,
                shell=True,
                env=os.environ,
                cwd=cwd,
            )
            child.wait()
        return child.returncode
| promptflow/src/promptflow/promptflow/_sdk/_submitter/experiment_orchestrator.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_submitter/experiment_orchestrator.py",
"repo_id": "promptflow",
"token_count": 8367
} | 42 |
Exported Dockerfile & its dependencies are located in the same folder. The structure is as below:
- flow: the folder contains all the flow files
- ...
- connections: the folder contains yaml files to create all related connections
- ...
- runit: the folder contains all the runit scripts
- ...
- Dockerfile: the dockerfile to build the image
- start.sh: the script used in `CMD` of `Dockerfile` to start the service
- settings.json: a json file to store the settings of the docker image
- README.md: the readme file describing how to use the exported Dockerfile and scripts
Please refer to [official doc](https://microsoft.github.io/promptflow/how-to-guides/deploy-and-export-a-flow.html#export-a-flow)
for more details about how to use the exported dockerfile and scripts.
| promptflow/src/promptflow/promptflow/_sdk/data/docker/README.md/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/data/docker/README.md",
"repo_id": "promptflow",
"token_count": 206
} | 43 |
{
"$schema": "http://json-schema.org/draft-04/schema#",
"title": "Tool",
"type": "object",
"properties": {
"name": {
"type": "string"
},
"type": {
"$ref": "#/definitions/ToolType"
},
"inputs": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/InputDefinition"
}
},
"outputs": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/OutputDefinition"
}
},
"description": {
"type": "string"
},
"connection_type": {
"type": "array",
"items": {
"$ref": "#/definitions/ConnectionType"
}
},
"module": {
"type": "string"
},
"class_name": {
"type": "string"
},
"source": {
"type": "string"
},
"LkgCode": {
"type": "string"
},
"code": {
"type": "string"
},
"function": {
"type": "string"
},
"action_type": {
"type": "string"
},
"provider_config": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/InputDefinition"
}
},
"function_config": {
"type": "object",
"additionalProperties": {
"$ref": "#/definitions/InputDefinition"
}
},
"icon": {},
"category": {
"type": "string"
},
"tags": {
"type": "object",
"additionalProperties": {}
},
"is_builtin": {
"type": "boolean"
},
"package": {
"type": "string"
},
"package_version": {
"type": "string"
},
"default_prompt": {
"type": "string"
},
"enable_kwargs": {
"type": "boolean"
},
"deprecated_tools": {
"type": "array",
"items": {
"type": "string"
}
},
"tool_state": {
"$ref": "#/definitions/ToolState"
}
},
"definitions": {
"ToolType": {
"type": "string",
"description": "",
"x-enumNames": [
"Llm",
"Python",
"Action",
"Prompt",
"CustomLLM",
"CSharp"
],
"enum": [
"llm",
"python",
"action",
"prompt",
"custom_llm",
"csharp"
]
},
"InputDefinition": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"type": {
"type": "array",
"items": {
"$ref": "#/definitions/ValueType"
}
},
"default": {},
"description": {
"type": "string"
},
"enum": {
"type": "array",
"items": {
"type": "string"
}
},
"enabled_by": {
"type": "string"
},
"enabled_by_type": {
"type": "array",
"items": {
"$ref": "#/definitions/ValueType"
}
},
"enabled_by_value": {
"type": "array",
"items": {}
},
"model_list": {
"type": "array",
"items": {
"type": "string"
}
},
"capabilities": {
"$ref": "#/definitions/AzureOpenAIModelCapabilities"
},
"dynamic_list": {
"$ref": "#/definitions/ToolInputDynamicList"
},
"allow_manual_entry": {
"type": "boolean"
},
"is_multi_select": {
"type": "boolean"
},
"generated_by": {
"$ref": "#/definitions/ToolInputGeneratedBy"
},
"input_type": {
"$ref": "#/definitions/InputType"
},
"advanced": {
"type": [
"boolean",
"null"
]
},
"ui_hints": {
"type": "object",
"additionalProperties": {}
}
}
},
"ValueType": {
"type": "string",
"description": "",
"x-enumNames": [
"Int",
"Double",
"Bool",
"String",
"Secret",
"PromptTemplate",
"Object",
"List",
"BingConnection",
"OpenAIConnection",
"AzureOpenAIConnection",
"AzureContentModeratorConnection",
"CustomConnection",
"AzureContentSafetyConnection",
"SerpConnection",
"CognitiveSearchConnection",
"SubstrateLLMConnection",
"PineconeConnection",
"QdrantConnection",
"WeaviateConnection",
"FunctionList",
"FunctionStr",
"FormRecognizerConnection",
"FilePath",
"Image"
],
"enum": [
"int",
"double",
"bool",
"string",
"secret",
"prompt_template",
"object",
"list",
"BingConnection",
"OpenAIConnection",
"AzureOpenAIConnection",
"AzureContentModeratorConnection",
"CustomConnection",
"AzureContentSafetyConnection",
"SerpConnection",
"CognitiveSearchConnection",
"SubstrateLLMConnection",
"PineconeConnection",
"QdrantConnection",
"WeaviateConnection",
"function_list",
"function_str",
"FormRecognizerConnection",
"file_path",
"image"
]
},
"AzureOpenAIModelCapabilities": {
"type": "object",
"properties": {
"completion": {
"type": [
"boolean",
"null"
]
},
"chat_completion": {
"type": [
"boolean",
"null"
]
},
"embeddings": {
"type": [
"boolean",
"null"
]
}
}
},
"ToolInputDynamicList": {
"type": "object",
"properties": {
"func_path": {
"type": "string"
},
"func_kwargs": {
"type": "array",
"description": "Sample value in yaml\nfunc_kwargs:\n- name: prefix # Argument name to be passed to the function\n type: \n - string\n # if optional is not specified, default to false.\n # this is for UX pre-validaton. If optional is false, but no input. UX can throw error in advanced.\n optional: true\n reference: ${inputs.index_prefix} # Dynamic reference to another input parameter\n- name: size # Another argument name to be passed to the function\n type: \n - int\n optional: true\n default: 10",
"items": {
"type": "object",
"additionalProperties": {}
}
}
}
},
"ToolInputGeneratedBy": {
"type": "object",
"properties": {
"func_path": {
"type": "string"
},
"func_kwargs": {
"type": "array",
"description": "Sample value in yaml\nfunc_kwargs:\n- name: index_type # Argument name to be passed to the function\n type: \n - string\n optional: true\n reference: ${inputs.index_type} # Dynamic reference to another input parameter\n- name: index # Another argument name to be passed to the function\n type: \n - string\n optional: true\n reference: ${inputs.index}",
"items": {
"type": "object",
"additionalProperties": {}
}
},
"reverse_func_path": {
"type": "string"
}
}
},
"InputType": {
"type": "string",
"description": "",
"x-enumNames": [
"Default",
"UIOnly_Hidden"
],
"enum": [
"default",
"uionly_hidden"
]
},
"OutputDefinition": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"type": {
"type": "array",
"items": {
"$ref": "#/definitions/ValueType"
}
},
"description": {
"type": "string"
},
"isProperty": {
"type": "boolean"
}
}
},
"ConnectionType": {
"type": "string",
"description": "",
"x-enumNames": [
"OpenAI",
"AzureOpenAI",
"Serp",
"Bing",
"AzureContentModerator",
"Custom",
"AzureContentSafety",
"CognitiveSearch",
"SubstrateLLM",
"Pinecone",
"Qdrant",
"Weaviate",
"FormRecognizer"
],
"enum": [
"OpenAI",
"AzureOpenAI",
"Serp",
"Bing",
"AzureContentModerator",
"Custom",
"AzureContentSafety",
"CognitiveSearch",
"SubstrateLLM",
"Pinecone",
"Qdrant",
"Weaviate",
"FormRecognizer"
]
},
"ToolState": {
"type": "string",
"description": "",
"x-enumNames": [
"Stable",
"Preview",
"Deprecated"
],
"enum": [
"stable",
"preview",
"deprecated"
]
}
}
}
| promptflow/src/promptflow/promptflow/_sdk/data/tool.schema.json/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/data/tool.schema.json",
"repo_id": "promptflow",
"token_count": 4791
} | 44 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from functools import lru_cache
from os import PathLike
from pathlib import Path
from typing import Dict
from promptflow._sdk._constants import NODES
from promptflow._sdk._utils import parse_variant
from promptflow._sdk.entities import FlowContext
from promptflow._sdk.entities._flow import Flow
from promptflow._utils.flow_utils import load_flow_dag
from promptflow.contracts.flow import Node
from promptflow.exceptions import UserErrorException
# Resolve flow context to invoker
# Resolve flow according to flow context
# Resolve connection, variant, overwrite, store in-memory
# create invoker based on resolved flow
# cache invoker if flow context not changed (define hash function for flow context).
class FlowContextResolver:
    """Flow context resolver.

    Resolves a flow according to its FlowContext (variant, connections,
    overrides) by mutating an in-memory copy of the DAG, then builds a
    FlowInvoker from the resolved DAG.
    """

    def __init__(self, flow_path: PathLike):
        from promptflow import PFClient

        self.flow_path, self.flow_dag = load_flow_dag(flow_path=Path(flow_path))
        self.working_dir = Path(self.flow_path).parent.resolve()
        # Name -> node mapping of the loaded DAG (mutated in place by the resolve steps).
        self.node_name_2_node: Dict[str, Node] = {node["name"]: node for node in self.flow_dag[NODES]}
        self.client = PFClient()

    @classmethod
    @lru_cache
    def resolve(cls, flow: Flow) -> "FlowInvoker":
        """Resolve flow to flow invoker.

        Cached via lru_cache so an unchanged flow context reuses its invoker;
        the Flow argument must therefore be hashable.
        NOTE(review): the cache is unbounded and keeps every resolved invoker
        alive for the process lifetime — confirm this is acceptable.
        """
        resolver = cls(flow_path=flow.path)
        resolver._resolve(flow_context=flow.context)
        return resolver._create_invoker(flow=flow, flow_context=flow.context)

    def _resolve(self, flow_context: FlowContext):
        """Resolve flow context."""
        # TODO(2813319): support node overrides
        # TODO: define priority of the contexts
        flow_context._resolve_connections()
        # Resolution order: variant first, then connections, then overrides.
        self._resolve_variant(flow_context=flow_context)._resolve_connections(
            flow_context=flow_context,
        )._resolve_overrides(flow_context=flow_context)

    def _resolve_variant(self, flow_context: FlowContext) -> "FlowContextResolver":
        """Resolve variant of the flow and store in-memory."""
        # TODO: put all varint string parser here
        if not flow_context.variant:
            return self
        else:
            tuning_node, variant = parse_variant(flow_context.variant)
            from promptflow._sdk._submitter import overwrite_variant

            overwrite_variant(
                flow_dag=self.flow_dag,
                tuning_node=tuning_node,
                variant=variant,
            )
            return self

    def _resolve_connections(self, flow_context: FlowContext) -> "FlowContextResolver":
        """Resolve connections of the flow and store in-memory."""
        from promptflow._sdk._submitter import overwrite_connections

        overwrite_connections(
            flow_dag=self.flow_dag,
            connections=flow_context.connections,
            working_dir=self.working_dir,
        )
        return self

    def _resolve_overrides(self, flow_context: FlowContext) -> "FlowContextResolver":
        """Resolve overrides of the flow and store in-memory."""
        from promptflow._sdk._submitter import overwrite_flow

        overwrite_flow(
            flow_dag=self.flow_dag,
            params_overrides=flow_context.overrides,
        )
        return self

    def _resolve_connection_objs(self, flow_context: FlowContext):
        """Convert in-memory connection objects to execution connection dicts.

        :raises UserErrorException: if a connection still holds scrubbed
            (masked) secrets and so cannot be used for execution.
        """
        # validate connection objs
        connections = {}
        for key, connection_obj in flow_context._connection_objs.items():
            scrubbed_secrets = connection_obj._get_scrubbed_secrets()
            if scrubbed_secrets:
                raise UserErrorException(
                    f"Connection {connection_obj} contains scrubbed secrets with key {scrubbed_secrets.keys()}, "
                    "please make sure connection has decrypted secrets to use in flow execution. "
                )
            connections[key] = connection_obj._to_execution_connection_dict()
        return connections

    def _create_invoker(self, flow: Flow, flow_context: FlowContext) -> "FlowInvoker":
        """Build a FlowInvoker over the resolved DAG."""
        from promptflow._sdk._serving.flow_invoker import FlowInvoker

        connections = self._resolve_connection_objs(flow_context=flow_context)
        # use updated flow dag to create new flow object for invoker
        resolved_flow = Flow(code=self.working_dir, dag=self.flow_dag)
        invoker = FlowInvoker(
            flow=resolved_flow,
            connections=connections,
            streaming=flow_context.streaming,
        )
        return invoker
| promptflow/src/promptflow/promptflow/_sdk/operations/_flow_context_resolver.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/operations/_flow_context_resolver.py",
"repo_id": "promptflow",
"token_count": 1768
} | 45 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import io
import re
from jinja2 import Template
from .yaml_utils import dump_yaml, load_yaml_string
def generate_custom_strong_type_connection_spec(cls, package, package_version):
    """Build the connection spec dict for a custom strong type connection class.

    Each annotated field on *cls* becomes one entry in ``configSpecs``; a
    field is optional only when the class provides a non-None default value.
    """
    config_specs = []
    for field_name, field_type in cls.__annotations__.items():
        field_spec = {
            "name": field_name,
            "displayName": field_name.replace("_", " ").title(),
            "configValueType": field_type.__name__,
        }
        if hasattr(cls, field_name):
            # Has a class-level default: optional unless that default is None.
            field_spec["isOptional"] = getattr(cls, field_name, None) is not None
        else:
            field_spec["isOptional"] = False
        config_specs.append(field_spec)
    return {
        "connectionCategory": "CustomKeys",
        "flowValueType": "CustomConnection",
        "connectionType": cls.__name__,
        "ConnectionTypeDisplayName": cls.__name__,
        "configSpecs": config_specs,
        "module": cls.__module__,
        "package": package,
        "package_version": package_version,
    }
def generate_custom_strong_type_connection_template(cls, connection_spec, package, package_version):
    """Render a YAML connection template for a custom strong type connection.

    Keys from *connection_spec* are split into ``configs`` and ``secrets``
    (by ``configValueType == "Secret"``), rendered into the YAML skeleton,
    then annotated with end-of-line comments extracted from the class
    docstring via :func:`render_comments`.

    :param cls: The custom strong type connection class.
    :param connection_spec: Spec produced by
        ``generate_custom_strong_type_connection_spec``.
    :param package: Package providing the connection class.
    :param package_version: Version of that package.
    :return: The rendered YAML template string.
    """
    connection_template_str = """
    $schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomStrongTypeConnection.schema.json
    name: "to_replace_with_connection_name"
    type: custom
    custom_type: {{ custom_type }}
    module: {{ module }}
    package: {{ package }}
    package_version: {{ package_version }}
    configs:{% for key, value in configs.items() %}
      {{ key }}: "{{ value -}}"{% endfor %}
    secrets: # must-have{% for key, value in secrets.items() %}
      {{ key }}: "{{ value -}}"{% endfor %}
    """
    connection_template = Template(connection_template_str)
    # Extract configs and secrets
    configs = {}
    secrets = {}
    for spec in connection_spec["configSpecs"]:
        if spec["configValueType"] == "Secret":
            # Secrets never get a default value; always use a placeholder.
            secrets[spec["name"]] = "to_replace_with_" + spec["name"].replace("-", "_")
        else:
            # Prefer the class default; fall back to a placeholder.
            configs[spec["name"]] = getattr(cls, spec["name"], None) or "to_replace_with_" + spec["name"].replace(
                "-", "_"
            )
    # Prepare data for template
    data = {
        "custom_type": cls.__name__,
        "module": cls.__module__,
        "package": package,
        "package_version": package_version,
        "configs": configs,
        "secrets": secrets,
    }
    connection_template_with_data = connection_template.render(data)
    # Attach per-key comments extracted from the class docstring.
    connection_template_with_comments = render_comments(
        connection_template_with_data, cls, secrets.keys(), configs.keys()
    )
    return connection_template_with_comments
def render_comments(connection_template, cls, secrets, configs):
    """Attach end-of-line YAML comments (from the class docstring) to each
    secret and config key; return the template unchanged when *cls* has no
    docstring."""
    if cls.__doc__ is None:
        return connection_template
    data = load_yaml_string(connection_template)
    comments_map = extract_comments_mapping(list(secrets) + list(configs), cls.__doc__)
    # Annotate both sections with their extracted comments.
    for section_name, section_keys in (("secrets", secrets), ("configs", configs)):
        for key in section_keys:
            if key in comments_map.keys():
                data[section_name].yaml_add_eol_comment(comments_map[key] + "\n", key)
    # Dump the annotated YAML object back to a string.
    buf = io.StringIO()
    dump_yaml(data, buf)
    return buf.getvalue()
def extract_comments_mapping(keys, doc):
    """Map each key to a comment built from its ``:param``/``:type`` lines.

    For every key, the docstring *doc* is searched for reST-style
    ``:param <key>: ...`` and ``:type <key>: ...`` lines; the results are
    combined into a single comment string such as ``"str type. The API key."``.

    :param keys: Iterable of key names to look up.
    :param doc: The docstring to search.
    :return: Dict mapping each found key to its combined comment; keys with
        neither a description nor a type are omitted.
    """
    comments_map = {}
    for key in keys:
        try:
            # Bug fix: escape the key — names containing regex metacharacters
            # (e.g. "a+b") previously failed to match or corrupted the pattern.
            escaped_key = re.escape(key)
            param_pattern = rf":param {escaped_key}: (.*)"
            key_description = " ".join(re.findall(param_pattern, doc))
            type_pattern = rf":type {escaped_key}: (.*)"
            key_type = " ".join(re.findall(type_pattern, doc)).rstrip(".")
            if key_type and key_description:
                comments_map[key] = " ".join([key_type + " type.", key_description])
            elif key_type:
                comments_map[key] = key_type + " type."
            elif key_description:
                comments_map[key] = key_description
        except re.error:
            # Kept as a safety net; escaping above should prevent this.
            print("An error occurred when extract comments mapping.")
    return comments_map
| promptflow/src/promptflow/promptflow/_utils/connection_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/connection_utils.py",
"repo_id": "promptflow",
"token_count": 1814
} | 46 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""This is a common util file.
!!!Please do not include any project related import.!!!
"""
import contextlib
import contextvars
import functools
import importlib
import json
import logging
import os
import re
import time
import traceback
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Iterable, Iterator, List, Optional, TypeVar, Union
from promptflow._constants import DEFAULT_ENCODING
T = TypeVar("T")
class AttrDict(dict):
    """Dict whose keys can also be read as attributes (``d.key`` == ``d["key"]``)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def __getattr__(self, item):
        # Only called when normal attribute lookup fails; serve the dict key
        # if present, otherwise raise the usual AttributeError.
        try:
            return self.__getitem__(item)
        except KeyError:
            return super().__getattribute__(item)
def camel_to_snake(text: str) -> Optional[str]:
    """Convert CamelCase / mixedCase *text* to snake_case."""
    # First pass splits before a capitalized word, second pass splits
    # lower/digit-to-upper boundaries; then lowercase everything.
    partially_split = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", text)
    fully_split = re.sub("([a-z0-9])([A-Z])", r"\1_\2", partially_split)
    return fully_split.lower()
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime objects as ISO-8601 strings."""

    def default(self, o):
        # Delegate anything that is not a datetime to the base encoder,
        # which raises TypeError for unsupported types.
        if not isinstance(o, datetime):
            return json.JSONEncoder.default(self, o)
        return o.isoformat()
def is_json_serializable(value: Any) -> bool:
    """Return True when *value* can be serialized with ``json.dumps``.

    :param value: Any Python object.
    :return: True if serialization succeeds, False otherwise.
    """
    try:
        json.dumps(value)
        return True
    except (TypeError, ValueError):
        # TypeError: unsupported type (e.g. object()).
        # ValueError: circular reference — previously escaped uncaught even
        # though such a value is equally unserializable.
        return False
def load_json(file_path: Union[str, Path]) -> dict:
    """Load a JSON file, returning an empty dict for a zero-byte file."""
    # json.load would raise on an empty file; treat it as "no content".
    if os.path.getsize(file_path) == 0:
        return {}
    with open(file_path, "r") as f:
        return json.load(f)
def dump_list_to_jsonl(file_path: Union[str, Path], list_data: List[Dict]):
    """Write each dict in *list_data* as one JSON line (JSONL) to *file_path*."""
    serialized_lines = (json.dumps(record, ensure_ascii=False) for record in list_data)
    with open(file_path, "w", encoding=DEFAULT_ENCODING) as jsonl_file:
        for line in serialized_lines:
            jsonl_file.write(line + "\n")
def transpose(values: List[Dict[str, Any]], keys: Optional[List] = None) -> Dict[str, List]:
    """Pivot a list of row-dicts into ``{key: [row0[key], row1[key], ...]}``.

    Missing keys in a row contribute ``None`` to that key's column.
    """
    column_names = keys or list(values[0].keys())
    pivoted = {}
    for name in column_names:
        pivoted[name] = [row.get(name) for row in values]
    return pivoted
def reverse_transpose(values: Dict[str, List]) -> List[Dict[str, Any]]:
    """Un-pivot a dict of column-lists into a list of row-dicts.

    Example: ``{"a": [1, 2], "b": [3, 4]}`` -> ``[{"a": 1, "b": 3}, {"a": 2, "b": 4}]``.

    :param values: Mapping from key to a list of values; all lists must have
        the same length.
    :return: One dict per index position, combining each key's value.
    :raises Exception: if the value lists differ in length.
    """
    # Robustness fix: an empty mapping now yields [] instead of raising
    # IndexError on values_lists[0].
    if not values:
        return []
    value_lists = list(values.values())
    _len = len(value_lists[0])
    if any(len(value_list) != _len for value_list in value_lists):
        raise Exception(f"Value list of each key must have same length, please check {values!r}.")
    # zip(*value_lists) walks the columns in lockstep, one row per index.
    return [dict(zip(values.keys(), row)) for row in zip(*value_lists)]
def deprecated(f=None, replace=None, version=None):
    """Decorator that logs a deprecation warning each time *f* is called.

    Usable both bare (``@deprecated``) and with arguments
    (``@deprecated(replace="new_fn", version="1.0")``).
    """
    if f is None:
        # Called with arguments: return a decorator bound to them.
        return functools.partial(deprecated, replace=replace, version=version)
    message_parts = [f"Function {f.__qualname__!r} is deprecated."]
    if version:
        message_parts.append(f"Deprecated since version {version}.")
    if replace:
        message_parts.append(f"Use {replace!r} instead.")
    warning_message = " ".join(message_parts)

    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        logging.warning(warning_message)
        return f(*args, **kwargs)

    return wrapper
def try_import(module, error_message, raise_error=True):
    """Attempt to import *module*; log a warning (and optionally raise) on failure."""
    try:
        importlib.import_module(module)
    except ImportError as e:
        full_message = f"{error_message} Root cause: {e!r}"
        logging.warning(full_message)
        if raise_error:
            raise Exception(full_message)
def is_in_ci_pipeline():
    """Return True when the ``IS_IN_CI_PIPELINE`` env var is exactly ``"true"``."""
    return os.environ.get("IS_IN_CI_PIPELINE") == "true"
def count_and_log_progress(
    inputs: Iterable[T], logger: logging.Logger, total_count: int, formatter="{count} / {total_count} finished."
) -> Iterator[T]:
    """Yield items from *inputs*, logging progress roughly every 10% and at the end."""
    log_interval = max(int(total_count / 10), 1)
    for index, item in enumerate(inputs, start=1):
        if index % log_interval == 0 or index == total_count:
            logger.info(formatter.format(count=index, total_count=total_count))
        yield item
def log_progress(
    run_start_time: datetime,
    logger: logging.Logger,
    count: int,
    total_count: int,
    formatter="Finished {count} / {total_count} lines.",
    *,
    last_log_count: Optional[int] = None,
):
    """Log line-level progress together with average and estimated remaining time.

    :param run_start_time: UTC start time of the run; used for the per-line average
        (compared against ``datetime.utcnow()`` below, so it must be naive UTC).
    :param logger: logger that receives the progress messages.
    :param count: number of lines finished so far.
    :param total_count: total number of lines to process.
    :param formatter: template for the progress message; formatted with ``count``
        and ``total_count``.
    :param last_log_count: count at the previous log call, if known; switches the
        throttle from divisibility to a delta check.
    """
    # Calculate log_interval to determine when to log progress.
    # If total_count is less than 100, log every 10% of total_count; otherwise, log every 10 lines.
    log_interval = min(10, max(int(total_count / 10), 1))
    # If last_log_count is not None, determine whether to log based on whether the difference
    # between the current count and the previous count exceeds log_interval.
    # Otherwise, decide based on whether the current count is evenly divisible by log_interval.
    if last_log_count:
        log_flag = (count - last_log_count) >= log_interval
    else:
        log_flag = count % log_interval == 0
    # Always log on the final line, regardless of the throttle.
    if count > 0 and (log_flag or count == total_count):
        average_execution_time = round((datetime.utcnow().timestamp() - run_start_time.timestamp()) / count, 2)
        estimated_execution_time = round(average_execution_time * (total_count - count), 2)
        logger.info(formatter.format(count=count, total_count=total_count))
        logger.info(
            f"Average execution time for completed lines: {average_execution_time} seconds. "
            f"Estimated time for incomplete lines: {estimated_execution_time} seconds."
        )
def extract_user_frame_summaries(frame_summaries: List[traceback.FrameSummary]):
    """Trim a stack so it starts at the first frame outside promptflow's _core package.

    Returns the suffix of *frame_summaries* beginning right after the last _core
    frame that precedes user code; if no such transition exists, returns the
    input unchanged.
    """
    from promptflow import _core

    core_folder = os.path.dirname(_core.__file__)
    for index, (current, following) in enumerate(zip(frame_summaries, frame_summaries[1:])):
        # A transition from a _core frame to a non-_core frame marks the start of user code.
        if current.filename.startswith(core_folder) and not following.filename.startswith(core_folder):
            return frame_summaries[index + 1 :]
    return frame_summaries
def format_user_stacktrace(frame):
    """Format the stack of *frame*, keeping only frames that belong to user code."""
    # TODO: Maybe we can filter all frames from our code base to make it clean?
    return traceback.format_list(extract_user_frame_summaries(traceback.extract_stack(frame)))
def generate_elapsed_time_messages(func_name: str, start_time: float, interval: int, thread_id: int):
    """Build a one-item message list describing how long *func_name* has been running
    on *thread_id*, including that thread's user-code stacktrace when it is alive."""
    import sys

    frame = sys._current_frames().get(thread_id)
    if frame is None:
        thread_msg = (
            f"thread {thread_id} cannot be found in sys._current_frames, "
            + "maybe it has been terminated due to unexpected errors."
        )
    else:
        stack_msg = "".join(format_user_stacktrace(frame))
        thread_msg = f"stacktrace of thread {thread_id}:\n{stack_msg}"
    # Round the elapsed time to a multiple of the reporting interval so repeated
    # messages line up on the same cadence.
    elapse_time = round((time.perf_counter() - start_time) / interval) * interval
    return [f"{func_name} has been running for {elapse_time:.0f} seconds, {thread_msg}"]
def set_context(context: contextvars.Context):
    """Copy every context variable from *context* into the current context."""
    for context_var, context_value in context.items():
        context_var.set(context_value)
def convert_inputs_mapping_to_param(inputs_mapping: dict):
    """Use this function to convert inputs_mapping to a string that can be passed to component as a string parameter,
    we have to do this since we can't pass a dict as a parameter to component.
    # TODO: Finalize the format of inputs_mapping
    """
    pairs = (f"{name}={value}" for name, value in inputs_mapping.items())
    return ",".join(pairs)
@contextlib.contextmanager
def environment_variable_overwrite(key, val):
    """Temporarily set environment variable *key* to *val*, restoring the previous
    value (or removing the variable entirely) when the ``with`` block exits.

    :param key: name of the environment variable to overwrite.
    :param val: temporary value held for the duration of the block.
    """
    if key in os.environ.keys():
        backup_value = os.environ[key]
    else:
        backup_value = None
    os.environ[key] = val
    try:
        yield
    finally:
        # Compare against None explicitly: an original value of "" is falsy but
        # must still be restored rather than popped.
        if backup_value is not None:
            os.environ[key] = backup_value
        else:
            os.environ.pop(key)
def resolve_dir_to_absolute(base_dir: Union[str, Path], sub_dir: Union[str, Path]) -> Path:
    """Resolve directory to absolute path with base_dir as root"""
    resolved = sub_dir if isinstance(sub_dir, Path) else Path(sub_dir)
    if resolved.is_absolute():
        return resolved
    root = base_dir if isinstance(base_dir, Path) else Path(base_dir)
    return root / sub_dir
def parse_ua_to_dict(ua):
    """Parse string user agent to dict with name as ua name and value as ua version.

    Tokens are space-separated ``name/version`` pairs. Only the first "/" splits
    name from version, and tokens without a "/" are skipped instead of raising
    (the previous tuple-unpacking crashed on any malformed token).
    """
    ua_dict = {}
    for item in ua.split(" "):
        if not item:
            continue
        name, sep, version = item.partition("/")
        if not sep:
            # Malformed token without a version part; ignore it rather than fail.
            continue
        ua_dict[name] = version
    return ua_dict
# TODO: Add "conditions" parameter to pass in a list of lambda functions
# to check if the environment variable is valid.
def get_int_env_var(env_var_name, default_value=None):
"""
The function `get_int_env_var` retrieves an integer environment variable value, with an optional
default value if the variable is not set or cannot be converted to an integer.
:param env_var_name: The name of the environment variable you want to retrieve the value of
:param default_value: The default value is the value that will be returned if the environment
variable is not found or if it cannot be converted to an integer
:return: an integer value.
"""
try:
return int(os.environ.get(env_var_name, default_value))
except Exception:
return default_value
def prompt_y_n(msg, default=None):
    """Interactively ask a yes/no question until an answer is given.

    :param msg: question text shown to the user.
    :param default: "y" or "n" to accept an empty answer as that choice;
        None forces an explicit answer.
    :return: True for yes, False for no.
    :raises ValueError: if *default* is not one of None, "y", "n".
    """
    if default not in [None, "y", "n"]:
        raise ValueError("Valid values for default are 'y', 'n' or None")
    # The default choice (if any) is shown capitalized in the prompt.
    yes_display = "Y" if default == "y" else "y"
    no_display = "N" if default == "n" else "n"
    while True:
        answer = prompt_input("{} ({}/{}): ".format(msg, yes_display, no_display))
        lowered = answer.lower()
        if lowered == no_display.lower():
            return False
        if lowered == yes_display.lower():
            return True
        if default and not answer:
            # Empty answer falls back to the configured default.
            return default == yes_display.lower()
def prompt_input(msg):
    """Read one line from stdin, prefixing the prompt with an "===> " marker."""
    return input(f"\n===> {msg}")
| promptflow/src/promptflow/promptflow/_utils/utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/utils.py",
"repo_id": "promptflow",
"token_count": 3875
} | 47 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._azure_machine_learning_designer_service_client import AzureMachineLearningDesignerServiceClient
# Only the service client is exported as this package's public API.
__all__ = ['AzureMachineLearningDesignerServiceClient']
# `._patch.py` is used for handwritten extensions to the generated code
# Example: https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/customize_code/how-to-patch-sdk-code.md
from ._patch import patch_sdk
patch_sdk()
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/__init__.py",
"repo_id": "promptflow",
"token_count": 192
} | 48 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._flow_sessions_admin_operations import build_create_flow_session_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class FlowSessionsAdminOperations:
    """FlowSessionsAdminOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~flow.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace_async
    async def create_flow_session(
        self,
        subscription_id: str,
        resource_group_name: str,
        workspace_name: str,
        session_id: str,
        waitfor_completion: Optional[bool] = False,
        body: Optional["_models.CreateFlowSessionRequest"] = None,
        **kwargs: Any
    ) -> str:
        """create_flow_session.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param session_id:
        :type session_id: str
        :param waitfor_completion:
        :type waitfor_completion: bool
        :param body:
        :type body: ~flow.models.CreateFlowSessionRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: str, or the result of cls(response)
        :rtype: str
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[str]
        # Default mapping of auth/not-found/conflict status codes to typed
        # exceptions; callers may extend or override it via the `error_map` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # The request body is optional; only serialize it when supplied.
        if body is not None:
            _json = self._serialize.body(body, 'CreateFlowSessionRequest')
        else:
            _json = None
        request = build_create_flow_session_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            session_id=session_id,
            content_type=content_type,
            json=_json,
            waitfor_completion=waitfor_completion,
            template_url=self.create_flow_session.metadata['url'],
        )
        # Convert to the pipeline's request type and expand the URL template
        # against the configured endpoint.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Any non-200 status is surfaced as an HttpResponseError carrying the
        # deserialized service error body (when available).
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_flow_session.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessionsAdmin/{sessionId}'}  # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_flow_sessions_admin_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_flow_sessions_admin_operations.py",
"repo_id": "promptflow",
"token_count": 1820
} | 49 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Optional, TypeVar
    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
# Generated clients skip client-side validation; the service validates request values.
_SERIALIZER.client_side_validation = False
# fmt: off
def build_create_flow_session_request(
    subscription_id,  # type: str
    resource_group_name,  # type: str
    workspace_name,  # type: str
    session_id,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> HttpRequest
    """Build the HTTP POST request for the FlowSessionsAdmin create-session operation.
    Optional kwargs: ``content_type`` (request body MIME type), ``waitfor_completion``
    (query flag), ``template_url`` (URL template override), ``params``/``headers``
    (extra query parameters / headers); remaining kwargs are forwarded to
    :class:`~azure.core.rest.HttpRequest`.
    """
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    waitfor_completion = kwargs.pop('waitfor_completion', False)  # type: Optional[bool]
    accept = "text/plain, application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessionsAdmin/{sessionId}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
        "sessionId": _SERIALIZER.url("session_id", session_id, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    if waitfor_completion is not None:
        query_parameters['waitforCompletion'] = _SERIALIZER.query("waitfor_completion", waitfor_completion, 'bool')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
# fmt: on
class FlowSessionsAdminOperations(object):
    """FlowSessionsAdminOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~flow.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def create_flow_session(
        self,
        subscription_id,  # type: str
        resource_group_name,  # type: str
        workspace_name,  # type: str
        session_id,  # type: str
        waitfor_completion=False,  # type: Optional[bool]
        body=None,  # type: Optional["_models.CreateFlowSessionRequest"]
        **kwargs  # type: Any
    ):
        # type: (...) -> str
        """create_flow_session.
        :param subscription_id: The Azure Subscription ID.
        :type subscription_id: str
        :param resource_group_name: The Name of the resource group in which the workspace is located.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param session_id:
        :type session_id: str
        :param waitfor_completion:
        :type waitfor_completion: bool
        :param body:
        :type body: ~flow.models.CreateFlowSessionRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: str, or the result of cls(response)
        :rtype: str
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[str]
        # Default mapping of auth/not-found/conflict status codes to typed
        # exceptions; callers may extend or override it via the `error_map` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        # The request body is optional; only serialize it when supplied.
        if body is not None:
            _json = self._serialize.body(body, 'CreateFlowSessionRequest')
        else:
            _json = None
        request = build_create_flow_session_request(
            subscription_id=subscription_id,
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            session_id=session_id,
            content_type=content_type,
            json=_json,
            waitfor_completion=waitfor_completion,
            template_url=self.create_flow_session.metadata['url'],
        )
        # Convert to the pipeline's request type and expand the URL template
        # against the configured endpoint.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Any non-200 status is surfaced as an HttpResponseError carrying the
        # deserialized service error body (when available).
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('str', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_flow_session.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/FlowSessionsAdmin/{sessionId}'}  # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_flow_sessions_admin_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_flow_sessions_admin_operations.py",
"repo_id": "promptflow",
"token_count": 2613
} | 50 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=protected-access
import os
import uuid
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, Optional, TypeVar, Union
from azure.ai.ml._artifacts._blob_storage_helper import BlobStorageClient
from azure.ai.ml._artifacts._gen2_storage_helper import Gen2StorageClient
from azure.ai.ml._azure_environments import _get_storage_endpoint_from_metadata
from azure.ai.ml._restclient.v2022_10_01.models import DatastoreType
from azure.ai.ml._scope_dependent_operations import OperationScope
from azure.ai.ml._utils._arm_id_utils import (
AMLNamedArmId,
get_resource_name_from_arm_id,
is_ARM_id_for_resource,
remove_aml_prefix,
)
from azure.ai.ml._utils._asset_utils import (
IgnoreFile,
_build_metadata_dict,
_validate_path,
get_ignore_file,
get_object_hash,
)
from azure.ai.ml._utils._storage_utils import (
AzureMLDatastorePathUri,
get_artifact_path_from_storage_url,
get_storage_client,
)
from azure.ai.ml.constants._common import SHORT_URI_FORMAT, STORAGE_ACCOUNT_URLS
from azure.ai.ml.entities import Environment
from azure.ai.ml.entities._assets._artifacts.artifact import Artifact, ArtifactStorageInfo
from azure.ai.ml.entities._credentials import AccountKeyConfiguration
from azure.ai.ml.entities._datastore._constants import WORKSPACE_BLOB_STORE
from azure.ai.ml.exceptions import ErrorTarget, ValidationException
from azure.ai.ml.operations._datastore_operations import DatastoreOperations
from azure.storage.blob import BlobSasPermissions, generate_blob_sas
from azure.storage.filedatalake import FileSasPermissions, generate_file_sas
from ..._utils.logger_utils import LoggerFactory
from ._fileshare_storeage_helper import FlowFileStorageClient
module_logger = LoggerFactory.get_logger(__name__)
def _get_datastore_name(*, datastore_name: Optional[str] = WORKSPACE_BLOB_STORE) -> str:
    """Normalize a datastore reference (full ARM id, azureml-prefixed id, or bare
    name) to a plain datastore name, defaulting to the workspace blob store."""
    resolved = datastore_name if datastore_name else WORKSPACE_BLOB_STORE
    try:
        return get_resource_name_from_arm_id(resolved)
    except (ValueError, AttributeError, ValidationException):
        module_logger.debug("datastore_name %s is not a full arm id. Proceed with a shortened name.\n", resolved)
        resolved = remove_aml_prefix(resolved)
        if is_ARM_id_for_resource(resolved):
            resolved = get_resource_name_from_arm_id(resolved)
        return resolved
def get_datastore_info(operations: DatastoreOperations, name: str) -> Dict[str, str]:
    """Get datastore account, type, and auth information.

    :param operations: datastore operations used to fetch the datastore and its secrets.
    :param name: datastore name; a falsy value selects the workspace default datastore.
    :return: dict with "storage_type", "storage_account", "account_url", "credential"
        and "container_name" keys.
    :raises Exception: if the datastore type is not blob, ADLS Gen2, or file share.
    """
    datastore_info = {}
    if name:
        datastore = operations.get(name, include_secrets=True)
    else:
        # Fall back to the workspace default datastore when no name is given.
        datastore = operations.get_default(include_secrets=True)
    storage_endpoint = _get_storage_endpoint_from_metadata()
    credentials = datastore.credentials
    datastore_info["storage_type"] = datastore.type
    datastore_info["storage_account"] = datastore.account_name
    datastore_info["account_url"] = STORAGE_ACCOUNT_URLS[datastore.type].format(
        datastore.account_name, storage_endpoint
    )
    # Credential resolution order: account key, then SAS token, then the
    # operations client's own (AAD) credential when no SAS token exists.
    if isinstance(credentials, AccountKeyConfiguration):
        datastore_info["credential"] = credentials.account_key
    else:
        try:
            datastore_info["credential"] = credentials.sas_token
        except Exception as e:  # pylint: disable=broad-except
            if not hasattr(credentials, "sas_token"):
                datastore_info["credential"] = operations._credential
            else:
                raise e
    # "container_name" is the storage-type-specific namespace: blob container,
    # Gen2 filesystem, or file share.
    if datastore.type == DatastoreType.AZURE_BLOB:
        datastore_info["container_name"] = str(datastore.container_name)
    elif datastore.type == DatastoreType.AZURE_DATA_LAKE_GEN2:
        datastore_info["container_name"] = str(datastore.filesystem)
    elif datastore.type == DatastoreType.AZURE_FILE:
        datastore_info["container_name"] = str(datastore.file_share_name)
    else:
        raise Exception(
            f"Datastore type {datastore.type} is not supported for uploads. "
            f"Supported types are {DatastoreType.AZURE_BLOB} and {DatastoreType.AZURE_DATA_LAKE_GEN2}."
        )
    return datastore_info
def list_logs_in_datastore(ds_info: Dict[str, str], prefix: str, legacy_log_folder_name: str) -> Dict[str, str]:
    """Returns a dictionary of file name to blob or data lake uri with SAS token, matching the structure of
    RunDetails.logFiles.
    legacy_log_folder_name: the name of the folder in the datastore that contains the logs
    /azureml-logs/*.txt is the legacy log structure for commandJob and sweepJob
    /logs/azureml/*.txt is the legacy log structure for pipeline parent Job
    """
    if ds_info["storage_type"] not in [
        DatastoreType.AZURE_BLOB,
        DatastoreType.AZURE_DATA_LAKE_GEN2,
    ]:
        raise Exception("Only Blob and Azure DataLake Storage Gen2 datastores are supported.")
    storage_client = get_storage_client(
        credential=ds_info["credential"],
        container_name=ds_info["container_name"],
        storage_account=ds_info["storage_account"],
        storage_type=ds_info["storage_type"],
    )
    items = storage_client.list(starts_with=prefix + "/user_logs/")
    # Append legacy log files if present
    items.extend(storage_client.list(starts_with=prefix + legacy_log_folder_name))
    log_dict = {}
    for item_name in items:
        # Key the result by the path relative to the run prefix.
        sub_name = item_name.split(prefix + "/")[1]
        if isinstance(storage_client, BlobStorageClient):
            # Short-lived (30 min) read-only SAS so callers can fetch the log directly.
            token = generate_blob_sas(
                account_name=ds_info["storage_account"],
                container_name=ds_info["container_name"],
                blob_name=item_name,
                account_key=ds_info["credential"],
                permission=BlobSasPermissions(read=True),
                expiry=datetime.utcnow() + timedelta(minutes=30),
            )
        elif isinstance(storage_client, Gen2StorageClient):
            token = generate_file_sas(  # pylint: disable=no-value-for-parameter
                account_name=ds_info["storage_account"],
                file_system_name=ds_info["container_name"],
                file_name=item_name,
                credential=ds_info["credential"],
                permission=FileSasPermissions(read=True),
                expiry=datetime.utcnow() + timedelta(minutes=30),
            )
        # NOTE(review): if neither isinstance branch matched, `token` from a prior
        # iteration would be reused here — unreachable today given the storage_type
        # check above, but worth confirming if more storage types are added.
        log_dict[sub_name] = "{}/{}/{}?{}".format(ds_info["account_url"], ds_info["container_name"], item_name, token)
    return log_dict
def _get_default_datastore_info(datastore_operation):
    """Return datastore info (see :func:`get_datastore_info`) for the workspace default datastore."""
    return get_datastore_info(datastore_operation, None)
def upload_artifact(
    local_path: str,
    datastore_operation: DatastoreOperations,
    operation_scope: OperationScope,
    datastore_name: Optional[str],
    asset_hash: Optional[str] = None,
    show_progress: bool = True,
    asset_name: Optional[str] = None,
    asset_version: Optional[str] = None,
    ignore_file: IgnoreFile = IgnoreFile(None),
    sas_uri=None,
) -> ArtifactStorageInfo:
    """Upload local file or directory to datastore.

    :param local_path: file or directory to upload.
    :param datastore_operation: operations client used to resolve the target datastore.
    :param operation_scope: workspace scope (not referenced in this function body).
    :param datastore_name: target datastore name; normalized via _get_datastore_name.
    :param asset_hash: precomputed content hash passed to the storage client.
    :param show_progress: whether to render upload progress.
    :param asset_name: asset name recorded with the upload.
    :param asset_version: asset version recorded with the upload.
    :param ignore_file: ignore rules applied while walking a directory.
    :param sas_uri: when given, upload via a generic client built from this SAS URL
        instead of resolving a datastore.
    """
    if sas_uri:
        # NOTE(review): this branch builds a generic storage client, but the code
        # below uses FlowFileStorageClient-specific attributes (directory_client) —
        # confirm the sas_uri path is actually exercised.
        storage_client = get_storage_client(credential=None, storage_account=None, account_url=sas_uri)
    else:
        datastore_name = _get_datastore_name(datastore_name=datastore_name)
        datastore_info = get_datastore_info(datastore_operation, datastore_name)
        storage_client = FlowFileStorageClient(
            credential=datastore_info["credential"],
            file_share_name=datastore_info["container_name"],
            account_url=datastore_info["account_url"],
            azure_cred=datastore_operation._credential,
        )
    artifact_info = storage_client.upload(
        local_path,
        asset_hash=asset_hash,
        show_progress=show_progress,
        name=asset_name,
        version=asset_version,
        ignore_file=ignore_file,
    )
    # Prefix the returned path with the client's remote directory so callers get
    # the full path within the file share.
    artifact_info["remote path"] = os.path.join(
        storage_client.directory_client.directory_path, artifact_info["remote path"]
    )
    return artifact_info
def download_artifact(
    starts_with: Union[str, os.PathLike],
    destination: str,
    datastore_operation: DatastoreOperations,
    datastore_name: Optional[str],
    datastore_info: Optional[Dict] = None,
) -> str:
    """Download datastore path to local file or directory.
    :param Union[str, os.PathLike] starts_with: Prefix of blobs to download
    :param str destination: Path that files will be written to
    :param DatastoreOperations datastore_operation: Datastore operations
    :param Optional[str] datastore_name: name of datastore
    :param Dict datastore_info: the return value of invoking get_datastore_info
    :return str: Path that files were written to
    """
    # Storage clients expect POSIX-style prefixes, so normalize Path objects first.
    starts_with = starts_with.as_posix() if isinstance(starts_with, Path) else starts_with
    datastore_name = _get_datastore_name(datastore_name=datastore_name)
    # Re-query datastore info only when the caller did not pass a cached copy.
    if datastore_info is None:
        datastore_info = get_datastore_info(datastore_operation, datastore_name)
    storage_client = get_storage_client(**datastore_info)
    storage_client.download(starts_with=starts_with, destination=destination)
    return destination
def download_artifact_from_storage_url(
    blob_url: str,
    destination: str,
    datastore_operation: DatastoreOperations,
    datastore_name: Optional[str],
) -> str:
    """Download datastore blob URL to local file or directory.

    :param str blob_url: full storage URL of the blob to download
    :param str destination: local path the files will be written to
    :param DatastoreOperations datastore_operation: datastore operations client
    :param Optional[str] datastore_name: name of the datastore the URL belongs to
    :return str: path the files were written to
    """
    datastore_name = _get_datastore_name(datastore_name=datastore_name)
    datastore_info = get_datastore_info(datastore_operation, datastore_name)
    # Translate the absolute storage URL into a container-relative prefix.
    starts_with = get_artifact_path_from_storage_url(
        blob_url=str(blob_url), container_name=datastore_info.get("container_name")
    )
    return download_artifact(
        starts_with=starts_with,
        destination=destination,
        datastore_operation=datastore_operation,
        datastore_name=datastore_name,
        datastore_info=datastore_info,
    )
def download_artifact_from_aml_uri(uri: str, destination: str, datastore_operation: DatastoreOperations):
    """Downloads artifact pointed to by URI of the form `azureml://...` to destination.
    :param str uri: AzureML uri of artifact to download
    :param str destination: Path to download artifact to
    :param DatastoreOperations datastore_operation: datastore operations
    :return str: Path that files were downloaded to
    """
    # Split the azureml:// uri into its datastore name and in-datastore path.
    parsed_uri = AzureMLDatastorePathUri(uri)
    return download_artifact(
        starts_with=parsed_uri.path,
        destination=destination,
        datastore_operation=datastore_operation,
        datastore_name=parsed_uri.datastore,
    )
def aml_datastore_path_exists(
    uri: str, datastore_operation: DatastoreOperations, datastore_info: Optional[dict] = None
):
    """Checks whether `uri` of the form "azureml://" points to either a directory or a file.
    :param str uri: azure ml datastore uri
    :param DatastoreOperations datastore_operation: Datastore operation
    :param dict datastore_info: return value of get_datastore_info; re-queried when omitted
    Returns the storage client's ``exists`` result for the parsed path.
    """
    parsed_uri = AzureMLDatastorePathUri(uri)
    datastore_info = datastore_info or get_datastore_info(datastore_operation, parsed_uri.datastore)
    return get_storage_client(**datastore_info).exists(parsed_uri.path)
def _upload_to_datastore(
    operation_scope: OperationScope,
    datastore_operation: DatastoreOperations,
    path: Union[str, Path, os.PathLike],
    artifact_type: str,
    datastore_name: Optional[str] = None,
    show_progress: bool = True,
    asset_name: Optional[str] = None,
    asset_version: Optional[str] = None,
    asset_hash: Optional[str] = None,
    ignore_file: Optional[IgnoreFile] = None,
    sas_uri: Optional[str] = None,  # contains registry sas url
) -> ArtifactStorageInfo:
    """Validate *path*, compute its content hash (honoring ignore rules), and upload it.

    The hash makes uploads content-addressable: unchanged content maps to the same
    remote location. Any of *ignore_file* / *asset_hash* supplied by the caller are
    used as-is; otherwise they are derived from *path*.
    """
    _validate_path(path, _type=artifact_type)
    if not ignore_file:
        ignore_file = get_ignore_file(path)
    if not asset_hash:
        asset_hash = get_object_hash(path, ignore_file)
    artifact = upload_artifact(
        str(path),
        datastore_operation,
        operation_scope,
        datastore_name,
        show_progress=show_progress,
        asset_hash=asset_hash,
        asset_name=asset_name,
        asset_version=asset_version,
        ignore_file=ignore_file,
        sas_uri=sas_uri,
    )
    return artifact
def _upload_and_generate_remote_uri(
    operation_scope: OperationScope,
    datastore_operation: DatastoreOperations,
    path: Union[str, Path, os.PathLike],
    artifact_type: str = ErrorTarget.ARTIFACT,
    datastore_name: str = WORKSPACE_BLOB_STORE,
    show_progress: bool = True,
) -> str:
    """Upload *path* to the datastore and return a short-form datastore uri
    (SHORT_URI_FORMAT) referencing the uploaded artifact."""
    # Asset name is required for uploading to a datastore
    asset_name = str(uuid.uuid4())
    artifact_info = _upload_to_datastore(
        operation_scope=operation_scope,
        datastore_operation=datastore_operation,
        path=path,
        datastore_name=datastore_name,
        asset_name=asset_name,
        artifact_type=artifact_type,
        show_progress=show_progress,
    )
    path = artifact_info.relative_path
    # Extract the bare datastore name from its ARM id for the short uri.
    datastore = AMLNamedArmId(artifact_info.datastore_arm_id).asset_name
    return SHORT_URI_FORMAT.format(datastore, path)
def _update_metadata(name, version, indicator_file, datastore_info) -> None:
    """Stamp asset name/version metadata onto the uploaded artifact, dispatching on
    whether the datastore is blob- or Gen2-backed. Other storage types are ignored."""
    storage_client = get_storage_client(**datastore_info)
    if isinstance(storage_client, BlobStorageClient):
        _update_blob_metadata(name, version, indicator_file, storage_client)
    elif isinstance(storage_client, Gen2StorageClient):
        _update_gen2_metadata(name, version, indicator_file, storage_client)
def _update_blob_metadata(name, version, indicator_file, storage_client) -> None:
    """Set name/version metadata on the indicator blob for an uploaded artifact."""
    container_client = storage_client.container_client
    # The indicator path may be prefixed with the container name; strip it so the
    # blob client receives a container-relative path.
    if indicator_file.startswith(storage_client.container):
        indicator_file = indicator_file.split(storage_client.container)[1]
    blob = container_client.get_blob_client(blob=indicator_file)
    blob.set_blob_metadata(_build_metadata_dict(name=name, version=version))
def _update_gen2_metadata(name, version, indicator_file, storage_client) -> None:
    """Set name/version metadata on the artifact directory in an ADLS Gen2 file system."""
    artifact_directory_client = storage_client.file_system_client.get_directory_client(indicator_file)
    artifact_directory_client.set_metadata(_build_metadata_dict(name=name, version=version))
T = TypeVar("T", bound=Artifact)
def _check_and_upload_path(
artifact: T,
asset_operations: Union["DataOperations", "ModelOperations", "CodeOperations", "FeatureSetOperations"],
artifact_type: str,
datastore_name: Optional[str] = None,
sas_uri: Optional[str] = None,
show_progress: bool = True,
):
"""Checks whether `artifact` is a path or a uri and uploads it to the datastore if necessary.
param T artifact: artifact to check and upload param
Union["DataOperations", "ModelOperations", "CodeOperations"]
asset_operations: the asset operations to use for uploading
param str datastore_name: the name of the datastore to upload to
param str sas_uri: the sas uri to use for uploading
"""
from azure.ai.ml._utils.utils import is_mlflow_uri, is_url
datastore_name = artifact.datastore
if (
hasattr(artifact, "local_path")
and artifact.local_path is not None
or (
hasattr(artifact, "path")
and artifact.path is not None
and not (is_url(artifact.path) or is_mlflow_uri(artifact.path))
)
):
path = (
Path(artifact.path)
if hasattr(artifact, "path") and artifact.path is not None
else Path(artifact.local_path)
)
if not path.is_absolute():
path = Path(artifact.base_path, path).resolve()
uploaded_artifact = _upload_to_datastore(
asset_operations._operation_scope,
asset_operations._datastore_operation,
path,
datastore_name=datastore_name,
asset_name=artifact.name,
asset_version=str(artifact.version),
asset_hash=artifact._upload_hash if hasattr(artifact, "_upload_hash") else None,
sas_uri=sas_uri,
artifact_type=artifact_type,
show_progress=show_progress,
ignore_file=getattr(artifact, "_ignore_file", None),
)
return uploaded_artifact
def _check_and_upload_env_build_context(
    environment: Environment,
    operations: "EnvironmentOperations",
    sas_uri=None,
    show_progress: bool = True,
) -> Environment:
    """Upload the environment's docker build context (when it has one) and
    point ``environment.build.path`` at the uploaded location."""
    if not environment.path:
        return environment
    uploaded_artifact = _upload_to_datastore(
        operations._operation_scope,
        operations._datastore_operation,
        environment.path,
        asset_name=environment.name,
        asset_version=str(environment.version),
        asset_hash=environment._upload_hash,
        sas_uri=sas_uri,
        artifact_type=ErrorTarget.ENVIRONMENT,
        datastore_name=environment.datastore,
        show_progress=show_progress,
    )
    # TODO: Depending on decision trailing "/" needs to stay or not. EMS requires it to be present
    environment.build.path = uploaded_artifact.full_storage_path + "/"
    return environment
| promptflow/src/promptflow/promptflow/azure/operations/_artifact_utilities.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/operations/_artifact_utilities.py",
"repo_id": "promptflow",
"token_count": 6848
} | 51 |
import docutils.nodes
from docutils.core import publish_doctree
class DocstringParser:
    @staticmethod
    def parse(docstring: str):
        """Parse a reST-style docstring into its description plus per-parameter info.

        Returns a tuple ``(description, params)`` where ``params`` maps each
        parameter name to a dict with optional ``description`` and ``type`` keys.
        """
        doctree = publish_doctree(docstring)
        # The first node of the parsed tree holds the free-text description.
        description = doctree[0].astext()
        params = {}
        for field in doctree.traverse(docutils.nodes.field):
            label = field[0].astext()
            body = field[1].astext()
            # ":param name:" fields carry descriptions, ":type name:" fields carry types.
            for prefix, key in (("param", "description"), ("type", "type")):
                if label.startswith(prefix):
                    param_name = label.split(" ")[1]
                    params.setdefault(param_name, {})[key] = body
        return description, params
| promptflow/src/promptflow/promptflow/executor/_docstring_parser.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_docstring_parser.py",
"repo_id": "promptflow",
"token_count": 447
} | 52 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import dataclass
from datetime import datetime
from promptflow.contracts.run_info import RunInfo
@dataclass
class CacheRecord:
    """A persisted record of one cached run result, keyed by its computed hash."""

    run_id: str  # id of the run this cached result belongs to
    hash_id: str  # cache key -- presumably derived from cache_string; verify in storage impl
    flow_run_id: str  # id of the parent flow run
    flow_id: str  # id of the flow definition
    cache_string: str  # raw string the cache key is based on -- TODO confirm exact contents
    end_time: datetime  # completion time of the cached run
class AbstractCacheStorage:
    """Interface for cache storage backends: look up cache records by hash id
    and persist new run results for future reuse."""

    # NOTE: both methods were previously declared without `self`, which made them
    # unusable as instance methods (the instance got bound to the first parameter).
    def get_cache_record_list(self, hash_id: str) -> CacheRecord:
        """Return the cache record(s) matching ``hash_id``.

        NOTE(review): the name suggests a list while the annotation says a single
        ``CacheRecord`` -- implementations should confirm which is intended.
        """
        pass

    def persist_cache_result(self, run_info: RunInfo):
        """Persist the result captured in ``run_info`` so later runs can reuse it."""
        pass
| promptflow/src/promptflow/promptflow/storage/_cache_storage.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/storage/_cache_storage.py",
"repo_id": "promptflow",
"token_count": 189
} | 53 |
import json
import multiprocessing
import threading
from pathlib import Path
from tempfile import mkdtemp
from typing import Optional, Tuple, Union
import pytest
from promptflow._constants import FlowLanguage
from promptflow._utils.exception_utils import ExceptionPresenter
from promptflow.batch._batch_engine import BatchEngine
from promptflow.batch._csharp_executor_proxy import CSharpExecutorProxy
from promptflow.batch._result import BatchResult
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, ValidationException
from promptflow.executor._errors import ConnectionNotFound
from promptflow.storage._run_storage import AbstractRunStorage
from ..mock_execution_server import run_executor_server
from ..utils import MemoryRunStorage, get_flow_folder, get_flow_inputs_file, get_yaml_file
@pytest.mark.unittest
class TestCSharpExecutorProxy:
    """BatchEngine tests driven through an in-process mock of the C# executor proxy."""

    def setup_method(self):
        # Route CSharp flows to the mock proxy so no real dotnet executor is needed.
        BatchEngine.register_executor(FlowLanguage.CSharp, MockCSharpExecutorProxy)

    def test_batch(self):
        """A plain batch run completes every line."""
        # submit a batch run
        _, batch_result = self._submit_batch_run()
        assert batch_result.status == Status.Completed
        assert batch_result.completed_lines == batch_result.total_lines
        assert batch_result.system_metrics.duration > 0
        assert batch_result.completed_lines > 0

    def test_batch_execution_error(self):
        """Line-level failures are recorded but the run itself still completes."""
        # submit a batch run
        _, batch_result = self._submit_batch_run(has_error=True)
        assert batch_result.status == Status.Completed
        assert batch_result.total_lines == 3
        assert batch_result.failed_lines == 1
        assert batch_result.system_metrics.duration > 0

    def test_batch_validation_error(self):
        """An executor init error surfaces as a ValidationException before any lines run."""
        # prepare the init error file to mock the validation error
        error_message = "'test_connection' not found."
        test_exception = ConnectionNotFound(message=error_message)
        error_dict = ExceptionPresenter.create(test_exception).to_dict()
        init_error_file = Path(mkdtemp()) / "init_error.json"
        with open(init_error_file, "w") as file:
            json.dump(error_dict, file)
        # submit a batch run
        with pytest.raises(ValidationException) as e:
            self._submit_batch_run(init_error_file=init_error_file)
        assert error_message in e.value.message
        assert e.value.error_codes == ["UserError", "ValidationError"]
        assert e.value.target == ErrorTarget.BATCH

    def test_batch_cancel(self):
        """Cancelling mid-run flips _is_canceled and yields a Canceled result."""
        # use a thread to submit a batch run
        batch_engine, batch_run_thread = self._submit_batch_run(run_in_thread=True)
        assert batch_engine._is_canceled is False
        batch_run_thread.start()
        # cancel the batch run
        batch_engine.cancel()
        batch_run_thread.join()
        assert batch_engine._is_canceled is True
        # batch_result_global is set by the worker thread in _batch_run_in_thread.
        assert batch_result_global.status == Status.Canceled
        assert batch_result_global.system_metrics.duration > 0

    def _submit_batch_run(
        self, run_in_thread=False, has_error=False, init_error_file=None
    ) -> Union[Tuple[BatchEngine, threading.Thread], Tuple[BatchEngine, BatchResult]]:
        """Build a BatchEngine for the csharp_flow fixture and either run it
        synchronously or hand back an unstarted worker thread."""
        flow_folder = "csharp_flow"
        mem_run_storage = MemoryRunStorage()
        # init the batch engine
        batch_engine = BatchEngine(
            get_yaml_file(flow_folder),
            get_flow_folder(flow_folder),
            storage=mem_run_storage,
            has_error=has_error,
            init_error_file=init_error_file,
        )
        # prepare the inputs
        input_dirs = {"data": get_flow_inputs_file(flow_folder)}
        inputs_mapping = {"question": "${data.question}"}
        output_dir = Path(mkdtemp())
        if run_in_thread:
            return batch_engine, threading.Thread(
                target=self._batch_run_in_thread, args=(batch_engine, input_dirs, inputs_mapping, output_dir)
            )
        else:
            return batch_engine, batch_engine.run(input_dirs, inputs_mapping, output_dir)

    def _batch_run_in_thread(self, batch_engine: BatchEngine, input_dirs, inputs_mapping, output_dir):
        """Thread target: publish the run result through the module-level global."""
        global batch_result_global
        batch_result_global = batch_engine.run(input_dirs, inputs_mapping, output_dir)
class MockCSharpExecutorProxy(CSharpExecutorProxy):
    """In-process stand-in for the C# executor: serves the executor HTTP API
    from a child process instead of launching a real dotnet executor."""

    def __init__(self, process: multiprocessing.Process, port: str):
        self._process = process
        self._port = port

    @classmethod
    async def create(
        cls,
        flow_file: Path,
        working_dir: Optional[Path] = None,
        *,
        connections: Optional[dict] = None,
        storage: Optional[AbstractRunStorage] = None,
        **kwargs,
    ) -> "MockCSharpExecutorProxy":
        """Create a new executor: spawn the mock server process on a free port
        and wait until it reports startup (or an init error)."""
        has_error = kwargs.get("has_error", False)
        init_error_file = kwargs.get("init_error_file", None)
        port = cls.find_available_port()
        process = multiprocessing.Process(
            target=run_executor_server,
            args=(
                int(port),
                has_error,
                init_error_file,
            ),
        )
        process.start()
        executor_proxy = cls(process, port)
        await executor_proxy.ensure_executor_startup(init_error_file)
        return executor_proxy

    async def destroy(self):
        """Destroy the executor: terminate the server process, escalating to kill."""
        if self._process and self._process.is_alive():
            self._process.terminate()
            # Process.join() never raises on timeout -- it simply returns with the
            # process possibly still alive -- so the previous `except TimeoutError`
            # branch was dead code and kill() was never reached. Check explicitly.
            self._process.join(timeout=5)
            if self._process.is_alive():
                self._process.kill()

    def _is_executor_active(self):
        # Active as long as the server process exists and has not exited.
        return self._process and self._process.is_alive()
| promptflow/src/promptflow/tests/executor/e2etests/test_csharp_executor_proxy.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/e2etests/test_csharp_executor_proxy.py",
"repo_id": "promptflow",
"token_count": 2303
} | 54 |
{
"custom_llm_tool.TestCustomLLMTool.call": {
"class_name": "TestCustomLLMTool",
"function": "call",
"inputs": {
"connection": {"type": ["AzureOpenAIConnection"]},
"connection_2": {"type": ["AzureOpenAIConnection"]},
"api": {"type": ["string"]},
"template": {"type": ["PromptTemplate"]}
},
"module": "custom_llm_tool",
"name": "Test Custom LLM Tool",
"description": "Test Custom LLM Tool",
"type": "python"
}
}
| promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/package_tool_definition.json/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/package_tool_definition.json",
"repo_id": "promptflow",
"token_count": 261
} | 55 |
import logging
import sys
import time
from multiprocessing.pool import ThreadPool
import pytest
from dateutil.parser import parse
from promptflow._core.log_manager import NodeLogManager, NodeLogWriter
# Fixed identifiers shared by every test in this module.
RUN_ID = "dummy_run_id"
NODE_NAME = "dummy_node"
LINE_NUMBER = 1
def assert_print_result(i: int, run_logger: NodeLogWriter):
    """Print ``i`` under a per-run log context and verify it was captured for that run."""
    run_id = f"{RUN_ID}-{i}"
    run_logger.set_node_info(run_id, NODE_NAME, LINE_NUMBER)
    # Stagger the workers so their writes interleave across threads.
    time.sleep(i / 10)
    print(i)
    assert_datetime_prefix(run_logger.get_log(run_id), f"{i}\n")
def is_datetime(string: str) -> bool:
    """Check if a string follows datetime format."""
    try:
        parse(string)
    except ValueError:
        return False
    return True
def assert_datetime_prefix(string: str, expected_str: str):
    """Assert if string has a datetime prefix, such as:
    [2023-04-17T07:49:54+0000] example string
    """
    open_idx = string.index("[")
    close_idx = string.index("]")
    prefix = string[open_idx + 1 : close_idx]
    # Skip the closing bracket and the following space.
    remainder = string[close_idx + 2 :]
    assert is_datetime(prefix)
    assert remainder == expected_str
@pytest.mark.unittest
class TestNodeLogManager:
    """Tests for NodeLogManager's per-node capture of stdout/stderr."""

    def test_get_logs(self):
        """Captured output is keyed by run id and dropped once the context is cleared."""
        with NodeLogManager(record_datetime=False) as lm:
            lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
            print("test")
            print("test2")
            print("test stderr", file=sys.stderr)
            assert lm.get_logs(RUN_ID).get("stdout") == "test\ntest2\n"
            assert lm.get_logs(RUN_ID).get("stderr") == "test stderr\n"
            lm.clear_node_context(RUN_ID)
            assert lm.get_logs(RUN_ID).get("stdout") is None
            assert lm.get_logs(RUN_ID).get("stderr") is None

    def test_logging(self):
        """Output routed through logging StreamHandlers is captured too."""
        with NodeLogManager(record_datetime=False) as lm:
            lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
            stdout_logger = logging.getLogger("stdout")
            stderr_logger = logging.getLogger("stderr")
            # Handlers bind to the (redirected) sys.stdout/sys.stderr inside the context.
            stdout_logger.addHandler(logging.StreamHandler(stream=sys.stdout))
            stderr_logger.addHandler(logging.StreamHandler(stream=sys.stderr))
            stdout_logger.warning("test stdout")
            stderr_logger.warning("test stderr")
            logs = lm.get_logs(RUN_ID)
            assert logs.get("stdout") == "test stdout\n"
            assert logs.get("stderr") == "test stderr\n"

    def test_exit_context_manager(self):
        """While active the manager's writer IS sys.stdout; on exit it is restored."""
        with NodeLogManager() as lm:
            assert lm.stdout_logger is sys.stdout
        assert lm.stdout_logger != sys.stdout

    def test_datetime_prefix(self):
        """With record_datetime=True every captured line gets a bracketed timestamp."""
        with NodeLogManager(record_datetime=True) as lm:
            lm.set_node_context(RUN_ID, NODE_NAME, LINE_NUMBER)
            print("test")
            print("test2")
            output = lm.get_logs(RUN_ID).get("stdout")
            outputs = output.split("\n")
            assert_datetime_prefix(outputs[0], "test")
            assert_datetime_prefix(outputs[1], "test2")
            # Trailing newline leaves one empty element after the split.
            assert outputs[2] == ""
@pytest.mark.unittest
class TestNodeLogWriter:
    """Unit tests for NodeLogWriter's per-run stdout capture.

    NOTE(review): test_get_log and test_multi_thread reassign sys.stdout without
    restoring it afterwards -- later tests in the session inherit the writer;
    confirm this is intended.
    """

    def test_set_node_info(self):
        """A run's log buffer exists (empty) only after set_node_info."""
        run_logger = NodeLogWriter(sys.stdout)
        assert run_logger.get_log(RUN_ID) is None
        run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
        assert run_logger.get_log(RUN_ID) == ""

    def test_clear_node_info(self):
        """clear_node_info drops the run's buffer and tolerates unknown run ids."""
        run_logger = NodeLogWriter(sys.stdout)
        run_logger.clear_node_info(RUN_ID)
        run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
        run_logger.clear_node_info(RUN_ID)
        assert run_logger.run_id_to_stdout.get(RUN_ID) is None

    def test_get_log(self):
        """Output is captured per run id only while the node info is set."""
        run_logger = NodeLogWriter(sys.stdout)
        sys.stdout = run_logger
        print("test")
        assert run_logger.get_log(RUN_ID) is None
        run_logger.set_node_info(RUN_ID, NODE_NAME, LINE_NUMBER)
        print("test")
        assert_datetime_prefix(run_logger.get_log(RUN_ID), "test\n")
        run_logger.clear_node_info(RUN_ID)
        assert run_logger.get_log(RUN_ID) is None

    def test_multi_thread(self):
        """Concurrent writers each capture output under their own run id."""
        run_logger = NodeLogWriter(sys.stdout)
        sys.stdout = run_logger
        with ThreadPool(processes=10) as pool:
            # starmap blocks until every worker has run its assertions; the workers
            # return None, so the previous `for r in results: pass` loop was dead
            # code and has been removed.
            pool.starmap(assert_print_result, ((i, run_logger) for i in range(10)))
| promptflow/src/promptflow/tests/executor/unittests/_core/test_log_manager.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/_core/test_log_manager.py",
"repo_id": "promptflow",
"token_count": 2053
} | 56 |
from pathlib import Path
from unittest.mock import Mock
import pytest
from promptflow._utils.multimedia_data_converter import (
AbstractMultimediaInfoConverter,
MultimediaConverter,
MultimediaFormatAdapter20231201,
MultimediaInfo,
ResourceType,
)
@pytest.mark.unittest
class TestMultimediaConverter:
    """Tests for MultimediaConverter's recursive content traversal."""

    def test_convert_content_recursively(self):
        """Every multimedia leaf (nested in dicts/lists) is passed through the converter."""
        converter = MultimediaConverter(Path("flow.yaml"))
        # Don't convert anything.
        content = {
            "image": {"data:image/jpg;url": "https://example.com/logo.jpg"},
            "images": [
                {"data:image/jpg;url": "https://example.com/logo.jpg"},
                {"data:image/jpg;base64": "base64 string"},
            ],
            "object": {"image": {"data:image/png;path": "random_path"}, "other_data": "other_data"},
        }
        mock_converter = Mock(spec=AbstractMultimediaInfoConverter)
        # Identity conversion: output must equal input.
        mock_converter.convert.side_effect = lambda x: x
        result = converter.convert_content_recursively(content, mock_converter)
        assert result == content
        # Convert all valid images.
        # Constant conversion: every valid image leaf becomes the same path form;
        # non-multimedia values ("other_data") are left untouched.
        mock_converter.convert.side_effect = lambda x: MultimediaInfo("image/jpg", ResourceType("path"), "logo.jpg")
        result = converter.convert_content_recursively(content, mock_converter)
        expected_result = {
            "image": {"data:image/jpg;path": "logo.jpg"},
            "images": [
                {"data:image/jpg;path": "logo.jpg"},
                {"data:image/jpg;path": "logo.jpg"},
            ],
            "object": {"image": {"data:image/jpg;path": "logo.jpg"}, "other_data": "other_data"},
        }
        assert result == expected_result
@pytest.mark.unittest
class TestMultimediaFormatAdapter20231201:
    """Tests for the 2023-12-01 multimedia data-key format adapter."""

    def test_is_valid_format(self):
        """Only image mime types with a path/url data key are accepted."""
        adapter = MultimediaFormatAdapter20231201()
        assert adapter.is_valid_format({"data:image/jpg;path": "logo.jpg"})
        assert adapter.is_valid_format({"data:image/jpg;url": "https://example.com/logo.jpg"})
        assert not adapter.is_valid_format({"data:audio/mp3;path": "audio.mp3"})
        assert not adapter.is_valid_format({"data:video/mp4;url": "https://example.com/video.mp4"})

    def test_extract_info(self):
        """extract_info parses well-formed data keys and returns None otherwise."""
        adapter = MultimediaFormatAdapter20231201()
        # Valid formats
        expected_result = MultimediaInfo("image/jpg", ResourceType.PATH, "random_path")
        assert adapter.extract_info({"data:image/jpg;path": "random_path"}) == expected_result
        expected_result = MultimediaInfo("image/jpg", ResourceType.URL, "random_url")
        assert adapter.extract_info({"data:image/jpg;url": "random_url"}) == expected_result
        expected_result = MultimediaInfo("image/jpg", ResourceType.BASE64, "random_base64")
        assert adapter.extract_info({"data:image/jpg;base64": "random_base64"}) == expected_result
        # Invalid format
        assert adapter.extract_info({"data:video/mp4;url": "https://example.com/video.mp4"}) is None
        assert adapter.extract_info({"data:image/mp4;url2": "https://example.com/video.mp4"}) is None
        assert adapter.extract_info({"content:image/mp4;path": "random_path"}) is None

    def test_create_data(self):
        """create_data renders MultimediaInfo back into the data-key dict form."""
        adapter = MultimediaFormatAdapter20231201()
        info = MultimediaInfo("image/jpg", ResourceType.PATH, "random_path")
        expected_result = {"data:image/jpg;path": "random_path"}
        assert adapter.create_data(info) == expected_result
        info = MultimediaInfo("image/jpg", ResourceType.URL, "random_url")
        expected_result = {"data:image/jpg;url": "random_url"}
        assert adapter.create_data(info) == expected_result
        info = MultimediaInfo("image/jpg", ResourceType.BASE64, "base64 string")
        expected_result = {"data:image/jpg;base64": "base64 string"}
        assert adapter.create_data(info) == expected_result
        # Bad case when client provides invalid resource type.
        # (An unused `expected_result` assignment that preceded this raise-check
        # was dead code and has been removed.)
        info = MultimediaInfo("image/jpg", "path", "base64 string")
        with pytest.raises(AttributeError):
            adapter.create_data(info)
| promptflow/src/promptflow/tests/executor/unittests/_utils/test_multimedia_data_converter.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/_utils/test_multimedia_data_converter.py",
"repo_id": "promptflow",
"token_count": 1685
} | 57 |
import pytest
from promptflow.contracts.run_mode import RunMode
@pytest.mark.unittest
@pytest.mark.parametrize(
    "run_mode, expected",
    [
        ("Test", RunMode.Test),
        ("SingleNode", RunMode.SingleNode),
        ("Batch", RunMode.Batch),
        ("Default", RunMode.Test),  # unrecognized strings appear to fall back to Test -- TODO confirm
    ],
)
def test_parse(run_mode, expected):
    """Each supported run-mode string parses to the matching RunMode member."""
    assert RunMode.parse(run_mode) == expected
@pytest.mark.unittest
def test_parse_invalid():
    """Non-string input is rejected with ValueError."""
    with pytest.raises(ValueError):
        RunMode.parse(123)
| promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_mode.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_mode.py",
"repo_id": "promptflow",
"token_count": 210
} | 58 |
import json
import pytest
from ..recording_utilities import is_live
# Long realistic input used to exercise the /score endpoint
# (excerpt about the Azure Cosmos DB change feed / event sourcing).
testdata = """The event sourcing pattern involves using an append-only store to record the full series
of actions on that data. The Azure Cosmos DB change feed is a great choice as a central data store in
event sourcing architectures in which all data ingestion is modeled as writes (no updates or deletes).
In this case, each write to Azure Cosmos DB is an \"event,\" so there's a full record of past events
in the change feed. Typical uses of the events published by the central event store are to maintain materialized
views or to integrate with external systems. Because there's no time limit for retention in the change feed latest
version mode, you can replay all past events by reading from the beginning of your Azure Cosmos DB container's
change feed. You can even have multiple change feed consumers subscribe to the same container's change feed."""
@pytest.mark.skipif(condition=not is_live(), reason="serving tests, only run in live mode.")
@pytest.mark.usefixtures("flow_serving_client_remote_connection")
@pytest.mark.e2etest
def test_local_serving_api_with_remote_connection(flow_serving_client_remote_connection):
    """Local serving app backed by a remote (workspace) connection answers /health and /score."""
    response = flow_serving_client_remote_connection.get("/health")
    assert b'{"status":"Healthy","version":"0.0.1"}' in response.data
    response = flow_serving_client_remote_connection.post("/score", data=json.dumps({"text": "hi"}))
    assert (
        response.status_code == 200
    ), f"Response code indicates error {response.status_code} - {response.data.decode()}"
    assert "output_prompt" in json.loads(response.data.decode())
@pytest.mark.skipif(condition=not is_live(), reason="serving tests, only run in live mode.")
@pytest.mark.usefixtures("flow_serving_client_with_prt_config_env")
@pytest.mark.e2etest
def test_azureml_serving_api_with_prt_config_env(flow_serving_client_with_prt_config_env):
    """Serving app configured through the PRT config environment variable serves health, score and root."""
    response = flow_serving_client_with_prt_config_env.get("/health")
    assert b'{"status":"Healthy","version":"0.0.1"}' in response.data
    response = flow_serving_client_with_prt_config_env.post("/score", data=json.dumps({"text": "hi"}))
    assert (
        response.status_code == 200
    ), f"Response code indicates error {response.status_code} - {response.data.decode()}"
    assert "output_prompt" in json.loads(response.data.decode())
    # Root page should render the welcome UI.
    response = flow_serving_client_with_prt_config_env.get("/")
    assert b"Welcome to promptflow app" in response.data
@pytest.mark.skipif(condition=not is_live(), reason="serving tests, only run in live mode.")
@pytest.mark.usefixtures("flow_serving_client_with_connection_provider_env")
@pytest.mark.e2etest
def test_azureml_serving_api_with_conn_provider_env(flow_serving_client_with_connection_provider_env):
    """Serving app resolving connections via the provider environment variable serves health, score and root."""
    response = flow_serving_client_with_connection_provider_env.get("/health")
    assert b'{"status":"Healthy","version":"0.0.1"}' in response.data
    response = flow_serving_client_with_connection_provider_env.post("/score", data=json.dumps({"text": "hi"}))
    assert (
        response.status_code == 200
    ), f"Response code indicates error {response.status_code} - {response.data.decode()}"
    assert "output_prompt" in json.loads(response.data.decode())
    # Root page should render the welcome UI.
    response = flow_serving_client_with_connection_provider_env.get("/")
    assert b"Welcome to promptflow app" in response.data
@pytest.mark.skipif(condition=not is_live(), reason="serving tests, only run in live mode.")
# Fix: the marker previously named "flow_serving_client_with_connection_provider_env"
# (copy-paste from the preceding test), instantiating an unrelated fixture alongside
# the one actually used via the function parameter.
@pytest.mark.usefixtures("flow_serving_client_with_aml_resource_id_env")
@pytest.mark.e2etest
def test_azureml_serving_api_with_aml_resource_id_env(flow_serving_client_with_aml_resource_id_env):
    """Serving app resolving its connection from an AML resource-id env variable serves health and score."""
    response = flow_serving_client_with_aml_resource_id_env.get("/health")
    assert b'{"status":"Healthy","version":"0.0.1"}' in response.data
    response = flow_serving_client_with_aml_resource_id_env.post("/score", data=json.dumps({"text": "hi"}))
    assert (
        response.status_code == 200
    ), f"Response code indicates error {response.status_code} - {response.data.decode()}"
    assert "output_prompt" in json.loads(response.data.decode())
@pytest.mark.skipif(condition=not is_live(), reason="serving tests, only run in live mode.")
@pytest.mark.usefixtures("serving_client_with_connection_name_override")
@pytest.mark.e2etest
def test_azureml_serving_api_with_connection_name_override(serving_client_with_connection_name_override):
    """With a connection-name override, the placeholder value must not leak into the response.

    NOTE(review): the flow presumably echoes connection config values; with the name
    override the literal "api_base" value should be absent -- confirm fixture semantics.
    """
    response = serving_client_with_connection_name_override.get("/health")
    assert b'{"status":"Healthy","version":"0.0.1"}' in response.data
    response = serving_client_with_connection_name_override.post("/score", data=json.dumps({"text": testdata}))
    assert (
        response.status_code == 200
    ), f"Response code indicates error {response.status_code} - {response.data.decode()}"
    assert "api_base" not in json.loads(response.data.decode()).values()
# NOTE(review): unlike its siblings this test has no `is_live` skipif -- it may run in
# recorded mode; confirm that is intended.
@pytest.mark.usefixtures("serving_client_with_connection_data_override")
@pytest.mark.e2etest
def test_azureml_serving_api_with_connection_data_override(serving_client_with_connection_data_override):
    """With a connection-data override, the overridden value should appear in the response."""
    response = serving_client_with_connection_data_override.get("/health")
    assert b'{"status":"Healthy","version":"0.0.1"}' in response.data
    response = serving_client_with_connection_data_override.post("/score", data=json.dumps({"text": "hi"}))
    assert (
        response.status_code == 200
    ), f"Response code indicates error {response.status_code} - {response.data.decode()}"
    assert "api_base" in json.loads(response.data.decode()).values()
| promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_serve.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_serve.py",
"repo_id": "promptflow",
"token_count": 1820
} | 59 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import mock
import pytest
from promptflow import PFClient
from promptflow._sdk.operations._connection_operations import ConnectionOperations
from promptflow._sdk.operations._local_azure_connection_operations import LocalAzureConnectionOperations
from promptflow.exceptions import UserErrorException
from ..recording_utilities import is_live
# Building blocks for an AzureML workspace ARM resource id.
AZUREML_RESOURCE_PROVIDER = "Microsoft.MachineLearningServices"
RESOURCE_ID_FORMAT = "/subscriptions/{}/resourceGroups/{}/providers/{}/workspaces/{}"
@pytest.mark.sdk_test
@pytest.mark.e2etest
class TestPFClient:
    """Tests for PFClient's connection-provider resolution."""

    # Test pf client when connection provider is azureml.
    # This tests suites need azure dependencies.
    @pytest.mark.skipif(condition=not is_live(), reason="This test requires an actual PFClient")
    def test_connection_provider(self, subscription_id: str, resource_group_name: str, workspace_name: str):
        """PFClient picks the connection operations implementation from the provider string."""
        # Patch the SDK Configuration so get_connection_provider returns controlled values.
        target = "promptflow._sdk._pf_client.Configuration"
        with mock.patch(target) as mocked:
            # Unknown provider name -> UserErrorException.
            mocked.return_value.get_connection_provider.return_value = "abc"
            with pytest.raises(UserErrorException) as e:
                client = PFClient()
                assert client.connections
            assert "Unsupported connection provider" in str(e.value)
        with mock.patch(target) as mocked:
            # "azureml:" prefix with a malformed resource id -> ValueError.
            mocked.return_value.get_connection_provider.return_value = "azureml:xx"
            with pytest.raises(ValueError) as e:
                client = PFClient()
                assert client.connections
            assert "Malformed connection provider string" in str(e.value)
        with mock.patch(target) as mocked:
            # "local" -> local ConnectionOperations.
            mocked.return_value.get_connection_provider.return_value = "local"
            client = PFClient()
            assert isinstance(client.connections, ConnectionOperations)
        with mock.patch(target) as mocked:
            # Well-formed workspace resource id -> LocalAzureConnectionOperations.
            mocked.return_value.get_connection_provider.return_value = "azureml:" + RESOURCE_ID_FORMAT.format(
                subscription_id, resource_group_name, AZUREML_RESOURCE_PROVIDER, workspace_name
            )
            client = PFClient()
            assert isinstance(client.connections, LocalAzureConnectionOperations)
            # The provider can also be supplied directly through the config dict.
            client = PFClient(
                config={
                    "connection.provider": "azureml:"
                    + RESOURCE_ID_FORMAT.format(
                        subscription_id, resource_group_name, AZUREML_RESOURCE_PROVIDER, workspace_name
                    )
                }
            )
            assert isinstance(client.connections, LocalAzureConnectionOperations)

    def test_local_azure_connection_extract_workspace(self):
        """_extract_workspace parses (subscription, resource group, workspace) from provider strings."""
        res = LocalAzureConnectionOperations._extract_workspace(
            "azureml://subscriptions/123/resourceGroups/456/providers/Microsoft.MachineLearningServices/workspaces/789"
        )
        assert res == ("123", "456", "789")
        # Short form without the provider segment is accepted too.
        res = LocalAzureConnectionOperations._extract_workspace(
            "azureml://subscriptions/123/resourcegroups/456/workspaces/789"
        )
        assert res == ("123", "456", "789")
        with pytest.raises(ValueError) as e:
            LocalAzureConnectionOperations._extract_workspace("azureml:xx")
        assert "Malformed connection provider string" in str(e.value)
        with pytest.raises(ValueError) as e:
            # Trailing slash (empty workspace name) is rejected.
            LocalAzureConnectionOperations._extract_workspace(
                "azureml://subscriptions/123/resourceGroups/456/providers/Microsoft.MachineLearningServices/workspaces/"
            )
        assert "Malformed connection provider string" in str(e.value)
| promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_pf_client.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_pf_client.py",
"repo_id": "promptflow",
"token_count": 1445
} | 60 |
import uuid
from pathlib import Path
import pydash
import pytest
from promptflow._sdk._constants import SCRUBBED_VALUE, CustomStrongTypeConnectionConfigs
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities import CustomStrongTypeConnection
from promptflow.contracts.types import Secret
class MyCustomConnection(CustomStrongTypeConnection):
    """Minimal custom strong-type connection used as the fixture for these tests."""

    # api_key is stored as a secret; api_base as a plain config value.
    api_key: Secret
    api_base: str
# Shared local PF client plus the location of the connection YAML fixtures.
_client = PFClient()
TEST_ROOT = Path(__file__).parent.parent.parent
CONNECTION_ROOT = TEST_ROOT / "test_configs/connections"
@pytest.mark.cli_test
@pytest.mark.e2etest
class TestCustomStrongTypeConnection:
    """CRUD round-trip tests for custom strong-type connections via the local PF client."""

    def test_connection_operations(self):
        """Create / get / update / list / delete a custom connection."""
        name = f"Connection_{str(uuid.uuid4())[:4]}"
        conn = MyCustomConnection(name=name, secrets={"api_key": "test"}, configs={"api_base": "test"})
        # Create
        _client.connections.create_or_update(conn)
        # Get
        result = _client.connections.get(name)
        # Secrets are scrubbed on read; the custom type/module are recorded in configs.
        assert pydash.omit(result._to_dict(), ["created_date", "last_modified_date", "name"]) == {
            "module": "promptflow.connections",
            "type": "custom",
            "configs": {
                "api_base": "test",
                "promptflow.connection.custom_type": "MyCustomConnection",
                "promptflow.connection.module": "sdk_cli_test.e2etests.test_custom_strong_type_connection",
            },
            "secrets": {"api_key": "******"},
        }
        # Update
        conn.configs["api_base"] = "test2"
        result = _client.connections.create_or_update(conn)
        assert pydash.omit(result._to_dict(), ["created_date", "last_modified_date", "name"]) == {
            "module": "promptflow.connections",
            "type": "custom",
            "configs": {
                "api_base": "test2",
                "promptflow.connection.custom_type": "MyCustomConnection",
                "promptflow.connection.module": "sdk_cli_test.e2etests.test_custom_strong_type_connection",
            },
            "secrets": {"api_key": "******"},
        }
        # List
        result = _client.connections.list()
        assert len(result) > 0
        # Delete
        _client.connections.delete(name)
        with pytest.raises(Exception) as e:
            _client.connections.get(name)
        assert "is not found." in str(e.value)

    def test_connection_update(self):
        """Updating a connection fetched from the service behaves like updating the original."""
        name = f"Connection_{str(uuid.uuid4())[:4]}"
        conn = MyCustomConnection(name=name, secrets={"api_key": "test"}, configs={"api_base": "test"})
        # Create
        _client.connections.create_or_update(conn)
        # Get
        custom_conn = _client.connections.get(name)
        assert pydash.omit(custom_conn._to_dict(), ["created_date", "last_modified_date", "name"]) == {
            "module": "promptflow.connections",
            "type": "custom",
            "configs": {
                "api_base": "test",
                "promptflow.connection.custom_type": "MyCustomConnection",
                "promptflow.connection.module": "sdk_cli_test.e2etests.test_custom_strong_type_connection",
            },
            "secrets": {"api_key": "******"},
        }
        # Update
        custom_conn.configs["api_base"] = "test2"
        result = _client.connections.create_or_update(custom_conn)
        assert pydash.omit(result._to_dict(), ["created_date", "last_modified_date", "name"]) == {
            "module": "promptflow.connections",
            "type": "custom",
            "configs": {
                "api_base": "test2",
                "promptflow.connection.custom_type": "MyCustomConnection",
                "promptflow.connection.module": "sdk_cli_test.e2etests.test_custom_strong_type_connection",
            },
            "secrets": {"api_key": "******"},
        }
        # List
        result = _client.connections.list()
        assert len(result) > 0
        # Delete
        _client.connections.delete(name)
        with pytest.raises(Exception) as e:
            _client.connections.get(name)
        assert "is not found." in str(e.value)

    def test_connection_get_and_update(self):
        """Round-tripping a scrubbed connection keeps the original secret values."""
        # Test api key not updated
        name = f"Connection_{str(uuid.uuid4())[:4]}"
        conn = MyCustomConnection(name=name, secrets={"api_key": "test"}, configs={"api_base": "test"})
        result = _client.connections.create_or_update(conn)
        assert result.secrets["api_key"] == SCRUBBED_VALUE
        # Update api_base only Assert no exception
        result.configs["api_base"] = "test2"
        result = _client.connections.create_or_update(result)
        assert result._to_dict()["configs"]["api_base"] == "test2"
        # Assert value not scrubbed
        assert result._secrets["api_key"] == "test"
        _client.connections.delete(name)
        # Invalid update
        # Emptying _secrets means the scrubbed placeholder can no longer be resolved.
        with pytest.raises(Exception) as e:
            result._secrets = {}
            _client.connections.create_or_update(result)
        assert "secrets ['api_key'] value invalid, please fill them" in str(e.value)

    def test_connection_get_and_update_with_key(self):
        """Strong-type attribute access stays in sync with the underlying configs dict."""
        # Test api key not updated
        name = f"Connection_{str(uuid.uuid4())[:4]}"
        conn = MyCustomConnection(name=name, secrets={"api_key": "test"}, configs={"api_base": "test"})
        assert conn.api_base == "test"
        assert conn.configs["api_base"] == "test"
        result = _client.connections.create_or_update(conn)
        converted_conn = result._convert_to_custom_strong_type(
            module=__class__.__module__, to_class="MyCustomConnection"
        )
        assert isinstance(converted_conn, MyCustomConnection)
        assert converted_conn.api_base == "test"
        converted_conn.api_base = "test2"
        assert converted_conn.api_base == "test2"
        assert converted_conn.configs["api_base"] == "test2"

    @pytest.mark.parametrize(
        "file_name, expected_updated_item, expected_secret_item",
        [
            ("custom_strong_type_connection.yaml", ("api_base", "new_value"), ("api_key", "<to-be-replaced>")),
        ],
    )
    def test_upsert_connection_from_file(
        self, install_custom_tool_pkg, file_name, expected_updated_item, expected_secret_item
    ):
        """CLI upsert: create from YAML, then update -- configs change, secrets are preserved."""
        from promptflow._cli._pf._connection import _upsert_connection_from_file

        name = f"Connection_{str(uuid.uuid4())[:4]}"
        result = _upsert_connection_from_file(file=CONNECTION_ROOT / file_name, params_override=[{"name": name}])
        assert result is not None
        assert result.configs[CustomStrongTypeConnectionConfigs.PROMPTFLOW_MODULE_KEY] == "my_tool_package.connections"
        update_file_name = f"update_{file_name}"
        result = _upsert_connection_from_file(file=CONNECTION_ROOT / update_file_name, params_override=[{"name": name}])
        # Test secrets not updated, and configs updated
        assert (
            result.configs[expected_updated_item[0]] == expected_updated_item[1]
        ), "Assert configs updated failed, expected: {}, actual: {}".format(
            expected_updated_item[1], result.configs[expected_updated_item[0]]
        )
        assert (
            result._secrets[expected_secret_item[0]] == expected_secret_item[1]
        ), "Assert secrets not updated failed, expected: {}, actual: {}".format(
            expected_secret_item[1], result._secrets[expected_secret_item[0]]
        )
| promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_custom_strong_type_connection.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_custom_strong_type_connection.py",
"repo_id": "promptflow",
"token_count": 3245
} | 61 |
import pytest
from promptflow._cli._pf.entry import get_parser_args
from promptflow._cli._utils import _get_cli_activity_name
def get_cli_activity_name(cmd):
    """Parse an argv-style CLI command (dropping argv[0]) and return its telemetry activity name."""
    parser_name, parsed = get_parser_args(list(cmd)[1:])
    return _get_cli_activity_name(cli=parser_name, args=parsed)
@pytest.mark.unittest
class TestCliTimeConsume:
    """Check that each parsed CLI command maps to its expected telemetry activity name."""

    def test_pf_run_create(self, activity_name="pf.run.create") -> None:
        cmd = [
            "pf", "run", "create",
            "--flow", "print_input_flow",
            "--data", "print_input_flow.jsonl",
        ]
        assert get_cli_activity_name(cmd=cmd) == activity_name

    def test_pf_run_update(self, activity_name="pf.run.update") -> None:
        cmd = [
            "pf", "run", "update",
            "--name", "test_name",
            "--set", "description=test pf run update",
        ]
        assert get_cli_activity_name(cmd=cmd) == activity_name

    def test_pf_flow_test(self, activity_name="pf.flow.test"):
        cmd = [
            "pf", "flow", "test",
            "--flow", "print_input_flow",
            "--inputs", "text=https://www.youtube.com/watch?v=o5ZQyXaAv1g",
        ]
        assert get_cli_activity_name(cmd=cmd) == activity_name

    def test_pf_flow_build(self, activity_name="pf.flow.build"):
        cmd = [
            "pf", "flow", "build",
            "--source", "print_input_flow/flow.dag.yaml",
            "--output", "./",
            "--format", "docker",
        ]
        assert get_cli_activity_name(cmd=cmd) == activity_name

    def test_pf_connection_create(self, activity_name="pf.connection.create"):
        cmd = [
            "pf", "connection", "create",
            "--file", "azure_openai_connection.yaml",
            "--name", "test_name",
        ]
        assert get_cli_activity_name(cmd=cmd) == activity_name

    def test_pf_connection_list(self, activity_name="pf.connection.list"):
        assert get_cli_activity_name(cmd=["pf", "connection", "list"]) == activity_name
| promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_cli_activity_name.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_cli_activity_name.py",
"repo_id": "promptflow",
"token_count": 1349
} | 62 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
from flask.app import Flask
from promptflow import PFClient
from .utils import PFSOperations
@pytest.fixture
def app() -> Flask:
    """Yield the local PFS Flask application configured for testing."""
    from promptflow._sdk._service.app import create_app

    flask_app, _ = create_app()
    flask_app.config.update({"TESTING": True})
    yield flask_app
@pytest.fixture
def pfs_op(app: Flask) -> PFSOperations:
    """Wrap the Flask test client of the `app` fixture in a PFSOperations helper."""
    return PFSOperations(app.test_client())
@pytest.fixture(scope="session")
def pf_client() -> PFClient:
    """Provide one PFClient shared across the whole test session."""
    return PFClient()
| promptflow/src/promptflow/tests/sdk_pfs_test/conftest.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_pfs_test/conftest.py",
"repo_id": "promptflow",
"token_count": 202
} | 63 |
name: my_custom_strong_type_connection
type: custom
custom_type: MyFirstConnection
module: my_tool_package.connections
package: test-custom-tools
package_version: 0.0.2
configs:
api_base: "This is my first connection."
secrets: # must-have
api_key: "<to-be-replaced>" | promptflow/src/promptflow/tests/test_configs/connections/custom_strong_type_connection.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/connections/custom_strong_type_connection.yaml",
"repo_id": "promptflow",
"token_count": 92
} | 64 |
{"name": "promptflow"} | promptflow/src/promptflow/tests/test_configs/datas/simple_hello_world.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/datas/simple_hello_world.jsonl",
"repo_id": "promptflow",
"token_count": 8
} | 65 |
entry: my_func
path: ./entry.py
nodes: [] | promptflow/src/promptflow/tests/test_configs/eager_flows/invalid_extra_fields_nodes/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/invalid_extra_fields_nodes/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 17
} | 66 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Experiment.schema.json
description: Basic experiment with command (script) nodes
data:
- name: my_data
path: ../../flows/web_classification/data.jsonl
inputs:
- name: count
type: int
default: 3
nodes:
- name: gen_data
type: command
command: python generate_data.py --input-path ${inputs.input_path} --count ${inputs.count} --output-path ${outputs.output_path}
code: ./generate_data
inputs:
input_path: ${data.my_data}
count: ${inputs.count}
outputs:
output_path:
environment_variables:
CONNECTION_KEY: ${azure_open_ai_connection.api_key}
- name: main
type: flow
path: ../../flows/web_classification/flow.dag.yaml
inputs:
url: ${gen_data.outputs.output_path.url}
variant: ${summarize_text_content.variant_0}
environment_variables: {}
connections: {}
- name: eval
type: flow
path: ../../flows/eval-classification-accuracy
inputs:
    groundtruth: ${data.my_data.answer} # note: no node may be named "data"; that name refers to experiment data
prediction: ${main.outputs.category}
environment_variables: {}
connections: {}
- name: echo
type: command
command: echo ${inputs.input_path} > ${outputs.output_path}/output.txt
inputs:
input_path: ${main.outputs}
outputs:
output_path:
| promptflow/src/promptflow/tests/test_configs/experiments/basic-script-template/basic-script.exp.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/experiments/basic-script-template/basic-script.exp.yaml",
"repo_id": "promptflow",
"token_count": 541
} | 67 |
{
"line_process.completed": 3,
"aggregate.failed": 1
} | promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/expected_status_summary.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/expected_status_summary.json",
"repo_id": "promptflow",
"token_count": 28
} | 68 |
import random
import time
from promptflow import tool
@tool
def get_stock_eod_price(date: str, company: str):
    """Get the stock end of day price by date and symbol.

    :param date: the date of the stock price. e.g. 2021-01-01
    :type date: str
    :param company: the company name like A, B, C
    :type company: str
    """
    print(f"Try to get the stock end of day price by date {date} and company {company}.")
    # Pause a random 0.2s-1s so traces show a realistic, non-zero duration.
    delay = random.uniform(0.2, 1)
    time.sleep(delay)
    # Mock quote: any value in the 110-130 band.
    return random.uniform(110, 130)
| promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/get_stock_eod_price.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/assistant-with-file/get_stock_eod_price.py",
"repo_id": "promptflow",
"token_count": 198
} | 69 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
text:
type: string
default: Python Hello World!
outputs:
output:
type: string
reference: ${llm.output}
nodes:
- name: hello_prompt
type: prompt
inputs:
text: ${inputs.text}
source:
type: code
path: hello.jinja2
- name: llm
type: llm
inputs:
prompt: ${hello_prompt.output}
deployment_name: gpt-35-turbo
model: gpt-3.5-turbo
max_tokens: '120'
source:
type: code
path: hello.jinja2
connection: azure_open_ai_connection
api: chat
node_variants: {}
| promptflow/src/promptflow/tests/test_configs/flows/basic_with_builtin_llm_node/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/basic_with_builtin_llm_node/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 258
} | 70 |
from promptflow import tool
from typing import Generator, List
def stream(question: str) -> Generator[str, None, None]:
    """Yield the input text one character at a time to emulate a streamed answer."""
    yield from question
@tool
def my_python_tool(chat_history: List[dict], question: str) -> dict:
    """Return the question as a character-stream answer; chat_history is accepted but unused."""
    answer_stream = stream(question)
    return {"answer": answer_stream}
| promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_python_node_streaming_output/stream.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_python_node_streaming_output/stream.py",
"repo_id": "promptflow",
"token_count": 89
} | 71 |
from time import sleep
from promptflow import tool
@tool
def wait(**args) -> str:
    """Sleep five seconds, then return the keyword arguments rendered as a string.

    Fixture used to exercise concurrent node execution: the fixed delay makes
    overlapping node runs observable.

    :param args: arbitrary keyword inputs; only their dict repr is returned.
    """
    # Deliberate delay so concurrency (or its absence) shows up in timings.
    sleep(5)
    return str(args)
| promptflow/src/promptflow/tests/test_configs/flows/concurrent_execution_flow/wait_long.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/concurrent_execution_flow/wait_long.py",
"repo_id": "promptflow",
"token_count": 42
} | 72 |
from promptflow import tool
@tool
def collect(input1, input2: str="") -> dict:
    """Combine the two upstream results into one dict keyed 'double' and 'square'.

    :param input1: value for the 'double' key.
    :param input2: value for the 'square' key; defaults to "" — presumably when the
        producing node was bypassed (flow name suggests so) — confirm against the DAG.
    """
    return {'double': input1, 'square': input2}
| promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/collect_node.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/collect_node.py",
"repo_id": "promptflow",
"token_count": 42
} | 73 |
from typing import List
from promptflow import tool
@tool
def test_print_input(input_str: List[str], input_bool: List[bool], input_list: List[List], input_dict: List[dict]):
    """Aggregation tool asserting each aggregated input kept its falsy default, then echoing input_str.

    :param input_str: aggregated string inputs; printed and returned unchanged.
    :param input_bool: aggregated bools; first element must be the default False.
    :param input_list: aggregated lists; first element must be the default [].
    :param input_dict: aggregated dicts; first element must be the default {}.
    """
    # PEP 8 (E712): compare to False with `is`, not `==` — the input is declared bool.
    assert input_bool[0] is False
    assert input_list[0] == []
    assert input_dict[0] == {}
    print(input_str)
    return input_str
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/default_input/test_print_aggregation.py",
"repo_id": "promptflow",
"token_count": 112
} | 74 |
from promptflow import tool
@tool
def merge_images(image_1: list, image_2: list, image_3: list):
    """Collect the first element of each input list, deduplicated via a set.

    Note: set iteration order is arbitrary, so output order is not stable.
    """
    merged = {image_1[0], image_2[0], image_3[0]}
    return list(merged)
| promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_simple_image/merge_images.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/eval_flow_with_simple_image/merge_images.py",
"repo_id": "promptflow",
"token_count": 93
} | 75 |
{
"CUSTOM_CONNECTION_AZURE_OPENAI_API_KEY": ""
} | promptflow/src/promptflow/tests/test_configs/flows/export/linux/settings.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/export/linux/settings.json",
"repo_id": "promptflow",
"token_count": 26
} | 76 |
from promptflow import tool
@tool
def print_val(val, origin_val):
    """Echo both inputs, enforce that origin_val is a dict, and pass val through.

    :raises TypeError: when origin_val is not a dict.
    """
    for item in (val, origin_val):
        print(item)
    if not isinstance(origin_val, dict):
        # NOTE(review): message says "key" but the check is on origin_val — confirm intent.
        raise TypeError(f"key must be a dict, got {type(origin_val)}")
    return val
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input/print_val.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input/print_val.py",
"repo_id": "promptflow",
"token_count": 90
} | 77 |
import package_not_exist
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_invalid_import/hello.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_invalid_import/hello.py",
"repo_id": "promptflow",
"token_count": 7
} | 78 |
from promptflow import tool
import random
import time
@tool
def my_python_tool_with_failed_line(idx: int, mod=5) -> int:
    """Return idx, except hang forever when idx is a multiple of mod.

    Timeout fixture: the deliberate infinite sleep loop makes exactly the lines
    where idx % mod == 0 exceed the per-line time limit.

    :param idx: the line index being processed.
    :param mod: every mod-th line (idx % mod == 0) hangs; defaults to 5.
    """
    if idx % mod == 0:
        # Intentional hang so the executor's line-timeout handling fires.
        while True:
            time.sleep(60)
    return idx
| promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/my_python_tool_with_failed_line.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/my_python_tool_with_failed_line.py",
"repo_id": "promptflow",
"token_count": 90
} | 79 |
{
"my_python_tool_with_failed_line_1.completed": 7,
"my_python_tool_with_failed_line_1.failed": 3,
"my_python_tool_with_failed_line_2.completed": 5,
"my_python_tool_with_failed_line_2.failed": 2
} | promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/expected_status_summary.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_partial_failure/expected_status_summary.json",
"repo_id": "promptflow",
"token_count": 96
} | 80 |
{
"name": "main",
"type": "python",
"inputs": {
"x": {
"type": [
"string"
]
}
},
"source": "dummy_utils/main.py",
"function": "main"
} | promptflow/src/promptflow/tests/test_configs/flows/script_with_import/dummy_utils/main.meta.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/script_with_import/dummy_utils/main.meta.json",
"repo_id": "promptflow",
"token_count": 93
} | 81 |
import statistics
from typing import List
from promptflow import tool
@tool
def aggregate_num(num: List[int]) -> float:
    """Aggregate the per-line numbers into their arithmetic mean.

    :param num: numbers collected across all processed lines.
    :raises statistics.StatisticsError: when num is empty.
    """
    return statistics.mean(num)
| promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_python_tool_and_aggregate/aggregate_num.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_python_tool_and_aggregate/aggregate_num.py",
"repo_id": "promptflow",
"token_count": 45
} | 82 |
name: node_wrong_order
inputs:
text:
type: string
outputs:
result:
type: string
reference: ${third_node}
nodes:
- name: third_node
type: python
source:
type: code
path: test.py
inputs:
text: ${second_node}
- name: first_node
type: python
source:
type: code
path: test.py
inputs:
text: ${inputs.text}
- name: second_node
type: python
source:
type: code
path: test.py
inputs:
text: ${first_node}
| promptflow/src/promptflow/tests/test_configs/flows/unordered_nodes/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/unordered_nodes/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 195
} | 83 |
FOO=BAR
| promptflow/src/promptflow/tests/test_configs/runs/env_file/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/runs/env_file",
"repo_id": "promptflow",
"token_count": 6
} | 84 |
{"batch_size": 1} | promptflow/src/promptflow/tests/test_configs/runs/web_classification_variant_0_20231205_120253_104100/meta.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/runs/web_classification_variant_0_20231205_120253_104100/meta.json",
"repo_id": "promptflow",
"token_count": 7
} | 85 |
from enum import Enum
from promptflow.entities import InputSetting
from promptflow import tool
class UserType(str, Enum):
    """Closed set of user roles referenced by the tool fixtures below."""

    # str mixin: members compare and serialize as plain strings.
    STUDENT = "student"
    TEACHER = "teacher"
@tool(name=1, description=1)  # negative fixture: name/description are deliberately non-string
def invalid_schema_type(input1: str) -> str:
    """Tool with invalid (non-string) decorator metadata; the body itself is trivial."""
    return 'hello ' + input1
@tool(
    name="invalid_input_settings",
    description="This is my tool with enabled by value",
    input_settings={
        # Both settings reference "invalid_input", which is not a parameter of the function below.
        "teacher_id": InputSetting(enabled_by="invalid_input", enabled_by_value=[UserType.TEACHER]),
        "student_id": InputSetting(enabled_by="invalid_input", enabled_by_value=[UserType.STUDENT]),
    }
)
def invalid_input_settings(user_type: UserType, student_id: str = "", teacher_id: str = "") -> str:
    """Negative fixture: input_settings point at a non-existent input; body intentionally returns nothing."""
    pass
@tool(name="invalid_tool_icon", icon="mock_icon_path", icon_dark="mock_icon_path", icon_light="mock_icon_path")  # presumably invalid: combined icon plus dark/light variants — confirm against tool schema
def invalid_tool_icon(input1: str) -> str:
    """Negative fixture for icon validation; the body itself is trivial."""
    return 'hello ' + input1
| promptflow/src/promptflow/tests/test_configs/tools/invalid_tool.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/tools/invalid_tool.py",
"repo_id": "promptflow",
"token_count": 333
} | 86 |
inputs: {}
outputs: {}
nodes:
- name: wrong_llm
type: llm
source:
type: code
path: wrong_llm.jinja2
inputs: {}
connection: custom_connection
| promptflow/src/promptflow/tests/test_configs/wrong_flows/flow_llm_with_wrong_conn/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/flow_llm_with_wrong_conn/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 65
} | 87 |
name: node_cycle
inputs:
text:
type: string
outputs:
result:
type: string
reference: ${second_node}
nodes:
- name: first_node
type: python
source:
type: code
path: test.py
inputs:
text: ${second_node}
aggregation: true
- name: second_node
type: python
source:
type: code
path: test.py
inputs:
text: ${first_node}
aggregation: true
| promptflow/src/promptflow/tests/test_configs/wrong_flows/nodes_cycle/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/nodes_cycle/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 155
} | 88 |
# Microsoft Open Source Code of Conduct
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
Resources:
- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
- Contact [[email protected]](mailto:[email protected]) with questions or concerns
| promptflow/CODE_OF_CONDUCT.md/0 | {
"file_path": "promptflow/CODE_OF_CONDUCT.md",
"repo_id": "promptflow",
"token_count": 115
} | 0 |
Subsets and Splits