| text | id | metadata | __index_level_0__ |
|---|---|---|---|
| stringlengths 8 to 1.72M | stringlengths 22 to 143 | dict | int64 0 to 104 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from enum import Enum
from typing import Any, Dict, Union
import requests
from azure.ai.ml._restclient.v2023_06_01_preview.models import WorkspaceConnectionPropertiesV2BasicResource
from azure.ai.ml._scope_dependent_operations import (
OperationConfig,
OperationsContainer,
OperationScope,
_ScopeDependentOperations,
)
from azure.core.exceptions import ClientAuthenticationError
from promptflow._sdk.entities._connection import CustomConnection, _Connection
from promptflow._utils.retry_utils import http_retry_wrapper
from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller
from promptflow.azure._utils.gerneral import get_arm_token
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException
GET_CONNECTION_URL = (
"/subscriptions/{sub}/resourcegroups/{rg}/providers/Microsoft.MachineLearningServices"
"/workspaces/{ws}/connections/{name}/listsecrets?api-version=2023-04-01-preview"
)
LIST_CONNECTION_URL = (
"/subscriptions/{sub}/resourcegroups/{rg}/providers/Microsoft.MachineLearningServices"
"/workspaces/{ws}/connections?api-version=2023-04-01-preview"
)
FLOW_META_PREFIX = "azureml.flow."
class ConnectionCategory(str, Enum):
AzureOpenAI = "AzureOpenAI"
CognitiveSearch = "CognitiveSearch"
CognitiveService = "CognitiveService"
CustomKeys = "CustomKeys"
def get_case_insensitive_key(d, key, default=None):
for k, v in d.items():
if k.lower() == key.lower():
return v
return default
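# Illustrative note (not part of the original module): the helper above performs a case-insensitive
# lookup, so metadata stored as "ApiVersion", "apiversion" or "APIVERSION" resolves identically.
# For example (hypothetical values):
#   get_case_insensitive_key({"ApiVersion": "2023-07-01-preview"}, "apiversion")   # -> "2023-07-01-preview"
#   get_case_insensitive_key({"ApiVersion": "2023-07-01-preview"}, "Kind", "none")  # -> "none"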
class ArmConnectionOperations(_ScopeDependentOperations):
"""ArmConnectionOperations.
Get connections from the ARM API. You should not instantiate this class directly. Instead, you should
create a PFClient instance that instantiates it for you and
attaches it as an attribute.
"""
def __init__(
self,
operation_scope: OperationScope,
operation_config: OperationConfig,
all_operations: OperationsContainer,
credential,
service_caller: FlowServiceCaller,
**kwargs: Dict,
):
super(ArmConnectionOperations, self).__init__(operation_scope, operation_config)
self._all_operations = all_operations
self._service_caller = service_caller
self._credential = credential
def get(self, name, **kwargs):
connection_dict = self.build_connection_dict(name)
return _Connection._from_execution_connection_dict(name=name, data=connection_dict)
@classmethod
def _direct_get(cls, name, subscription_id, resource_group_name, workspace_name, credential):
"""
This method is added for the local pf_client with the workspace provider to ensure we only require limited
permission (workspace/list secrets), since creating an azure pf_client requires workspace read permission.
"""
connection_dict = cls._build_connection_dict(
name, subscription_id, resource_group_name, workspace_name, credential
)
return _Connection._from_execution_connection_dict(name=name, data=connection_dict)
@classmethod
def open_url(cls, token, url, action, host="management.azure.com", method="GET", model=None) -> Union[Any, dict]:
"""
:type token: str
:type url: str
:type action: str; used in the error message format.
:type host: str
:type method: str
:type model: Type[msrest.serialization.Model]
"""
headers = {"Authorization": f"Bearer {token}"}
response = http_retry_wrapper(requests.request)(method, f"https://{host}{url}", headers=headers)
message_format = (
f"Open url {{url}} failed with status code: {response.status_code}, action: {action}, reason: {{reason}}"
)
if response.status_code == 403:
raise AccessDeniedError(operation=url, target=ErrorTarget.RUNTIME)
elif 400 <= response.status_code < 500:
raise OpenURLFailedUserError(
message_format=message_format,
url=url,
reason=response.reason,
)
elif response.status_code != 200:
raise OpenURLFailed(
message_format=message_format,
url=url,
reason=response.reason,
)
data = response.json()
if model:
return model.deserialize(data)
return data
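# Illustrative usage (hypothetical values; not part of the original module): the method above is
# typically invoked with a formatted ARM URL and, optionally, a msrest model for deserialization, e.g.:
#   ArmConnectionOperations.open_url(
#       token, url=GET_CONNECTION_URL.format(sub="...", rg="...", ws="...", name="my_conn"),
#       action="listsecrets", method="POST", model=WorkspaceConnectionPropertiesV2BasicResource,
#   )
# A 403 maps to AccessDeniedError, other 4xx codes to OpenURLFailedUserError, and any other non-200 to OpenURLFailed.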
@classmethod
def validate_and_fallback_connection_type(cls, name, type_name, category, metadata):
if type_name:
return type_name
if category == ConnectionCategory.AzureOpenAI:
return "AzureOpenAI"
if category == ConnectionCategory.CognitiveSearch:
return "CognitiveSearch"
if category == ConnectionCategory.CognitiveService:
kind = get_case_insensitive_key(metadata, "Kind")
if kind == "Content Safety":
return "AzureContentSafety"
if kind == "Form Recognizer":
return "FormRecognizer"
raise UnknownConnectionType(
message_format="Connection {name} is not recognized in PromptFlow, "
"please make sure the connection is created in PromptFlow.",
category=category,
name=name,
)
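# Illustrative fallback (hypothetical values; not part of the original module): a workspace connection
# with category "CognitiveService", metadata {"Kind": "Content Safety"} and no explicit
# "azureml.flow.connection_type" metadata entry falls back to the "AzureContentSafety" type name above.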
@classmethod
def build_connection_dict_from_rest_object(cls, name, obj) -> dict:
"""
:type name: str
:type obj: azure.ai.ml._restclient.v2023_06_01_preview.models.WorkspaceConnectionPropertiesV2BasicResource
"""
# Reference 1: https://msdata.visualstudio.com/Vienna/_git/vienna?path=/src/azureml-api/src/AccountRP/Contracts/WorkspaceConnection/WorkspaceConnectionDtoV2.cs&_a=blame&version=GBmaster # noqa: E501
# Reference 2: https://msdata.visualstudio.com/Vienna/_git/vienna?path=%2Fsrc%2Fazureml-api%2Fsrc%2FDesigner%2Fsrc%2FMiddleTier%2FMiddleTier%2FServices%2FPromptFlow%2FConnectionsManagement.cs&version=GBmaster&_a=contents # noqa: E501
# This connection type covers the generic ApiKey auth connection categories, for example:
# AzureOpenAI:
# Category:= AzureOpenAI
# AuthType:= ApiKey (as type discriminator)
# Credentials:= {ApiKey} as <see cref="ApiKey"/>
# Target:= {ApiBase}
#
# CognitiveService:
# Category:= CognitiveService
# AuthType:= ApiKey (as type discriminator)
# Credentials:= {SubscriptionKey} as <see cref="ApiKey"/>
# Target:= ServiceRegion={serviceRegion}
#
# CognitiveSearch:
# Category:= CognitiveSearch
# AuthType:= ApiKey (as type discriminator)
# Credentials:= {Key} as <see cref="ApiKey"/>
# Target:= {Endpoint}
#
# Use Metadata property bag for ApiType, ApiVersion, Kind and other metadata fields
properties = obj.properties
type_name = get_case_insensitive_key(properties.metadata, f"{FLOW_META_PREFIX}connection_type")
type_name = cls.validate_and_fallback_connection_type(name, type_name, properties.category, properties.metadata)
module = get_case_insensitive_key(properties.metadata, f"{FLOW_META_PREFIX}module", "promptflow.connections")
# Note: Category is connectionType in MT, but type name should be class name, which is flowValueType in MT.
# Handle old connections here, see details: https://github.com/Azure/promptflow/tree/main/connections
type_name = f"{type_name}Connection" if not type_name.endswith("Connection") else type_name
meta = {"type": type_name, "module": module}
if properties.category == ConnectionCategory.AzureOpenAI:
value = {
"api_key": properties.credentials.key,
"api_base": properties.target,
"api_type": get_case_insensitive_key(properties.metadata, "ApiType"),
"api_version": get_case_insensitive_key(properties.metadata, "ApiVersion"),
}
# Note: Resource id is required in some cloud scenarios, but it is not exposed on the sdk/cli entity.
resource_id = get_case_insensitive_key(properties.metadata, "ResourceId")
if resource_id:
value["resource_id"] = resource_id
elif properties.category == ConnectionCategory.CognitiveSearch:
value = {
"api_key": properties.credentials.key,
"api_base": properties.target,
"api_version": get_case_insensitive_key(properties.metadata, "ApiVersion"),
}
elif properties.category == ConnectionCategory.CognitiveService:
value = {
"api_key": properties.credentials.key,
"endpoint": properties.target,
"api_version": get_case_insensitive_key(properties.metadata, "ApiVersion"),
}
elif properties.category == ConnectionCategory.CustomKeys:
# Merge secrets from credentials.keys and other string fields from metadata
value = {
**properties.credentials.keys,
**{k: v for k, v in properties.metadata.items() if not k.startswith(FLOW_META_PREFIX)},
}
if type_name == CustomConnection.__name__:
meta["secret_keys"] = list(properties.credentials.keys.keys())
else:
raise UnknownConnectionType(
message_format=(
"Unknown connection {name} category {category}, "
"please upgrade your promptflow sdk version and retry."
),
category=properties.category,
name=name,
)
# Note: Filter empty values out so that default values can be picked up when initializing the entity object.
return {**meta, "value": {k: v for k, v in value.items() if v}}
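# Illustrative result shape (hypothetical values; not part of the original module): for an AzureOpenAI
# workspace connection, the method above returns roughly
#   {
#       "type": "AzureOpenAIConnection",
#       "module": "promptflow.connections",
#       "value": {"api_key": "...", "api_base": "...", "api_type": "...", "api_version": "..."},
#   }
# with empty values filtered out so that entity defaults can still apply.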
def build_connection_dict(self, name):
return self._build_connection_dict(
name,
self._operation_scope.subscription_id,
self._operation_scope.resource_group_name,
self._operation_scope.workspace_name,
self._credential,
)
@classmethod
def _convert_to_connection_dict(cls, conn_name, conn_data):
try:
rest_obj = WorkspaceConnectionPropertiesV2BasicResource.deserialize(conn_data)
conn_dict = cls.build_connection_dict_from_rest_object(conn_name, rest_obj)
return conn_dict
except Exception as e:
raise BuildConnectionError(
message_format=f"Build connection dict for connection {{name}} failed with {e}.",
name=conn_name,
)
@classmethod
def _build_connection_dict(cls, name, subscription_id, resource_group_name, workspace_name, credential) -> dict:
"""
:type name: str
:type subscription_id: str
:type resource_group_name: str
:type workspace_name: str
:type credential: azure.identity.TokenCredential
"""
url = GET_CONNECTION_URL.format(
sub=subscription_id,
rg=resource_group_name,
ws=workspace_name,
name=name,
)
try:
rest_obj: WorkspaceConnectionPropertiesV2BasicResource = cls.open_url(
get_arm_token(credential=credential),
url=url,
action="listsecrets",
method="POST",
model=WorkspaceConnectionPropertiesV2BasicResource,
)
except AccessDeniedError:
auth_error_message = (
"Access denied to list workspace secret due to invalid authentication. "
"Please ensure you have gain RBAC role 'Azure Machine Learning Workspace Connection Secrets Reader' "
"for current workspace, and wait for a few minutes to make sure the new role takes effect. "
)
raise OpenURLUserAuthenticationError(message=auth_error_message)
except ClientAuthenticationError as e:
raise UserErrorException(target=ErrorTarget.CONTROL_PLANE_SDK, message=str(e), error=e)
except Exception as e:
raise SystemErrorException(target=ErrorTarget.CONTROL_PLANE_SDK, message=str(e), error=e)
try:
return cls.build_connection_dict_from_rest_object(name, rest_obj)
except Exception as e:
raise BuildConnectionError(
message_format=f"Build connection dict for connection {{name}} failed with {e}.",
name=name,
)
class AccessDeniedError(UserErrorException):
"""Exception raised when run info can not be found in storage"""
def __init__(self, operation: str, target: ErrorTarget):
super().__init__(message=f"Access is denied to perform operation {operation!r}", target=target)
class OpenURLFailed(SystemErrorException):
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.CONTROL_PLANE_SDK, **kwargs)
class BuildConnectionError(SystemErrorException):
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.CONTROL_PLANE_SDK, **kwargs)
class UserAuthenticationError(UserErrorException):
"""Exception raised when user authentication failed"""
pass
class OpenURLUserAuthenticationError(UserAuthenticationError):
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.CONTROL_PLANE_SDK, **kwargs)
class OpenURLFailedUserError(UserErrorException):
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.CONTROL_PLANE_SDK, **kwargs)
class UnknownConnectionType(UserErrorException):
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.CONTROL_PLANE_SDK, **kwargs)
| promptflow/src/promptflow/promptflow/azure/operations/_arm_connection_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/operations/_arm_connection_operations.py",
"repo_id": "promptflow",
"token_count": 5724
} | 46 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from dataclasses import dataclass, is_dataclass
from promptflow._core.tools_manager import register_connections
from promptflow._sdk.entities import (
AzureContentSafetyConnection,
AzureOpenAIConnection,
CognitiveSearchConnection,
CustomConnection,
FormRecognizerConnection,
OpenAIConnection,
SerpConnection,
CustomStrongTypeConnection,
)
from promptflow._sdk.entities._connection import _Connection
from promptflow.contracts.types import Secret
@dataclass
class BingConnection:
api_key: Secret
url: str = "https://api.bing.microsoft.com/v7.0/search"
# We should use unified connection class everywhere.
# Do not add new connection class definition directly here.
# !!!Attention!!!: Do not add external package connections here.
__all__ = [
"OpenAIConnection",
"AzureOpenAIConnection",
"AzureContentSafetyConnection",
"SerpConnection",
"CognitiveSearchConnection",
"FormRecognizerConnection",
"CustomConnection",
"CustomStrongTypeConnection",
]
register_connections(
[v for v in globals().values() if is_dataclass(v) or (isinstance(v, type) and issubclass(v, _Connection))]
)
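# Note (added for clarity, not part of the original module): the comprehension above registers every
# module-level symbol that is either a dataclass (e.g. BingConnection) or a subclass of _Connection
# (the SDK connection entities imported above), presumably so the tools manager can resolve connection
# types by class name.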
| promptflow/src/promptflow/promptflow/connections/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/connections/__init__.py",
"repo_id": "promptflow",
"token_count": 399
} | 47 |
import inspect
from typing import Any, Callable, Dict, List, Mapping
from promptflow._utils.logger_utils import flow_logger
from promptflow.contracts.flow import InputAssignment, InputValueType, Node
from promptflow.executor import _input_assignment_parser
class DAGManager:
def __init__(self, nodes: List[Node], flow_inputs: dict):
self._nodes = nodes
self._flow_inputs = flow_inputs
self._pending_nodes = {node.name: node for node in nodes}
self._completed_nodes_outputs = {} # node name -> output
self._bypassed_nodes = {} # node name -> node
# TODO: Validate the DAG to avoid circular dependencies
@property
def completed_nodes_outputs(self) -> Dict[str, Any]:
return self._completed_nodes_outputs
@property
def bypassed_nodes(self) -> Dict[str, Node]:
return self._bypassed_nodes
def pop_ready_nodes(self) -> List[Node]:
"""Returns a list of node names that are ready, and removes them from the list of nodes to be processed."""
ready_nodes: List[Node] = []
for node in self._pending_nodes.values():
if self._is_node_ready(node):
ready_nodes.append(node)
for node in ready_nodes:
del self._pending_nodes[node.name]
return ready_nodes
def pop_bypassable_nodes(self) -> List[Node]:
"""Returns a list of nodes that are bypassed, and removes them from the list of nodes to be processed."""
# Confirm node should be bypassed
bypassed_nodes: List[Node] = []
for node in self._pending_nodes.values():
if self._is_node_ready(node) and self._is_node_bypassable(node):
self._bypassed_nodes[node.name] = node
bypassed_nodes.append(node)
for node in bypassed_nodes:
del self._pending_nodes[node.name]
return bypassed_nodes
def get_node_valid_inputs(self, node: Node, f: Callable) -> Mapping[str, Any]:
"""Returns the valid inputs for the node, including the flow inputs, literal values and
the outputs of completed nodes. The valid inputs are determined by the function of the node.
:param node: The node for which to determine the valid inputs.
:type node: Node
:param f: The function of the current node, which is used to determine the valid inputs.
When a node dependency is bypassed, the input is omitted if the parameter has a default value,
and is set to None if the parameter has no default value.
:type f: Callable
:return: A dictionary mapping each valid input name to its value.
:rtype: dict
"""
results = {}
signature = inspect.signature(f).parameters
for name, i in (node.inputs or {}).items():
if self._is_node_dependency_bypassed(i):
# If the parameter has a default value, the input will not be set so that the default value will be used.
if signature.get(name) is not None and signature[name].default is not inspect.Parameter.empty:
continue
# If the parameter has no default value, the input will be set to None so that the function will not fail.
else:
flow_logger.warning(
f"The node '{i.value}' referenced by the input '{name}' of the current node '{node.name}' "
"has been bypassed, and no default value is set. Will use 'None' as the value for this input."
)
results[name] = None
else:
results[name] = self._get_node_dependency_value(i)
return results
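# Illustrative behavior (hypothetical node; not part of the original module): if an input "top_k"
# references a bypassed node and the tool function is `def my_tool(top_k=10)`, "top_k" is simply
# omitted so the default 10 applies; if the parameter had no default, it would be passed as None.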
def complete_nodes(self, nodes_outputs: Mapping[str, Any]):
"""Marks nodes as completed with the mapping from node names to their outputs."""
self._completed_nodes_outputs.update(nodes_outputs)
def completed(self) -> bool:
"""Returns True if all nodes have been processed."""
return all(
node.name in self._completed_nodes_outputs or node.name in self._bypassed_nodes for node in self._nodes
)
def _is_node_ready(self, node: Node) -> bool:
"""Returns True if the node is ready to be executed."""
node_dependencies = [i for i in node.inputs.values()]
# Add activate conditions as node dependencies
if node.activate:
node_dependencies.append(node.activate.condition)
for node_dependency in node_dependencies:
if (
node_dependency.value_type == InputValueType.NODE_REFERENCE
and node_dependency.value not in self._completed_nodes_outputs
and node_dependency.value not in self._bypassed_nodes
):
return False
return True
def _is_node_bypassable(self, node: Node) -> bool:
"""Returns True if the node should be bypassed."""
# Bypass node if the activate condition is not met
if node.activate:
# If the node referenced by activate condition is bypassed, the current node should be bypassed
if self._is_node_dependency_bypassed(node.activate.condition):
flow_logger.info(
f"The node '{node.name}' will be bypassed because it depends on the node "
f"'{node.activate.condition.value}' which has already been bypassed in the activate config."
)
return True
# If a node has activate config, we will always use this config
# to determine whether the node should be bypassed.
activate_condition = InputAssignment.serialize(node.activate.condition)
if not self._is_condition_met(node.activate.condition, node.activate.condition_value):
flow_logger.info(
f"The node '{node.name}' will be bypassed because the activate condition is not met, "
f"i.e. '{activate_condition}' is not equal to '{node.activate.condition_value}'."
)
return True
else:
flow_logger.info(
f"The node '{node.name}' will be executed because the activate condition is met, "
f"i.e. '{activate_condition}' is equal to '{node.activate.condition_value}'."
)
return False
# Bypass node if all of its node reference dependencies are bypassed
node_dependencies = [i for i in node.inputs.values() if i.value_type == InputValueType.NODE_REFERENCE]
all_dependencies_bypassed = node_dependencies and all(
self._is_node_dependency_bypassed(dependency) for dependency in node_dependencies
)
if all_dependencies_bypassed:
node_dependencies_list = [dependency.value for dependency in node_dependencies]
flow_logger.info(
f"The node '{node.name}' will be bypassed because all nodes "
f"{node_dependencies_list} it depends on are bypassed."
)
return all_dependencies_bypassed
def _is_condition_met(self, condition: InputAssignment, condition_value) -> bool:
condition = self._get_node_dependency_value(condition)
return condition == condition_value
def _get_node_dependency_value(self, node_dependency: InputAssignment):
return _input_assignment_parser.parse_value(node_dependency, self._completed_nodes_outputs, self._flow_inputs)
def _is_node_dependency_bypassed(self, dependency: InputAssignment) -> bool:
"""Returns True if the node dependency is bypassed.
There are two types of the node dependency:
1. The inputs of the node
2. The activate condition of the node
"""
return dependency.value_type == InputValueType.NODE_REFERENCE and dependency.value in self._bypassed_nodes
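# Minimal illustrative driver (not part of the original module, and not how the real scheduler is
# implemented): a hypothetical serial loop showing how the class above is intended to be consumed.
# `execute_node` is a placeholder for whatever actually runs a node.
def _example_schedule(dag_manager: DAGManager, execute_node: Callable[[Node], Any]) -> Dict[str, Any]:
    while not dag_manager.completed():
        # Drop nodes whose activate condition is not met (or whose dependencies were all bypassed).
        dag_manager.pop_bypassable_nodes()
        # Run nodes whose dependencies are all completed or bypassed, then record their outputs.
        for node in dag_manager.pop_ready_nodes():
            dag_manager.complete_nodes({node.name: execute_node(node)})
    return dag_manager.completed_nodes_outputs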
| promptflow/src/promptflow/promptflow/executor/_dag_manager.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/_dag_manager.py",
"repo_id": "promptflow",
"token_count": 3269
} | 48 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from ._cache_storage import AbstractCacheStorage # noqa: F401
from ._run_storage import AbstractRunStorage # noqa: F401
__all__ = ["AbstractCacheStorage", "AbstractRunStorage"]
| promptflow/src/promptflow/promptflow/storage/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/storage/__init__.py",
"repo_id": "promptflow",
"token_count": 74
} | 49 |
import re
from pathlib import Path
from tempfile import mkdtemp
import pytest
from promptflow._utils.exception_utils import ErrorResponse
from promptflow._utils.logger_utils import LogContext
from promptflow.contracts.run_info import Status
from promptflow.contracts.run_mode import RunMode
from promptflow.executor._flow_nodes_scheduler import RUN_FLOW_NODES_LINEARLY
from promptflow.executor._result import LineResult
from promptflow.executor.flow_executor import FlowExecutor
from ..utils import get_flow_inputs, get_yaml_file, load_content
TEST_ROOT = Path(__file__).parent.parent.parent
FLOWS_ROOT = TEST_ROOT / "test_configs/flows"
FLOW_FOLDER = "concurrent_execution_flow"
@pytest.mark.e2etest
class TestConcurrentExecution:
def test_concurrent_run(self):
logs_directory = Path(mkdtemp())
executor = FlowExecutor.create(get_yaml_file(FLOW_FOLDER), {})
flow_run_log_path = str(logs_directory / "test_flow_run.log")
# flow run: test exec_line
with LogContext(flow_run_log_path, run_mode=RunMode.Test):
results = executor.exec_line(get_flow_inputs(FLOW_FOLDER))
log_content = load_content(flow_run_log_path)
pattern = r"\[wait_(\d+) in line None.*Thread (\d+)"
matches = re.findall(pattern, log_content)
wait_thread_mapping = {}
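# Sanity check parsed from the logs: each wait_<n> node should map to a single thread; seeing the
# same node on two different threads would indicate inconsistent log records.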
for wait, thread in matches:
if wait in wait_thread_mapping:
if wait_thread_mapping[wait] != thread:
raise Exception(f"wait_{wait} corresponds to more than one thread number")
else:
wait_thread_mapping[wait] = thread
self.assert_run_result(results)
assert (
results.run_info.system_metrics["duration"] < 10
), "run nodes concurrently should decrease the total run time."
def test_concurrent_run_with_exception(self):
executor = FlowExecutor.create(get_yaml_file(FLOW_FOLDER), {}, raise_ex=False)
flow_result = executor.exec_line({"input1": "True", "input2": "False", "input3": "False", "input4": "False"})
assert 2 < flow_result.run_info.system_metrics["duration"] < 4, "Should at least finish the running job."
error_response = ErrorResponse.from_error_dict(flow_result.run_info.error)
assert error_response.error_code_hierarchy == "UserError/ToolExecutionError"
def test_linear_run(self):
executor = FlowExecutor.create(get_yaml_file(FLOW_FOLDER), {})
# flow run: test exec_line run linearly
results = executor.exec_line(get_flow_inputs(FLOW_FOLDER), node_concurrency=RUN_FLOW_NODES_LINEARLY)
self.assert_run_result(results)
assert 15 > results.run_info.system_metrics["duration"] > 10, "running nodes linearly will consume more time."
def assert_run_result(self, result: LineResult):
# Validate the flow status
assert result.run_info.status == Status.Completed
# Validate the flow output
assert isinstance(result.output, dict)
# Validate the flow node run infos
assert len(result.node_run_infos) == 5
| promptflow/src/promptflow/tests/executor/e2etests/test_concurent_execution.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/e2etests/test_concurent_execution.py",
"repo_id": "promptflow",
"token_count": 1258
} | 50 |
{# Please replace the template with your own prompt. #}
Write a simple program that displays the greeting message: "{{text}}" when executed.
| promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/my_prompt.jinja2/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool/my_prompt.jinja2",
"repo_id": "promptflow",
"token_count": 33
} | 51 |
import pytest
from promptflow._core.generator_proxy import GeneratorProxy, generate_from_proxy
def generator():
for i in range(3):
yield i
def iterator():
return iter([0, 1, 2])
@pytest.mark.unittest
def test_generator_proxy_next():
proxy = GeneratorProxy(generator())
assert proxy.items == []
assert next(proxy) == 0
assert next(proxy) == 1
assert next(proxy) == 2
with pytest.raises(StopIteration):
next(proxy)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generator_proxy_iter():
original_generator = generator()
proxy = GeneratorProxy(generator())
for num in proxy:
assert num == next(original_generator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generate_from_proxy():
proxy = GeneratorProxy(generator())
original_generator = generator()
for i in generate_from_proxy(proxy):
assert i == next(original_generator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_iterator_proxy_next():
proxy = GeneratorProxy(iterator())
assert proxy.items == []
assert next(proxy) == 0
assert next(proxy) == 1
assert next(proxy) == 2
with pytest.raises(StopIteration):
next(proxy)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_iterator_proxy_iter():
original_iterator = iterator()
proxy = GeneratorProxy(iterator())
for num in proxy:
assert num == next(original_iterator)
assert proxy.items == [0, 1, 2]
@pytest.mark.unittest
def test_generate_from_iterator_proxy():
proxy = GeneratorProxy(iterator())
original_iterator = iterator()
for i in generate_from_proxy(proxy):
assert i == next(original_iterator)
assert proxy.items == [0, 1, 2]
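# Illustrative sketch (not part of the original tests): based purely on the assertions above,
# GeneratorProxy behaves roughly like this hypothetical minimal implementation, i.e. it wraps an
# iterable and records every yielded item in `items`:
class _GeneratorProxySketch:
    def __init__(self, iterable):
        self._iterator = iter(iterable)
        self.items = []

    def __iter__(self):
        return self

    def __next__(self):
        item = next(self._iterator)
        self.items.append(item)
        return item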
| promptflow/src/promptflow/tests/executor/unittests/_core/test_generator_proxy.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/_core/test_generator_proxy.py",
"repo_id": "promptflow",
"token_count": 666
} | 52 |
import io
import logging
import time
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import Mock
from uuid import uuid4
import pytest
from promptflow._utils.credential_scrubber import CredentialScrubber
from promptflow._utils.logger_utils import (
CredentialScrubberFormatter,
FileHandler,
FileHandlerConcurrentWrapper,
LogContext,
bulk_logger,
scrub_credentials,
update_log_path,
update_single_log_path,
)
from promptflow.contracts.run_mode import RunMode
from ...utils import load_content
def _set_handler(logger: logging.Logger, handler: FileHandler, log_content: str):
for h in logger.handlers:
if isinstance(h, FileHandlerConcurrentWrapper):
h.handler = handler
time.sleep(1)
logger.warning(log_content)
h.clear()
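# Note (added for clarity, not part of the original tests): the helper above is run from multiple
# threads at once; the sleep forces the threads to overlap so the test below can verify that each
# thread sees only its own FileHandler, presumably via the wrapper's internal context variable
# (see `_context_var` used later in this file).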
class DummyException(Exception):
pass
@pytest.fixture
def logger():
logger = logging.getLogger(str(uuid4()))
logger.setLevel(logging.INFO)
return logger
@pytest.fixture
def stream_handler():
stream = io.StringIO()
return logging.StreamHandler(stream)
@pytest.mark.unittest
class TestCredentialScrubberFormatter:
def test_log(self, logger, stream_handler):
"""Make sure credentials by logger.log are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("testinfo&sig=signature")
logger.error("testerror&key=accountkey")
logger.warning("testwarning&sig=signature")
logger.critical("print dummy secret")
expected_log_output = (
f"testinfo&sig={CredentialScrubber.PLACE_HOLDER}\n"
f"testerror&key={CredentialScrubber.PLACE_HOLDER}\n"
f"testwarning&sig={CredentialScrubber.PLACE_HOLDER}\n"
f"print {CredentialScrubber.PLACE_HOLDER}\n"
)
assert stream_handler.stream.getvalue() == expected_log_output
def test_log_with_args(self, logger, stream_handler):
"""Make sure credentials by logger.log (in args) are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
logger.info("testinfo&sig=%s credential=%s", "signature", "dummy secret")
expected_log_output = (
f"testinfo&sig={CredentialScrubber.PLACE_HOLDER} " f"credential={CredentialScrubber.PLACE_HOLDER}\n"
)
assert stream_handler.stream.getvalue() == expected_log_output
def test_log_with_exc_info(self, logger, stream_handler):
"""Make sure credentials in exception are scrubbed."""
formatter = CredentialScrubberFormatter()
formatter.set_credential_list(["dummy secret"])
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
exception = DummyException("credential=dummy secret accountkey=accountkey")
logger.exception("test exception", exc_info=exception)
expected_log_output = "credential=**data_scrubbed** accountkey=**data_scrubbed**"
assert expected_log_output in stream_handler.stream.getvalue()
def test_set_credential_list_thread_safe(self):
formatter = CredentialScrubberFormatter()
def set_and_check_credential_list(credential_list):
formatter.set_credential_list(credential_list)
time.sleep(1)
assert formatter.credential_scrubber.custom_str_set == set(credential_list)
with ThreadPool(processes=3) as pool:
results = pool.map(set_and_check_credential_list, [[f"secret {i}", f"credential {i}"] for i in range(3)])
_ = list(results)
@pytest.mark.unittest
class TestFileHandlerConcurrentWrapper:
def test_set_handler_thread_safe(self):
wrapper = FileHandlerConcurrentWrapper()
logger = logging.getLogger("test execution log handler")
logger.addHandler(wrapper)
process_num = 3
folder_path = Path(mkdtemp())
log_path_list = [str(folder_path / f"log_{i}.log") for i in range(process_num)]
with ThreadPool(processes=process_num) as pool:
results = pool.starmap(
_set_handler, ((logger, FileHandler(log_path_list[i]), f"log {i}") for i in range(process_num))
)
results = list(results)
# Make sure log content is as expected.
for i, log_path in enumerate(log_path_list):
with open(log_path, "r") as f:
log = f.read()
log_lines = log.split("\n")
assert len(log_lines) == 2
assert f"log {i}" in log_lines[0]
assert log_lines[1] == ""
def test_clear(self):
wrapper = FileHandlerConcurrentWrapper()
assert wrapper.handler is None
log_path = str(Path(mkdtemp()) / "logs.log")
file_handler = FileHandler(log_path)
file_handler.close = Mock(side_effect=Exception("test exception"))
wrapper.handler = file_handler
wrapper.clear()
assert wrapper.handler is None
@pytest.mark.unittest
class TestLogContext:
def test_context_manager(self):
log_handler = FileHandlerConcurrentWrapper()
logger = logging.getLogger("test_setup_logger_context")
logger.addHandler(log_handler)
log_path = str(Path(mkdtemp()) / "test.log")
try:
log_context_initializer = LogContext(log_path).get_initializer()
log_context = log_context_initializer()
log_context.input_logger = logger
assert LogContext.get_current() is None
with log_context:
assert LogContext.get_current() is not None
# Make sure context variables are set.
inner_handler = log_handler._context_var.get()
assert isinstance(inner_handler, FileHandler)
assert isinstance(inner_handler._formatter, CredentialScrubberFormatter)
scrubber = inner_handler._formatter._context_var.get()
assert scrubber is not None
logger.warning("Print %s", "&sig=signature")
# Raise exception for test.
raise DummyException("Raise exception for test.")
except DummyException:
pass
# Make sure log content is as expected.
with open(log_path, "r") as f:
log_content = f.read()
assert f"Print &sig={CredentialScrubber.PLACE_HOLDER}" in log_content
# Make sure context variables are cleaned up.
assert log_handler._context_var.get() is None
def test_empty_file_path(self, logger, stream_handler):
logger.addHandler(stream_handler)
logger.addHandler(FileHandlerConcurrentWrapper())
with LogContext("", input_logger=logger):
logger.info("test log")
assert stream_handler.stream.getvalue() == "test log\n"
def test_update_log_path(self):
log_handler = FileHandlerConcurrentWrapper()
input_logger = logging.getLogger("input_logger")
input_logger.addHandler(log_handler)
folder_path = Path(mkdtemp())
original_log_path = str(folder_path / "original_log.log")
with LogContext(original_log_path, input_logger=input_logger, run_mode=RunMode.Batch):
bulk_logger.info("test log")
input_logger.warning("test input log")
original_log = load_content(original_log_path)
keywords = ["test log", "test input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in original_log for keyword in keywords)
# Update log path
log_path = str(folder_path / "log_without_input_logger.log")
update_log_path(log_path, input_logger)
bulk_logger.info("test update log")
input_logger.warning("test update input log")
log = load_content(log_path)
keywords = ["test update log", "test update input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in log for keyword in keywords)
def test_update_single_log_path(self):
log_handler = FileHandlerConcurrentWrapper()
input_logger = logging.getLogger("input_logger")
input_logger.addHandler(log_handler)
folder_path = Path(mkdtemp())
original_log_path = str(folder_path / "original_log.log")
with LogContext(original_log_path, input_logger=input_logger, run_mode=RunMode.Batch):
bulk_logger.info("test log")
input_logger.warning("test input log")
original_log = load_content(original_log_path)
keywords = ["test log", "test input log", "execution.bulk", "input_logger", "INFO", "WARNING"]
assert all(keyword in original_log for keyword in keywords)
# Update log path
bulk_log_path = str(folder_path / "update_bulk_log.log")
update_single_log_path(bulk_log_path, bulk_logger)
input_log_path = str(folder_path / "update_input_log.log")
update_single_log_path(input_log_path, input_logger)
bulk_logger.info("test update log")
input_logger.warning("test update input log")
bulk_log = load_content(bulk_log_path)
input_log = load_content(input_log_path)
bulk_keywords = ["test update log", "execution.bulk", "INFO"]
input_keywords = ["test update input log", "input_logger", "WARNING"]
assert all(keyword in bulk_log for keyword in bulk_keywords)
assert all(keyword not in bulk_log for keyword in input_keywords)
assert all(keyword in input_log for keyword in input_keywords)
assert all(keyword not in input_log for keyword in bulk_keywords)
def test_scrub_credentials(self):
log_content = "sig=signature&key=accountkey"
folder_path = Path(mkdtemp())
logs_path = str(folder_path / "logs.log")
scrubbed_log_content = scrub_credentials(log_content)
assert scrubbed_log_content == "sig=**data_scrubbed**&key=**data_scrubbed**"
with LogContext(logs_path):
scrubbed_log_content = scrub_credentials(log_content)
assert scrubbed_log_content == "sig=**data_scrubbed**&key=**data_scrubbed**"
| promptflow/src/promptflow/tests/executor/unittests/_utils/test_logger_utils.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/_utils/test_logger_utils.py",
"repo_id": "promptflow",
"token_count": 4473
} | 53 |
import json
import pytest
from promptflow._sdk._constants import VIS_JS_BUNDLE_FILENAME
from promptflow.contracts._run_management import VisualizationRender
@pytest.mark.unittest
def test_visualization_render():
data = {"key": "value"}
viz = VisualizationRender(data)
assert viz.data == json.dumps(json.dumps(data))
assert viz.js_path == VIS_JS_BUNDLE_FILENAME
| promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_management.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/contracts/test_run_management.py",
"repo_id": "promptflow",
"token_count": 135
} | 54 |
import multiprocessing
import os
import sys
import uuid
from multiprocessing import Queue
from pathlib import Path
from tempfile import mkdtemp
from unittest.mock import patch
import pytest
from pytest_mock import MockFixture
from promptflow._utils.logger_utils import LogContext
from promptflow.contracts.run_info import Status
from promptflow.exceptions import ErrorTarget, UserErrorException
from promptflow.executor import FlowExecutor
from promptflow.executor._errors import SpawnedForkProcessManagerStartFailure
from promptflow.executor._line_execution_process_pool import (
LineExecutionProcessPool,
_exec_line,
format_current_process_info,
get_available_max_worker_count,
log_process_status,
)
from promptflow.executor._process_manager import create_spawned_fork_process_manager
from promptflow.executor._result import LineResult
from ...utils import get_flow_sample_inputs, get_yaml_file
SAMPLE_FLOW = "web_classification_no_variants"
def get_line_inputs(flow_folder=""):
if flow_folder:
inputs = get_bulk_inputs(flow_folder)
return inputs[0]
return {
"url": "https://www.microsoft.com/en-us/windows/",
"text": "some_text",
}
def get_bulk_inputs(nlines=4, flow_folder="", sample_inputs_file="", return_dict=False):
if flow_folder:
if not sample_inputs_file:
sample_inputs_file = "samples.json"
inputs = get_flow_sample_inputs(flow_folder, sample_inputs_file=sample_inputs_file)
if isinstance(inputs, list) and len(inputs) > 0:
return inputs
elif isinstance(inputs, dict):
if return_dict:
return inputs
return [inputs]
else:
raise Exception(f"Invalid type of bulk input: {inputs}")
return [get_line_inputs() for _ in range(nlines)]
def execute_in_fork_mode_subprocess(
dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process
):
os.environ["PF_BATCH_METHOD"] = "fork"
if is_set_environ_pf_worker_count:
os.environ["PF_WORKER_COUNT"] = pf_worker_count
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
assert pool._n_process == n_process
if is_set_environ_pf_worker_count:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment " f"variable 'PF_WORKER_COUNT'."
)
else:
factors = {
"default_worker_count": pool._DEFAULT_WORKER_COUNT,
"row_count": pool._nlines,
}
mock_logger.info.assert_any_call(
f"Set process count to {n_process} by taking the minimum value among the " f"factors of {factors}."
)
def execute_in_spawn_mode_subprocess(
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
):
os.environ["PF_BATCH_METHOD"] = "spawn"
if is_set_environ_pf_worker_count:
os.environ["PF_WORKER_COUNT"] = pf_worker_count
executor = FlowExecutor.create(
get_yaml_file(flow_folder),
dev_connections,
)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with patch("psutil.virtual_memory") as mock_mem:
mock_mem.return_value.available = 128.0 * 1024 * 1024
with patch("psutil.Process") as mock_process:
mock_process.return_value.memory_info.return_value.rss = 64 * 1024 * 1024
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
assert pool._n_process == n_process
if is_set_environ_pf_worker_count and is_calculation_smaller_than_set:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment "
f"variable 'PF_WORKER_COUNT'."
)
mock_logger.warning.assert_any_call(
f"The current process count ({pf_worker_count}) is larger than recommended process count "
f"({estimated_available_worker_count}) that estimated by system available memory. This may "
f"cause memory exhaustion"
)
elif is_set_environ_pf_worker_count and not is_calculation_smaller_than_set:
mock_logger.info.assert_any_call(
f"Set process count to {pf_worker_count} with the environment "
f"variable 'PF_WORKER_COUNT'."
)
elif not is_set_environ_pf_worker_count:
factors = {
"default_worker_count": pool._DEFAULT_WORKER_COUNT,
"row_count": pool._nlines,
"estimated_worker_count_based_on_memory_usage": estimated_available_worker_count,
}
mock_logger.info.assert_any_call(
f"Set process count to {n_process} by taking the minimum value among the factors "
f"of {factors}."
)
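# Note (added for clarity, not part of the original tests): in the spawn-mode subprocess above,
# psutil is patched so the pool sees 128MB of available memory and a 64MB per-process footprint
# (an estimate of 2 workers from memory alone); the parametrized cases then exercise how an explicit
# PF_WORKER_COUNT interacts with that estimate.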
def create_line_execution_process_pool(dev_connections):
executor = FlowExecutor.create(get_yaml_file(SAMPLE_FLOW), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
line_execution_process_pool = LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
line_timeout_sec=1,
)
return line_execution_process_pool
def set_environment_successed_in_subprocess(dev_connections, pf_batch_method):
os.environ["PF_BATCH_METHOD"] = pf_batch_method
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork is False
def set_environment_failed_in_subprocess(dev_connections):
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
mock_logger.warning.return_value = None
os.environ["PF_BATCH_METHOD"] = "test"
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork == (multiprocessing.get_start_method() == "fork")
sys_start_methods = multiprocessing.get_all_start_methods()
expected_log_message = (
"Failed to set start method to 'test', start method test" f" is not in: {sys_start_methods}."
)
mock_logger.warning.assert_called_once_with(expected_log_message)
def not_set_environment_in_subprocess(dev_connections):
line_execution_process_pool = create_line_execution_process_pool(dev_connections)
use_fork = line_execution_process_pool._use_fork
assert use_fork == (multiprocessing.get_start_method() == "fork")
def custom_create_spawned_fork_process_manager(*args, **kwargs):
create_spawned_fork_process_manager("test", *args, **kwargs)
@pytest.mark.unittest
class TestLineExecutionProcessPool:
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_line_execution_process_pool(self, flow_folder, dev_connections):
log_path = str(Path(mkdtemp()) / "test.log")
log_context_initializer = LogContext(log_path).get_initializer()
log_context = log_context_initializer()
with log_context:
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
executor._log_interval = 1
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
run_id = run_id or str(uuid.uuid4())
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
result_list = pool.run(zip(range(nlines), bulk_inputs))
assert len(result_list) == nlines
for i, line_result in enumerate(result_list):
assert isinstance(line_result, LineResult)
assert line_result.run_info.status == Status.Completed, f"{i}th line got {line_result.run_info.status}"
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_line_execution_not_completed(self, flow_folder, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
line_timeout_sec=1,
) as pool:
result_list = pool.run(zip(range(nlines), bulk_inputs))
result_list = sorted(result_list, key=lambda r: r.run_info.index)
assert len(result_list) == nlines
for i, line_result in enumerate(result_list):
assert isinstance(line_result, LineResult)
assert line_result.run_info.error["message"] == f"Line {i} execution timeout for exceeding 1 seconds"
assert line_result.run_info.error["code"] == "UserError"
assert line_result.run_info.status == Status.Failed
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_exec_line(self, flow_folder, dev_connections, mocker: MockFixture):
output_queue = Queue()
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
line_inputs = get_line_inputs()
line_result = _exec_line(
executor=executor,
output_queue=output_queue,
inputs=line_inputs,
run_id=run_id,
index=0,
line_timeout_sec=600,
)
assert isinstance(line_result, LineResult)
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_exec_line_failed_when_line_execution_not_start(self, flow_folder, dev_connections, mocker: MockFixture):
output_queue = Queue()
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
test_error_msg = "Test user error"
with patch("promptflow.executor.flow_executor.FlowExecutor.exec_line", autouse=True) as mock_exec_line:
mock_exec_line.side_effect = UserErrorException(
message=test_error_msg, target=ErrorTarget.AZURE_RUN_STORAGE
)
run_id = str(uuid.uuid4())
line_inputs = get_line_inputs()
line_result = _exec_line(
executor=executor,
output_queue=output_queue,
inputs=line_inputs,
run_id=run_id,
index=0,
line_timeout_sec=600,
)
assert isinstance(line_result, LineResult)
assert line_result.run_info.error["message"] == test_error_msg
assert line_result.run_info.error["code"] == "UserError"
assert line_result.run_info.status == Status.Failed
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
def test_process_pool_run_with_exception(self, flow_folder, dev_connections, mocker: MockFixture):
# mock process pool run execution raise error
test_error_msg = "Test user error"
mocker.patch(
"promptflow.executor._line_execution_process_pool.LineExecutionProcessPool."
"_monitor_workers_and_process_tasks_in_thread",
side_effect=UserErrorException(message=test_error_msg, target=ErrorTarget.AZURE_RUN_STORAGE),
)
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
with pytest.raises(UserErrorException) as e:
pool.run(zip(range(nlines), bulk_inputs))
assert e.value.message == test_error_msg
assert e.value.target == ErrorTarget.AZURE_RUN_STORAGE
assert e.value.error_codes[0] == "UserError"
@pytest.mark.parametrize(
("flow_folder", "is_set_environ_pf_worker_count", "pf_worker_count", "n_process"),
[(SAMPLE_FLOW, True, "3", 3), (SAMPLE_FLOW, False, None, 4)],
)
def test_process_pool_parallelism_in_fork_mode(
self, dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process
):
if "fork" not in multiprocessing.get_all_start_methods():
pytest.skip("Unsupported start method: fork")
p = multiprocessing.Process(
target=execute_in_fork_mode_subprocess,
args=(dev_connections, flow_folder, is_set_environ_pf_worker_count, pf_worker_count, n_process),
)
p.start()
p.join()
assert p.exitcode == 0
@pytest.mark.parametrize(
(
"flow_folder",
"is_set_environ_pf_worker_count",
"is_calculation_smaller_than_set",
"pf_worker_count",
"estimated_available_worker_count",
"n_process",
),
[
(SAMPLE_FLOW, True, False, "2", 4, 2),
(SAMPLE_FLOW, True, True, "6", 2, 6),
(SAMPLE_FLOW, False, True, None, 2, 2),
],
)
def test_process_pool_parallelism_in_spawn_mode(
self,
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
):
if "spawn" not in multiprocessing.get_all_start_methods():
pytest.skip("Unsupported start method: spawn")
p = multiprocessing.Process(
target=execute_in_spawn_mode_subprocess,
args=(
dev_connections,
flow_folder,
is_set_environ_pf_worker_count,
is_calculation_smaller_than_set,
pf_worker_count,
estimated_available_worker_count,
n_process,
),
)
p.start()
p.join()
assert p.exitcode == 0
def test_process_set_environment_variable_successed(self, dev_connections):
p = multiprocessing.Process(
target=set_environment_successed_in_subprocess,
args=(
dev_connections,
"spawn",
),
)
p.start()
p.join()
assert p.exitcode == 0
def test_process_set_environment_variable_failed(self, dev_connections):
p = multiprocessing.Process(target=set_environment_failed_in_subprocess, args=(dev_connections,))
p.start()
p.join()
assert p.exitcode == 0
def test_process_not_set_environment_variable(self, dev_connections):
p = multiprocessing.Process(target=not_set_environment_in_subprocess, args=(dev_connections,))
p.start()
p.join()
assert p.exitcode == 0
@pytest.mark.skipif(sys.platform == "win32" or sys.platform == "darwin", reason="Only test on linux")
@pytest.mark.parametrize(
"flow_folder",
[
SAMPLE_FLOW,
],
)
@patch(
"promptflow.executor._process_manager.create_spawned_fork_process_manager",
custom_create_spawned_fork_process_manager,
)
def test_spawned_fork_process_manager_crashed_in_fork_mode(self, flow_folder, dev_connections):
executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections)
run_id = str(uuid.uuid4())
bulk_inputs = get_bulk_inputs()
nlines = len(bulk_inputs)
run_id = run_id or str(uuid.uuid4())
with pytest.raises(SpawnedForkProcessManagerStartFailure) as e:
with LineExecutionProcessPool(
executor,
nlines,
run_id,
None,
) as pool:
pool.run(zip(range(nlines), bulk_inputs))
assert "Failed to start spawned fork process manager" in str(e.value)
class TestGetAvailableMaxWorkerCount:
@pytest.mark.parametrize(
"available_memory, process_memory, expected_max_worker_count, actual_calculate_worker_count",
[
(128.0, 64.0, 2, 2), # available_memory/process_memory > 1
(63.0, 64.0, 1, 0), # available_memory/process_memory < 1
],
)
def test_get_available_max_worker_count(
self, available_memory, process_memory, expected_max_worker_count, actual_calculate_worker_count
):
with patch("psutil.virtual_memory") as mock_mem:
mock_mem.return_value.available = available_memory * 1024 * 1024
with patch("psutil.Process") as mock_process:
mock_process.return_value.memory_info.return_value.rss = process_memory * 1024 * 1024
with patch("promptflow.executor._line_execution_process_pool.bulk_logger") as mock_logger:
mock_logger.warning.return_value = None
estimated_available_worker_count = get_available_max_worker_count()
assert estimated_available_worker_count == expected_max_worker_count
if actual_calculate_worker_count < 1:
mock_logger.warning.assert_called_with(
f"Current system's available memory is {available_memory}MB, less than the memory "
f"{process_memory}MB required by the process. The maximum available worker count is 1."
)
else:
mock_logger.info.assert_called_with(
f"Current system's available memory is {available_memory}MB, "
f"memory consumption of current process is {process_memory}MB, "
f"estimated available worker count is {available_memory}/{process_memory} "
f"= {actual_calculate_worker_count}"
)
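# Note (added for clarity, not part of the original tests): the two cases above suggest the estimate is
# roughly floor(available_memory / process_memory), clamped to a minimum of 1 worker:
#   128MB / 64MB -> 2 workers; 63MB / 64MB -> 0, which is clamped to 1 (with a warning).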
@pytest.mark.unittest
class TestFormatCurrentProcess:
def test_format_current_process_info(self):
process_name = "process_name"
process_pid = 123
line_number = 13
formatted_message = format_current_process_info(process_name, process_pid, line_number)
expected_returned_log_message = (
f"Process name({process_name})-Process id({process_pid})-Line number({line_number})"
)
assert formatted_message == expected_returned_log_message
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_start_execution(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number)
expected_log_message = (
f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) start execution."
)
mock_logger_info.assert_called_once_with(expected_log_message)
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_completed(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number, is_completed=True)
expected_log_message = (
f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) completed."
)
mock_logger_info.assert_called_once_with(expected_log_message)
@patch("promptflow.executor._line_execution_process_pool.bulk_logger.info", autospec=True)
def test_log_process_status_failed(self, mock_logger_info):
process_name = "process_name"
process_pid = 123
line_number = 13
log_process_status(process_name, process_pid, line_number, is_failed=True)
expected_log_message = (
f"Process name({process_name})-Process id({process_pid})-Line number({line_number}) failed."
)
mock_logger_info.assert_called_once_with(expected_log_message)
| promptflow/src/promptflow/tests/executor/unittests/processpool/test_line_execution_process_pool.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/processpool/test_line_execution_process_pool.py",
"repo_id": "promptflow",
"token_count": 10244
} | 55 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import json
from pathlib import Path
import pytest
from promptflow.azure._entities._flow import Flow
from .._azure_utils import DEFAULT_TEST_TIMEOUT, PYTEST_TIMEOUT_METHOD
from ..recording_utilities import is_live
tests_root_dir = Path(__file__).parent.parent.parent
flow_test_dir = tests_root_dir / "test_configs/flows"
data_dir = tests_root_dir / "test_configs/datas"
@pytest.mark.timeout(timeout=DEFAULT_TEST_TIMEOUT, method=PYTEST_TIMEOUT_METHOD)
@pytest.mark.e2etest
@pytest.mark.usefixtures(
"mock_set_headers_with_user_aml_token",
"single_worker_thread_pool",
"vcr_recording",
)
class TestFlow:
def test_create_flow(self, created_flow: Flow):
# most of the assertions are in the fixture itself
assert isinstance(created_flow, Flow)
def test_get_flow(self, pf, created_flow: Flow):
result = pf.flows.get(name=created_flow.name)
# assert created flow is the same as the one retrieved
attributes = vars(result)
for attr in attributes:
assert getattr(result, attr) == getattr(created_flow, attr), f"Assertion failed for attribute: {attr!r}"
@pytest.mark.skipif(
condition=not is_live(),
reason="Complicated test combining `pf flow test` and global config",
)
def test_flow_test_with_config(self, remote_workspace_resource_id):
from promptflow import PFClient
client = PFClient(config={"connection.provider": remote_workspace_resource_id})
output = client.test(flow=flow_test_dir / "web_classification")
assert output.keys() == {"category", "evidence"}
@pytest.mark.usefixtures("mock_get_user_identity_info")
def test_list_flows(self, pf):
flows = pf.flows.list(max_results=3)
for flow in flows:
print(json.dumps(flow._to_dict(), indent=4))
assert len(flows) == 3
| promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_operations.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_operations.py",
"repo_id": "promptflow",
"token_count": 751
} | 56 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from pathlib import Path
from unittest.mock import patch
import pytest
from promptflow._sdk._errors import FlowOperationError
from promptflow.exceptions import UserErrorException
tests_root_dir = Path(__file__).parent.parent.parent
flow_test_dir = tests_root_dir / "test_configs/flows"
data_dir = tests_root_dir / "test_configs/datas"
@pytest.mark.unittest
class TestFlowOperations:
def test_create_flow_with_invalid_parameters(self, pf):
with pytest.raises(UserErrorException, match=r"Flow source must be a directory with"):
pf.flows.create_or_update(flow="fake_source")
flow_source = flow_test_dir / "web_classification/"
with pytest.raises(UserErrorException, match="Not a valid string"):
pf.flows.create_or_update(flow=flow_source, display_name=False)
with pytest.raises(UserErrorException, match="Must be one of: standard, evaluation, chat"):
pf.flows.create_or_update(flow=flow_source, type="unknown")
with pytest.raises(UserErrorException, match="Not a valid string"):
pf.flows.create_or_update(flow=flow_source, description=False)
with pytest.raises(UserErrorException, match="Not a valid string"):
pf.flows.create_or_update(flow=flow_source, tags={"key": False})
@pytest.mark.usefixtures("enable_logger_propagate")
def test_create_flow_with_warnings(self, pf, caplog):
flow_source = flow_test_dir / "web_classification/"
pf.flows._validate_flow_creation_parameters(source=flow_source, random="random")
assert "random: Unknown field" in caplog.text
def test_list_flows_invalid_cases(self, pf):
with pytest.raises(FlowOperationError, match="'max_results' must be a positive integer"):
pf.flows.list(max_results=0)
with pytest.raises(FlowOperationError, match="'flow_type' must be one of"):
pf.flows.list(flow_type="unknown")
with pytest.raises(FlowOperationError, match="Invalid list view type"):
pf.flows.list(list_view_type="invalid")
def test_get_user_identity_info(self):
# We have a fixture "mock_get_user_identity_info" to mock this function during record and replay,
# as we don't want to deal with tokens in those modes; meanwhile, for coverage, this
# unit test covers the code path directly.
import jwt
from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller
mock_oid, mock_tid = "mock_oid", "mock_tid"
        def mock_init(*args, **kwargs) -> None:
self = args[0]
self._credential = None
def mock_get_arm_token(*args, **kwargs) -> str:
return jwt.encode(
payload={
"oid": mock_oid,
"tid": mock_tid,
},
key="",
)
with patch(
"promptflow.azure._restclient.flow_service_caller.get_arm_token",
new=mock_get_arm_token,
):
with patch.object(FlowServiceCaller, "__init__", new=mock_init):
service_caller = FlowServiceCaller(workspace=None, credential=None, operation_scope=None)
user_object_id, user_tenant_id = service_caller._get_user_identity_info()
assert user_object_id == mock_oid
assert user_tenant_id == mock_tid
| promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_flow_operations.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_flow_operations.py",
"repo_id": "promptflow",
"token_count": 1471
} | 57 |
import uuid
from pathlib import Path
import pydash
import pytest
from promptflow._sdk._constants import SCRUBBED_VALUE
from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities import AzureOpenAIConnection, CustomConnection
_client = PFClient()
TEST_ROOT = Path(__file__).parent.parent.parent
CONNECTION_ROOT = TEST_ROOT / "test_configs/connections"
@pytest.mark.cli_test
@pytest.mark.e2etest
class TestConnection:
def test_connection_operations(self):
name = f"Connection_{str(uuid.uuid4())[:4]}"
conn = AzureOpenAIConnection(name=name, api_key="test", api_base="test")
# Create
_client.connections.create_or_update(conn)
# Get
result = _client.connections.get(name)
assert pydash.omit(result._to_dict(), ["created_date", "last_modified_date", "name"]) == {
"module": "promptflow.connections",
"type": "azure_open_ai",
"api_key": "******",
"api_base": "test",
"api_type": "azure",
"api_version": "2023-07-01-preview",
}
# Update
conn.api_base = "test2"
result = _client.connections.create_or_update(conn)
assert pydash.omit(result._to_dict(), ["created_date", "last_modified_date", "name"]) == {
"module": "promptflow.connections",
"type": "azure_open_ai",
"api_key": "******",
"api_base": "test2",
"api_type": "azure",
"api_version": "2023-07-01-preview",
}
# List
result = _client.connections.list()
assert len(result) > 0
# Delete
_client.connections.delete(name)
with pytest.raises(Exception) as e:
_client.connections.get(name)
assert "is not found." in str(e.value)
def test_connection_get_and_update(self):
# Test api key not updated
name = f"Connection_{str(uuid.uuid4())[:4]}"
conn = AzureOpenAIConnection(name=name, api_key="test", api_base="test")
result = _client.connections.create_or_update(conn)
assert result.api_key == SCRUBBED_VALUE
        # Update api_base only; assert no exception is raised
result.api_base = "test2"
result = _client.connections.create_or_update(result)
assert result._to_dict()["api_base"] == "test2"
# Assert value not scrubbed
assert result._secrets["api_key"] == "test"
_client.connections.delete(name)
# Invalid update
with pytest.raises(Exception) as e:
result._secrets = {}
_client.connections.create_or_update(result)
assert "secrets ['api_key'] value invalid, please fill them" in str(e.value)
def test_custom_connection_get_and_update(self):
# Test api key not updated
name = f"Connection_{str(uuid.uuid4())[:4]}"
conn = CustomConnection(name=name, secrets={"api_key": "test"}, configs={"api_base": "test"})
result = _client.connections.create_or_update(conn)
assert result.secrets["api_key"] == SCRUBBED_VALUE
        # Update api_base only; assert no exception is raised
result.configs["api_base"] = "test2"
result = _client.connections.create_or_update(result)
assert result._to_dict()["configs"]["api_base"] == "test2"
# Assert value not scrubbed
assert result._secrets["api_key"] == "test"
_client.connections.delete(name)
# Invalid update
with pytest.raises(Exception) as e:
result._secrets = {}
_client.connections.create_or_update(result)
assert "secrets ['api_key'] value invalid, please fill them" in str(e.value)
@pytest.mark.parametrize(
"file_name, expected_updated_item, expected_secret_item",
[
("azure_openai_connection.yaml", ("api_base", "new_value"), ("api_key", "<to-be-replaced>")),
("custom_connection.yaml", ("key1", "new_value"), ("key2", "test2")),
],
)
def test_upsert_connection_from_file(self, file_name, expected_updated_item, expected_secret_item):
from promptflow._cli._pf._connection import _upsert_connection_from_file
name = f"Connection_{str(uuid.uuid4())[:4]}"
result = _upsert_connection_from_file(file=CONNECTION_ROOT / file_name, params_override=[{"name": name}])
assert result is not None
update_file_name = f"update_{file_name}"
        result = _upsert_connection_from_file(
            file=CONNECTION_ROOT / update_file_name, params_override=[{"name": name}]
        )
# Test secrets not updated, and configs updated
assert (
result.configs[expected_updated_item[0]] == expected_updated_item[1]
), "Assert configs updated failed, expected: {}, actual: {}".format(
expected_updated_item[1], result.configs[expected_updated_item[0]]
)
assert (
result._secrets[expected_secret_item[0]] == expected_secret_item[1]
), "Assert secrets not updated failed, expected: {}, actual: {}".format(
expected_secret_item[1], result._secrets[expected_secret_item[0]]
)
| promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_connection.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/e2etests/test_connection.py",
"repo_id": "promptflow",
"token_count": 2245
} | 58 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/CustomConnection.schema.json
name: my_custom_connection
type: custom
configs:
key1: "test1"
secrets: # must-have
key2: "test2"
| promptflow/src/promptflow/tests/test_configs/connections/custom_connection.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/connections/custom_connection.yaml",
"repo_id": "promptflow",
"token_count": 76
} | 59 |
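For reference, the same connection could also be created programmatically with the SDK entities used by the tests above; a minimal sketch, assuming a local PFClient as in test_connection.py:

from promptflow._sdk._pf_client import PFClient
from promptflow._sdk.entities import CustomConnection

# Minimal sketch: create the connection described by custom_connection.yaml in code.
conn = CustomConnection(
    name="my_custom_connection",
    configs={"key1": "test1"},
    secrets={"key2": "test2"},  # the secrets section is required for a custom connection
)
PFClient().connections.create_or_update(conn)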
{"input_val": "input1"}
| promptflow/src/promptflow/tests/test_configs/datas/simple_eager_flow_data.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/datas/simple_eager_flow_data.jsonl",
"repo_id": "promptflow",
"token_count": 10
} | 60 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Experiment.schema.json
description: Basic experiment without script node
data:
- name: my_data
path: ../../flows/web_classification/data.jsonl
inputs:
- name: my_input
type: int
default: 1
nodes:
- name: main
type: flow
path: ../../flows/web_classification/flow.dag.yaml
inputs:
url: ${data.my_data.url}
variant: ${summarize_text_content.variant_0}
environment_variables: {}
connections: {}
- name: eval
type: flow
path: ../../flows/eval-classification-accuracy
inputs:
groundtruth: ${data.my_data.answer} # No node can be named with "data"
prediction: ${main.outputs.category}
environment_variables: {}
connections: {}
| promptflow/src/promptflow/tests/test_configs/experiments/basic-no-script-template/basic.exp.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/experiments/basic-no-script-template/basic.exp.yaml",
"repo_id": "promptflow",
"token_count": 301
} | 61 |
{"groundtruth": "Tomorrow's weather will be sunny.","prediction": "The weather will be sunny tomorrow."}
{"groundtruth": "Hello,","prediction": "World."}
{"groundtruth": "Promptflow is a super easy-to-use tool, right?","prediction": "Yes!"}
| promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/data.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/aggregation_node_failed/data.jsonl",
"repo_id": "promptflow",
"token_count": 70
} | 62 |
[{"text": "Hello World!"}]
| promptflow/src/promptflow/tests/test_configs/flows/basic-with-connection/samples.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/basic-with-connection/samples.json",
"repo_id": "promptflow",
"token_count": 11
} | 63 |
from promptflow import tool
@tool
def show_answer(chat_answer: str):
print("print:", chat_answer)
return chat_answer
| promptflow/src/promptflow/tests/test_configs/flows/chat_flow/show_answer.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/chat_flow/show_answer.py",
"repo_id": "promptflow",
"token_count": 43
} | 64 |
inputs:
chat_history:
type: list
is_chat_history: true
question:
type: string
is_chat_input: true
outputs:
answer:
type: string
reference: ${stream.output.answer}
is_chat_output: true
nodes:
- name: stream
type: python
source:
type: code
path: stream.py
inputs:
chat_history: ${inputs.chat_history}
question: ${inputs.question} | promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_python_node_streaming_output/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/chat_flow_with_python_node_streaming_output/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 154
} | 65 |
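The stream.py referenced by this DAG is not included in this excerpt; as a hedged sketch, a compatible tool would return a dict whose "answer" field is a generator, so the output referenced by ${stream.output.answer} can be streamed chunk by chunk:

from promptflow import tool

# Hypothetical sketch of a stream.py matching the DAG above; the real test file may differ.
@tool
def stream(chat_history: list, question: str):
    def generate_answer():
        for word in f"Echo: {question}".split():
            yield word + " "
    return {"answer": generate_answer()}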
{
"input1": "False",
"input2": "False",
"input3": "False",
"input4": "False"
} | promptflow/src/promptflow/tests/test_configs/flows/concurrent_execution_flow/inputs.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/concurrent_execution_flow/inputs.json",
"repo_id": "promptflow",
"token_count": 39
} | 66 |
from promptflow import tool
from promptflow import log_metric
@tool
def average(input: list):
avg, cnt = 0, 0
for num in input:
        if num is not None:
            avg += num
            cnt += 1
    # guard against dividing by zero when the list is empty or every entry is None
    if cnt > 0:
        avg = avg / cnt
log_metric("average", avg)
return avg
| promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/aggregation_node.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/conditional_flow_with_aggregate_bypassed/aggregation_node.py",
"repo_id": "promptflow",
"token_count": 108
} | 67 |
#! /bin/bash
CONDA_ENV_PATH="$(conda info --base)/envs/promptflow-serve"
export PATH="$CONDA_ENV_PATH/bin:$PATH"
ls
ls /connections
pf connection create --file /connections/custom_connection.yaml
echo "start promptflow serving with worker_num: 8, worker_threads: 1"
cd /flow
gunicorn -w 8 --threads 1 -b "0.0.0.0:8080" --timeout 300 "promptflow._sdk._serving.app:create_app()" | promptflow/src/promptflow/tests/test_configs/flows/export/linux/runit/promptflow-serve/run/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/export/linux/runit/promptflow-serve/run",
"repo_id": "promptflow",
"token_count": 147
} | 68 |
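Once gunicorn is serving the flow as above, the endpoint can be smoke-tested over HTTP; a minimal sketch, assuming promptflow serving's default /score route on port 8080 and a flow input named "text":

import requests

# Hypothetical smoke test against the container started by the runit script above.
resp = requests.post(
    "http://localhost:8080/score",
    json={"text": "Hello World!"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json())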
from promptflow import tool
@tool
def get_dict_val(key):
    # the input should already be deserialized into a dict by the flow runtime
print(key)
if not isinstance(key, dict):
raise TypeError(f"key must be a dict, got {type(key)}")
return {"value": f"{key}: {type(key)}", "origin_value": key}
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input/get_dict_val.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_dict_input/get_dict_val.py",
"repo_id": "promptflow",
"token_count": 103
} | 69 |
inputs:
text:
type: string
outputs:
output_prompt:
type: string
reference: ${echo_my_prompt.output}
nodes:
- inputs:
text: ${inputs.text}
name: echo_my_prompt
type: python
source:
type: code
path: hello.py
node_variants: {}
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_invalid_import/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_invalid_import/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 111
} | 70 |
inputs:
text:
type: string
default: Hello!
outputs:
out:
type: string
reference: ${My_First_Tool_00f8.output}
nodes:
- name: My_Second_Tool_usi3
type: python
source:
type: package
tool: my_tool_package.tools.my_tool_2.MyTool.my_tool
inputs:
connection: custom_strong_type_connection
input_text: ${inputs.text}
- name: My_First_Tool_00f8
type: python
source:
type: package
tool: my_tool_package.tools.my_tool_1.my_tool
inputs:
connection: custom_strong_type_connection
input_text: ${My_Second_Tool_usi3.output}
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_package_tool_with_custom_strong_type_connection/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_package_tool_with_custom_strong_type_connection/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 237
} | 71 |
import asyncio
from time import sleep
from promptflow import tool, trace
@trace
async def is_valid_name(name):
await asyncio.sleep(0.5)
return len(name) > 0
@trace
async def get_user_name(user_id):
await asyncio.sleep(0.5)
user_name = f"User {user_id}"
if not await is_valid_name(user_name):
raise ValueError(f"Invalid user name: {user_name}")
return user_name
@trace
async def format_greeting(user_name):
await asyncio.sleep(0.5)
return f"Hello, {user_name}!"
@tool
async def greetings(user_id):
user_name = await get_user_name(user_id)
greeting = await format_greeting(user_name)
print(greeting)
return {"greeting": greeting}
| promptflow/src/promptflow/tests/test_configs/flows/flow_with_trace_async/greetings.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/flow_with_trace_async/greetings.py",
"repo_id": "promptflow",
"token_count": 276
} | 72 |
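For quick local verification outside the flow runtime, the async tool above can be driven directly with asyncio; a minimal sketch with a hypothetical user_id value:

import asyncio
from greetings import greetings

# Hypothetical direct invocation of the async tool outside the promptflow runtime.
result = asyncio.run(greetings(user_id="42"))
print(result)  # expected: {"greeting": "Hello, User 42!"}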
import time
from promptflow import tool
def f1():
time.sleep(61)
return 0
def f2():
return f1()
@tool
def long_run_func():
return f2()
| promptflow/src/promptflow/tests/test_configs/flows/long_run/long_run.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/long_run/long_run.py",
"repo_id": "promptflow",
"token_count": 65
} | 73 |
from promptflow import tool
@tool
def my_python_tool(idx: int) -> int:
return idx | promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/my_python_tool.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/one_line_of_bulktest_timeout/my_python_tool.py",
"repo_id": "promptflow",
"token_count": 35
} | 74 |
{
"text": "Hello World!"
} | promptflow/src/promptflow/tests/test_configs/flows/python_stream_tools/inputs.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_stream_tools/inputs.json",
"repo_id": "promptflow",
"token_count": 14
} | 75 |
inputs:
image:
type: image
default: ""
outputs:
output:
type: image
reference: ${python_node_2.output}
nodes:
- name: python_node
type: python
source:
type: code
path: pick_an_image.py
inputs:
image_1: ${inputs.image}
image_2: logo_2.png
- name: python_node_2
type: python
source:
type: code
path: pick_an_image.py
inputs:
image_1: ${python_node.output}
image_2: logo_2.png
| promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_invalid_default_value/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/python_tool_with_invalid_default_value/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 193
} | 76 |
from pathlib import Path
from promptflow import tool
print(f"The script is {__file__}")
assert Path(__file__).is_absolute(), f"__file__ should be absolute path, got {__file__}"
@tool
def my_python_tool(input1: str) -> str:
from pathlib import Path
assert Path(__file__).name == "script_with___file__.py"
assert __name__ == "__pf_main__"
print(f"Prompt: {input1} {__file__}")
return f"Prompt: {input1} {__file__}"
| promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/script_with___file__.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/script_with___file__/script_with___file__.py",
"repo_id": "promptflow",
"token_count": 165
} | 77 |
{"num": "hello"} | promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_python_tool/inputs.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/simple_flow_with_python_tool/inputs.jsonl",
"repo_id": "promptflow",
"token_count": 6
} | 78 |
from promptflow import tool
from promptflow.contracts.types import AssistantDefinition
@tool
def test_assistant_definition(message: str, assistant_definition: AssistantDefinition):
assert assistant_definition.model == "mock_model"
assert assistant_definition.instructions == "mock_instructions"
invoker = assistant_definition.init_tool_invoker()
openai_definition = invoker.to_openai_tools()
assert len(openai_definition) == 1
assert openai_definition[0]["function"]["description"] == "This tool is used to echo the message back."
assert openai_definition[0]["function"]["parameters"]["properties"] == {
"message": {"description": "The message to echo.", "type": "string"}
}
assert openai_definition[0]["function"]["parameters"]["required"] == ["message"]
assert invoker.invoke_tool("echo", {"message": message}) == "Hello World!"
return assistant_definition.serialize()
| promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/test_assistant_definition.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/tool_with_assistant_definition/test_assistant_definition.py",
"repo_id": "promptflow",
"token_count": 283
} | 79 |
{"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
{"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
{"url": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", "answer": "Channel", "evidence": "Url"}
| promptflow/src/promptflow/tests/test_configs/flows/web_classification_input_dir/details.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/flows/web_classification_input_dir/details.jsonl",
"repo_id": "promptflow",
"token_count": 120
} | 80 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.032'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.073'
status:
code: 200
message: OK
- request:
body: '{"filters": [{"field": "type", "operator": "eq", "values": ["flows"]},
{"field": "annotations/isArchived", "operator": "eq", "values": ["false"]},
{"field": "properties/creationContext/createdBy/userTenantId", "operator": "eq",
"values": ["00000000-0000-0000-0000-000000000000"]}, {"field": "properties/creationContext/createdBy/userObjectId",
"operator": "eq", "values": ["00000000-0000-0000-0000-000000000000"]}], "freeTextSearch":
"", "order": [{"direction": "Desc", "field": "properties/creationContext/createdTime"}],
"pageSize": 3, "skip": 0, "includeTotalResultCount": true, "searchBuilder":
"AppendPrefix"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '614'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/index/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/entities
response:
body:
string: '{"totalCount": 23, "value": [{"relevancyScore": 1.0, "entityResourceName":
"promptflow-eastus", "highlights": {}, "usage": {"totalCount": 0}, "schemaId":
"b5db529c-c91d-5e96-8288-f175dd87470d", "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/flows/objectId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:2e039c90-c69a-4c52-9754-d3a93c6eaa7e",
"kind": "Unversioned", "annotations": {"archived": false, "tags": {"owner":
"sdk-test"}, "flowName": "7c7f43f9-9b23-424f-b93e-1852d5e3dc9e", "createdDate":
"2024-01-17T07:08:35.8574809Z", "lastModifiedDate": "2024-01-17T07:08:35.8574809Z",
"owner": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "userPrincipalName":
null}, "isArchived": false, "vmSize": null, "maxIdleTimeSeconds": null, "name":
null, "description": "test flow description"}, "properties": {"updatedTime":
"0001-01-01T00:00:00+00:00", "creationContext": {"createdTime": "2024-01-17T07:08:35.8574809+00:00",
"createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "userPrincipalName":
null}, "creationSource": null}, "flowId": "2e039c90-c69a-4c52-9754-d3a93c6eaa7e",
"experimentId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "flowType": "Default",
"flowDefinitionFilePath": "Users/unknown_user/promptflow/simple_hello_world-01-17-2024-15-08-13/flow.dag.yaml"},
"internal": {}, "updateSequence": 638410721158574809, "type": "flows", "version":
null, "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "entityObjectId":
"3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:2e039c90-c69a-4c52-9754-d3a93c6eaa7e",
"resourceType": "Workspace", "relationships": []}, {"relevancyScore": 1.0,
"entityResourceName": "promptflow-eastus", "highlights": {}, "usage": {"totalCount":
0}, "schemaId": "b5db529c-c91d-5e96-8288-f175dd87470d", "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/flows/objectId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:7b4314f4-2f4f-43bf-af55-e64d9e7d5361",
"kind": "Unversioned", "annotations": {"archived": false, "tags": {"owner":
"sdk-test"}, "flowName": "1d41d299-f9e5-4405-8bac-569f3ea1141b", "createdDate":
"2024-01-17T07:07:45.269399Z", "lastModifiedDate": "2024-01-17T07:07:45.2693991Z",
"owner": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "userPrincipalName":
null}, "isArchived": false, "vmSize": null, "maxIdleTimeSeconds": null, "name":
null, "description": "test flow description"}, "properties": {"updatedTime":
"0001-01-01T00:00:00+00:00", "creationContext": {"createdTime": "2024-01-17T07:07:45.269399+00:00",
"createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "userPrincipalName":
null}, "creationSource": null}, "flowId": "7b4314f4-2f4f-43bf-af55-e64d9e7d5361",
"experimentId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "flowType": "Default",
"flowDefinitionFilePath": "Users/unknown_user/promptflow/simple_hello_world-01-17-2024-15-07-23/flow.dag.yaml"},
"internal": {}, "updateSequence": 638410720652693991, "type": "flows", "version":
null, "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "entityObjectId":
"3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:7b4314f4-2f4f-43bf-af55-e64d9e7d5361",
"resourceType": "Workspace", "relationships": []}, {"relevancyScore": 1.0,
"entityResourceName": "promptflow-eastus", "highlights": {}, "usage": {"totalCount":
0}, "schemaId": "b5db529c-c91d-5e96-8288-f175dd87470d", "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/flows/objectId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:b28c9746-fac1-40ed-9d65-f93857d05655",
"kind": "Unversioned", "annotations": {"archived": false, "tags": {"owner":
"sdk-test"}, "flowName": "b9a89f25-0efa-40ad-86cd-15e16c739f42", "createdDate":
"2024-01-17T07:06:54.5784734Z", "lastModifiedDate": "2024-01-17T07:06:54.5784735Z",
"owner": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "userPrincipalName":
null}, "isArchived": false, "vmSize": null, "maxIdleTimeSeconds": null, "name":
null, "description": "test flow description"}, "properties": {"updatedTime":
"0001-01-01T00:00:00+00:00", "creationContext": {"createdTime": "2024-01-17T07:06:54.5784734+00:00",
"createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000", "userTenantId":
"00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "userPrincipalName":
null}, "creationSource": null}, "flowId": "b28c9746-fac1-40ed-9d65-f93857d05655",
"experimentId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "flowType": "Default",
"flowDefinitionFilePath": "Users/unknown_user/promptflow/simple_hello_world-01-17-2024-15-06-31/flow.dag.yaml"},
"internal": {}, "updateSequence": 638410720145784735, "type": "flows", "version":
null, "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "entityObjectId":
"3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:b28c9746-fac1-40ed-9d65-f93857d05655",
"resourceType": "Workspace", "relationships": []}], "nextSkip": 3, "entityContainerIdsToEntityContainerMetadata":
{"3e123da1-f9a5-4c91-9234-8d9ffbb39ff5": {"resourceId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5",
"subscriptionId": "96aede12-2f73-41cb-b983-6d11a904839b", "resourceGroup":
"promptflow", "resourceName": "promptflow-eastus", "entityContainerType":
"Workspace", "regions": [{"regionName": "eastus", "isPrimaryRegion": true}],
"tenantId": "00000000-0000-0000-0000-000000000000", "immutableResourceId":
"3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "isPublicResource": false}}, "resourcesNotQueriedReasons":
{}, "numberOfEntityContainersNotQueried": 0, "fanoutData": {"Multitenant":
{"nextSkip": 3, "isShardDone": false, "didShardFail": false, "totalCount":
23, "resourceIdsOnShardThisPage": ["3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"]}},
"regionalFanoutState": {"shardFanoutStates": [{"shardId": "Multitenant", "nextSkip":
3, "isPlanExecutionDone": false, "didPlanExecutionFail": false, "totalCount":
23, "resourceIdsOnShardThisPage": ["3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"]}],
"firstPageStartTime": null}, "shardErrors": {}, "canSupportSkip": true}'
headers:
connection:
- keep-alive
content-length:
- '6309'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.114'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_flow_operations_TestFlow_test_list_flows.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_flow_operations_TestFlow_test_list_flows.yaml",
"repo_id": "promptflow",
"token_count": 5582
} | 81 |
interactions:
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
"name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location":
"eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic",
"tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}'
headers:
cache-control:
- no-cache
content-length:
- '3630'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.029'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false
response:
body:
string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}]}'
headers:
cache-control:
- no-cache
content-length:
- '1372'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.059'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.099'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.125'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:53:29 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/numbers.jsonl
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '290'
content-md5:
- O6LvdPMlN/PM6b7fPh75Jw==
content-type:
- application/octet-stream
last-modified:
- Tue, 26 Dec 2023 09:52:29 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Tue, 26 Dec 2023 09:52:29 GMT
x-ms-meta-name:
- 229ce463-c199-4588-a9c4-c30e7a4bd25c
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- 4cda6a90-97ca-4ad5-b420-5e347369f614
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:53:30 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/numbers.jsonl
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.082'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.097'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:53:34 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/two/flow.dag.yaml
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '242'
content-md5:
- ySItjr6//pwsGdjLZfgq0A==
content-type:
- application/octet-stream
last-modified:
- Tue, 26 Dec 2023 09:53:17 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Tue, 26 Dec 2023 09:52:31 GMT
x-ms-meta-name:
- 5b163be8-ab29-4f62-bef0-cd64d56ab269
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:53:35 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/two/flow.dag.yaml
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/two/flow.dag.yaml", "runId":
"run1", "runDisplayName": "run1", "runExperimentName": "", "batchDataInput":
{"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/numbers.jsonl"},
"inputsMapping": {"number": "${data.value}"}, "connections": {}, "environmentVariables":
{}, "runtimeName": "fake-runtime-name", "sessionId": "000000000000000000000000000000000000000000000000",
"sessionSetupMode": "SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '783'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"run1"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '6.387'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run1
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_two", "type": "python", "source":
{"type": "code", "path": "mod_two.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_two.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_two.py", "type": "python", "inputs":
{"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_two.py", "function": "mod_two",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_two.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run1/flowRuns/run1",
"flowRunId": "run1", "flowRunDisplayName": "run1", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/7e5ac781513436b66626132fefb20d1f/numbers.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"number": "${data.value}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run1/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "d15d3732-36a4-45ac-b53b-e1fe695b2e77",
"studioPortalEndpoint": "https://ml.azure.com/runs/run1?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12860'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.458'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run1
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_two", "type": "python", "source":
{"type": "code", "path": "mod_two.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_two.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_two.py", "type": "python", "inputs":
{"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_two.py", "function": "mod_two",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_two.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run1/flowRuns/run1",
"flowRunId": "run1", "flowRunDisplayName": "run1", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/7e5ac781513436b66626132fefb20d1f/numbers.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"number": "${data.value}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run1/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "d15d3732-36a4-45ac-b53b-e1fe695b2e77",
"studioPortalEndpoint": "https://ml.azure.com/runs/run1?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12860'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.472'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run1
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_two", "type": "python", "source":
{"type": "code", "path": "mod_two.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_two.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_two.py", "type": "python", "inputs":
{"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_two.py", "function": "mod_two",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_two.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run1/flowRuns/run1",
"flowRunId": "run1", "flowRunDisplayName": "run1", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/7e5ac781513436b66626132fefb20d1f/numbers.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"number": "${data.value}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run1/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "d15d3732-36a4-45ac-b53b-e1fe695b2e77",
"studioPortalEndpoint": "https://ml.azure.com/runs/run1?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12860'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.262'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run1
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_two", "type": "python", "source":
{"type": "code", "path": "mod_two.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_two.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_two.py", "type": "python", "inputs":
{"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_two.py", "function": "mod_two",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_two.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run1/flowRuns/run1",
"flowRunId": "run1", "flowRunDisplayName": "run1", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/7e5ac781513436b66626132fefb20d1f/numbers.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"number": "${data.value}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run1/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "d15d3732-36a4-45ac-b53b-e1fe695b2e77",
"studioPortalEndpoint": "https://ml.azure.com/runs/run1?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12860'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.273'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run1
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_two", "type": "python", "source":
{"type": "code", "path": "mod_two.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_two.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_two.py", "type": "python", "inputs":
{"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_two.py", "function": "mod_two",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_two.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run1/flowRuns/run1",
"flowRunId": "run1", "flowRunDisplayName": "run1", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/7e5ac781513436b66626132fefb20d1f/numbers.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"number": "${data.value}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run1/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "d15d3732-36a4-45ac-b53b-e1fe695b2e77",
"studioPortalEndpoint": "https://ml.azure.com/runs/run1?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12860'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.576'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run1
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_two", "type": "python", "source":
{"type": "code", "path": "mod_two.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_two.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_two.py", "type": "python", "inputs":
{"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_two.py", "function": "mod_two",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_two.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run1/flowRuns/run1",
"flowRunId": "run1", "flowRunDisplayName": "run1", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/7e5ac781513436b66626132fefb20d1f/numbers.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"number": "${data.value}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run1/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "d15d3732-36a4-45ac-b53b-e1fe695b2e77",
"studioPortalEndpoint": "https://ml.azure.com/runs/run1?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12860'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.422'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run1
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_two", "type": "python", "source":
{"type": "code", "path": "mod_two.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_two.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_two.py", "type": "python", "inputs":
{"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_two.py", "function": "mod_two",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_two.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run1/flowRuns/run1",
"flowRunId": "run1", "flowRunDisplayName": "run1", "batchDataInput": {"dataUri":
"azureml://datastores/workspaceblobstore/paths/LocalUpload/7e5ac781513436b66626132fefb20d1f/numbers.jsonl"},
"flowRunType": "FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci",
"inputsMapping": {"number": "${data.value}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run1/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "d15d3732-36a4-45ac-b53b-e1fe695b2e77",
"studioPortalEndpoint": "https://ml.azure.com/runs/run1?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12860'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.279'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run1/childRuns?endIndex=24&startIndex=0
response:
body:
string: '[{"run_id": "run1_2", "status": "Completed", "error": null, "inputs":
{"number": 2, "line_number": 2}, "output": {"output": 2}, "metrics": null,
"request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:58.36557Z",
"end_time": "2024-01-12T08:53:58.379665Z", "index": 2, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 2}, "output": {"value": 2},
"start_time": 1705049638.376116, "end_time": 1705049638.377043, "error": null,
"children": null, "node_name": "mod_two"}], "variant_id": "", "name": "",
"description": "", "tags": null, "system_metrics": {"duration": 0.014095,
"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "result":
{"output": 2}, "upload_metrics": false}, {"run_id": "run1_1", "status": "Failed",
"error": {"message": "Execution failure in ''mod_two'': (Exception) cannot
mod 2!", "messageFormat": "Execution failure in ''{node_name}'': {error_type_and_message}",
"messageParameters": {"node_name": "mod_two", "error_type_and_message": "(Exception)
cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "code": "UserError",
"innerError": {"code": "ToolExecutionError", "innerError": null}, "additionalInfo":
[{"type": "ToolExecutionErrorDetails", "info": {"type": "Exception", "message":
"cannot mod 2!", "traceback": "Traceback (most recent call last):\n File
\"/mnt/host/service/app/39649/requests/run1/mod_two.py\", line 7, in mod_two\n raise
Exception(\"cannot mod 2!\")\nException: cannot mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_two'': (Exception) cannot mod 2!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null}}}, "inputs": {"number": 1, "line_number": 1}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:58.382319Z",
"end_time": "2024-01-12T08:53:58.499971Z", "index": 1, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 1}, "output": null, "start_time":
1705049638.40109, "end_time": 1705049638.401905, "error": {"message": "cannot
mod 2!", "type": "Exception"}, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.117652, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run1_0", "status":
"Completed", "error": null, "inputs": {"number": 0, "line_number": 0}, "output":
{"output": 0}, "metrics": null, "request": null, "parent_run_id": "run1",
"root_run_id": "run1", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:53:58.298983Z", "end_time": "2024-01-12T08:53:58.326471Z",
"index": 0, "api_calls": [{"name": "mod_two", "type": "Tool", "inputs": {"number":
0}, "output": {"value": 0}, "start_time": 1705049638.316181, "end_time": 1705049638.316979,
"error": null, "children": null, "node_name": "mod_two"}], "variant_id": "",
"name": "", "description": "", "tags": null, "system_metrics": {"duration":
0.027488, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
"result": {"output": 0}, "upload_metrics": false}, {"run_id": "run1_4", "status":
"Completed", "error": null, "inputs": {"number": 4, "line_number": 4}, "output":
{"output": 4}, "metrics": null, "request": null, "parent_run_id": "run1",
"root_run_id": "run1", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:53:58.445979Z", "end_time": "2024-01-12T08:53:58.472475Z",
"index": 4, "api_calls": [{"name": "mod_two", "type": "Tool", "inputs": {"number":
4}, "output": {"value": 4}, "start_time": 1705049638.469504, "end_time": 1705049638.470396,
"error": null, "children": null, "node_name": "mod_two"}], "variant_id": "",
"name": "", "description": "", "tags": null, "system_metrics": {"duration":
0.026496, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
"result": {"output": 4}, "upload_metrics": false}, {"run_id": "run1_3", "status":
"Failed", "error": {"message": "Execution failure in ''mod_two'': (Exception)
cannot mod 2!", "messageFormat": "Execution failure in ''{node_name}'': {error_type_and_message}",
"messageParameters": {"node_name": "mod_two", "error_type_and_message": "(Exception)
cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "code": "UserError",
"innerError": {"code": "ToolExecutionError", "innerError": null}, "additionalInfo":
[{"type": "ToolExecutionErrorDetails", "info": {"type": "Exception", "message":
"cannot mod 2!", "traceback": "Traceback (most recent call last):\n File
\"/mnt/host/service/app/39649/requests/run1/mod_two.py\", line 7, in mod_two\n raise
Exception(\"cannot mod 2!\")\nException: cannot mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_two'': (Exception) cannot mod 2!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null}}}, "inputs": {"number": 3, "line_number": 3}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:58.440354Z",
"end_time": "2024-01-12T08:53:58.453661Z", "index": 3, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 3}, "output": null, "start_time":
1705049638.445455, "end_time": 1705049638.448284, "error": {"message": "cannot
mod 2!", "type": "Exception"}, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.013307, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run1_7", "status":
"Failed", "error": {"message": "Execution failure in ''mod_two'': (Exception)
cannot mod 2!", "messageFormat": "Execution failure in ''{node_name}'': {error_type_and_message}",
"messageParameters": {"node_name": "mod_two", "error_type_and_message": "(Exception)
cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "code": "UserError",
"innerError": {"code": "ToolExecutionError", "innerError": null}, "additionalInfo":
[{"type": "ToolExecutionErrorDetails", "info": {"type": "Exception", "message":
"cannot mod 2!", "traceback": "Traceback (most recent call last):\n File
\"/mnt/host/service/app/39649/requests/run1/mod_two.py\", line 7, in mod_two\n raise
Exception(\"cannot mod 2!\")\nException: cannot mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_two'': (Exception) cannot mod 2!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null}}}, "inputs": {"number": 7, "line_number": 7}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:58.508983Z",
"end_time": "2024-01-12T08:53:58.621053Z", "index": 7, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 7}, "output": null, "start_time":
1705049638.512829, "end_time": 1705049638.513634, "error": {"message": "cannot
mod 2!", "type": "Exception"}, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.11207, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run1_8", "status":
"Completed", "error": null, "inputs": {"number": 8, "line_number": 8}, "output":
{"output": 8}, "metrics": null, "request": null, "parent_run_id": "run1",
"root_run_id": "run1", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:53:58.529121Z", "end_time": "2024-01-12T08:53:58.538356Z",
"index": 8, "api_calls": [{"name": "mod_two", "type": "Tool", "inputs": {"number":
8}, "output": {"value": 8}, "start_time": 1705049638.535074, "end_time": 1705049638.535882,
"error": null, "children": null, "node_name": "mod_two"}], "variant_id": "",
"name": "", "description": "", "tags": null, "system_metrics": {"duration":
0.009235, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
"result": {"output": 8}, "upload_metrics": false}, {"run_id": "run1_6", "status":
"Completed", "error": null, "inputs": {"number": 6, "line_number": 6}, "output":
{"output": 6}, "metrics": null, "request": null, "parent_run_id": "run1",
"root_run_id": "run1", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:53:58.481907Z", "end_time": "2024-01-12T08:53:58.489299Z",
"index": 6, "api_calls": [{"name": "mod_two", "type": "Tool", "inputs": {"number":
6}, "output": {"value": 6}, "start_time": 1705049638.486027, "end_time": 1705049638.487006,
"error": null, "children": null, "node_name": "mod_two"}], "variant_id": "",
"name": "", "description": "", "tags": null, "system_metrics": {"duration":
0.007392, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0},
"result": {"output": 6}, "upload_metrics": false}, {"run_id": "run1_9", "status":
"Failed", "error": {"message": "Execution failure in ''mod_two'': (Exception)
cannot mod 2!", "messageFormat": "Execution failure in ''{node_name}'': {error_type_and_message}",
"messageParameters": {"node_name": "mod_two", "error_type_and_message": "(Exception)
cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "code": "UserError",
"innerError": {"code": "ToolExecutionError", "innerError": null}, "additionalInfo":
[{"type": "ToolExecutionErrorDetails", "info": {"type": "Exception", "message":
"cannot mod 2!", "traceback": "Traceback (most recent call last):\n File
\"/mnt/host/service/app/39649/requests/run1/mod_two.py\", line 7, in mod_two\n raise
Exception(\"cannot mod 2!\")\nException: cannot mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_two'': (Exception) cannot mod 2!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null}}}, "inputs": {"number": 9, "line_number": 9}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:58.529591Z",
"end_time": "2024-01-12T08:53:58.643284Z", "index": 9, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 9}, "output": null, "start_time":
1705049638.534116, "end_time": 1705049638.535141, "error": {"message": "cannot
mod 2!", "type": "Exception"}, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.113693, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run1_10", "status":
"Completed", "error": null, "inputs": {"number": 10, "line_number": 10}, "output":
{"output": 10}, "metrics": null, "request": null, "parent_run_id": "run1",
"root_run_id": "run1", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:53:58.567671Z", "end_time": "2024-01-12T08:53:58.573553Z",
"index": 10, "api_calls": [{"name": "mod_two", "type": "Tool", "inputs": {"number":
10}, "output": {"value": 10}, "start_time": 1705049638.570297, "end_time":
1705049638.571094, "error": null, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.005882, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": {"output": 10}, "upload_metrics": false}, {"run_id": "run1_11",
"status": "Failed", "error": {"message": "Execution failure in ''mod_two'':
(Exception) cannot mod 2!", "messageFormat": "Execution failure in ''{node_name}'':
{error_type_and_message}", "messageParameters": {"node_name": "mod_two", "error_type_and_message":
"(Exception) cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "code":
"UserError", "innerError": {"code": "ToolExecutionError", "innerError": null},
"additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 2!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\nException: cannot
mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_two'': (Exception) cannot mod 2!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null}}}, "inputs": {"number": 11, "line_number": 11}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:58.623786Z",
"end_time": "2024-01-12T08:53:58.634409Z", "index": 11, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 11}, "output": null, "start_time":
1705049638.627165, "end_time": 1705049638.629035, "error": {"message": "cannot
mod 2!", "type": "Exception"}, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.010623, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run1_5", "status":
"Failed", "error": {"message": "Execution failure in ''mod_two'': (Exception)
cannot mod 2!", "messageFormat": "Execution failure in ''{node_name}'': {error_type_and_message}",
"messageParameters": {"node_name": "mod_two", "error_type_and_message": "(Exception)
cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "code": "UserError",
"innerError": {"code": "ToolExecutionError", "innerError": null}, "additionalInfo":
[{"type": "ToolExecutionErrorDetails", "info": {"type": "Exception", "message":
"cannot mod 2!", "traceback": "Traceback (most recent call last):\n File
\"/mnt/host/service/app/39649/requests/run1/mod_two.py\", line 7, in mod_two\n raise
Exception(\"cannot mod 2!\")\nException: cannot mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_two'': (Exception) cannot mod 2!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null}}}, "inputs": {"number": 5, "line_number": 5}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:58.478106Z",
"end_time": "2024-01-12T08:53:58.485903Z", "index": 5, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 5}, "output": null, "start_time":
1705049638.480935, "end_time": 1705049638.481815, "error": {"message": "cannot
mod 2!", "type": "Exception"}, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.007797, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run1_12", "status":
"Completed", "error": null, "inputs": {"number": 12, "line_number": 12}, "output":
{"output": 12}, "metrics": null, "request": null, "parent_run_id": "run1",
"root_run_id": "run1", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:53:58.857735Z", "end_time": "2024-01-12T08:53:58.8594Z",
"index": 12, "api_calls": [{"name": "mod_two", "type": "Tool", "inputs": {"number":
12}, "output": {"value": 12}, "start_time": 1705049638.85876, "end_time":
1705049638.858846, "error": null, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.001665, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": {"output": 12}, "upload_metrics": false}, {"run_id": "run1_13",
"status": "Failed", "error": {"message": "Execution failure in ''mod_two'':
(Exception) cannot mod 2!", "messageFormat": "Execution failure in ''{node_name}'':
{error_type_and_message}", "messageParameters": {"node_name": "mod_two", "error_type_and_message":
"(Exception) cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "code":
"UserError", "innerError": {"code": "ToolExecutionError", "innerError": null},
"additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 2!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\nException: cannot
mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_two'': (Exception) cannot mod 2!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null}}}, "inputs": {"number": 13, "line_number": 13}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:59.081618Z",
"end_time": "2024-01-12T08:53:59.084304Z", "index": 13, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 13}, "output": null, "start_time":
1705049639.082711, "end_time": 1705049639.082963, "error": {"message": "cannot
mod 2!", "type": "Exception"}, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.002686, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run1_14", "status":
"Completed", "error": null, "inputs": {"number": 14, "line_number": 14}, "output":
{"output": 14}, "metrics": null, "request": null, "parent_run_id": "run1",
"root_run_id": "run1", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:53:59.095533Z", "end_time": "2024-01-12T08:53:59.097688Z",
"index": 14, "api_calls": [{"name": "mod_two", "type": "Tool", "inputs": {"number":
14}, "output": {"value": 14}, "start_time": 1705049639.096824, "end_time":
1705049639.096905, "error": null, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.002155, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": {"output": 14}, "upload_metrics": false}, {"run_id": "run1_15",
"status": "Failed", "error": {"message": "Execution failure in ''mod_two'':
(Exception) cannot mod 2!", "messageFormat": "Execution failure in ''{node_name}'':
{error_type_and_message}", "messageParameters": {"node_name": "mod_two", "error_type_and_message":
"(Exception) cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "code":
"UserError", "innerError": {"code": "ToolExecutionError", "innerError": null},
"additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 2!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\nException: cannot
mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_two'': (Exception) cannot mod 2!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null}}}, "inputs": {"number": 15, "line_number": 15}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:59.11042Z",
"end_time": "2024-01-12T08:53:59.112899Z", "index": 15, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 15}, "output": null, "start_time":
1705049639.111395, "end_time": 1705049639.111456, "error": {"message": "cannot
mod 2!", "type": "Exception"}, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.002479, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run1_16", "status":
"Completed", "error": null, "inputs": {"number": 16, "line_number": 16}, "output":
{"output": 16}, "metrics": null, "request": null, "parent_run_id": "run1",
"root_run_id": "run1", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:53:59.122603Z", "end_time": "2024-01-12T08:53:59.125415Z",
"index": 16, "api_calls": [{"name": "mod_two", "type": "Tool", "inputs": {"number":
16}, "output": {"value": 16}, "start_time": 1705049639.123932, "end_time":
1705049639.124128, "error": null, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.002812, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": {"output": 16}, "upload_metrics": false}, {"run_id": "run1_17",
"status": "Failed", "error": {"message": "Execution failure in ''mod_two'':
(Exception) cannot mod 2!", "messageFormat": "Execution failure in ''{node_name}'':
{error_type_and_message}", "messageParameters": {"node_name": "mod_two", "error_type_and_message":
"(Exception) cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "code":
"UserError", "innerError": {"code": "ToolExecutionError", "innerError": null},
"additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 2!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\nException: cannot
mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_two'': (Exception) cannot mod 2!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null}}}, "inputs": {"number": 17, "line_number": 17}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:59.201596Z",
"end_time": "2024-01-12T08:53:59.204779Z", "index": 17, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 17}, "output": null, "start_time":
1705049639.202943, "end_time": 1705049639.203028, "error": {"message": "cannot
mod 2!", "type": "Exception"}, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.003183, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run1_18", "status":
"Completed", "error": null, "inputs": {"number": 18, "line_number": 18}, "output":
{"output": 18}, "metrics": null, "request": null, "parent_run_id": "run1",
"root_run_id": "run1", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:53:59.216109Z", "end_time": "2024-01-12T08:53:59.21858Z",
"index": 18, "api_calls": [{"name": "mod_two", "type": "Tool", "inputs": {"number":
18}, "output": {"value": 18}, "start_time": 1705049639.217608, "end_time":
1705049639.217688, "error": null, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.002471, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": {"output": 18}, "upload_metrics": false}, {"run_id": "run1_19",
"status": "Failed", "error": {"message": "Execution failure in ''mod_two'':
(Exception) cannot mod 2!", "messageFormat": "Execution failure in ''{node_name}'':
{error_type_and_message}", "messageParameters": {"node_name": "mod_two", "error_type_and_message":
"(Exception) cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "code":
"UserError", "innerError": {"code": "ToolExecutionError", "innerError": null},
"additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 2!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\nException: cannot
mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_two'': (Exception) cannot mod 2!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null}}}, "inputs": {"number": 19, "line_number": 19}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run1", "root_run_id": "run1", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:53:59.231917Z",
"end_time": "2024-01-12T08:53:59.267557Z", "index": 19, "api_calls": [{"name":
"mod_two", "type": "Tool", "inputs": {"number": 19}, "output": null, "start_time":
1705049639.233619, "end_time": 1705049639.233699, "error": {"message": "cannot
mod 2!", "type": "Exception"}, "children": null, "node_name": "mod_two"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.03564, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}]'
headers:
connection:
- keep-alive
content-length:
- '57297'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.805'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run1/childRuns?endIndex=49&startIndex=25
response:
body:
string: '[]'
headers:
connection:
- keep-alive
content-length:
- '2'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '0.746'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore
response:
body:
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
"name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
"properties": {"description": null, "tags": null, "properties": null, "isDefault":
true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
"00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
"https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
"systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
"779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
"lastModifiedByType": "Application"}}'
headers:
cache-control:
- no-cache
content-length:
- '1227'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding,Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.077'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '0'
User-Agent:
- promptflow-sdk/0.0.1 azure-ai-ml/1.12.1 azsdk-python-mgmt-machinelearningservices/0.1.0
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets
response:
body:
string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}'
headers:
cache-control:
- no-cache
content-length:
- '134'
content-type:
- application/json; charset=utf-8
expires:
- '-1'
pragma:
- no-cache
strict-transport-security:
- max-age=31536000; includeSubDomains
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.118'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:55:07 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/three/flow.dag.yaml
response:
body:
string: ''
headers:
accept-ranges:
- bytes
content-length:
- '248'
content-md5:
- B3pfhMEmUOazTzjlKaw6Sw==
content-type:
- application/octet-stream
last-modified:
- Tue, 26 Dec 2023 10:04:33 GMT
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
vary:
- Origin
x-ms-blob-type:
- BlockBlob
x-ms-creation-time:
- Tue, 26 Dec 2023 09:54:37 GMT
x-ms-meta-name:
- 613ead8f-69ca-4c47-9cba-01f0dd473279
x-ms-meta-upload_status:
- completed
x-ms-meta-version:
- '1'
x-ms-version:
- '2023-11-03'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/xml
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- azsdk-python-storage-blob/12.19.0 Python/3.10.13 (Windows-10-10.0.22631-SP0)
x-ms-date:
- Fri, 12 Jan 2024 08:55:08 GMT
x-ms-version:
- '2023-11-03'
method: HEAD
uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/three/flow.dag.yaml
response:
body:
string: ''
headers:
server:
- Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0
transfer-encoding:
- chunked
vary:
- Origin
x-ms-error-code:
- BlobNotFound
x-ms-version:
- '2023-11-03'
status:
code: 404
message: The specified blob does not exist.
- request:
body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath":
"LocalUpload/000000000000000000000000000000000000/three/flow.dag.yaml", "runId":
"run2", "runDisplayName": "run2", "runExperimentName": "", "variantRunId": "run1",
"batchDataInput": {}, "inputsMapping": {"number": "${run.outputs.output}"},
"connections": {}, "environmentVariables": {}, "runtimeName": "fake-runtime-name",
"sessionId": "000000000000000000000000000000000000000000000000", "sessionSetupMode":
"SystemWait", "flowLineageId": "0000000000000000000000000000000000000000000000000000000000000000",
"runDisplayNameGenerationType": "UserProvidedMacro"}'
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '734'
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: POST
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit
response:
body:
string: '"run2"'
headers:
connection:
- keep-alive
content-length:
- '38'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '6.653'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run2
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_three", "type": "python", "source":
{"type": "code", "path": "mod_three.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_three.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_three.py", "type": "python",
"inputs": {"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_three.py", "function": "mod_three",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_three.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run2/flowRuns/run2",
"flowRunId": "run2", "flowRunDisplayName": "run2", "batchDataInput": {}, "flowRunType":
"FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping":
{"number": "${run.outputs.output}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run2/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "a25bab13-d2d7-4c36-83bf-96979de95507",
"studioPortalEndpoint": "https://ml.azure.com/runs/run2?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12766'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.225'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run2
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_three", "type": "python", "source":
{"type": "code", "path": "mod_three.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_three.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_three.py", "type": "python",
"inputs": {"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_three.py", "function": "mod_three",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_three.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run2/flowRuns/run2",
"flowRunId": "run2", "flowRunDisplayName": "run2", "batchDataInput": {}, "flowRunType":
"FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping":
{"number": "${run.outputs.output}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run2/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "a25bab13-d2d7-4c36-83bf-96979de95507",
"studioPortalEndpoint": "https://ml.azure.com/runs/run2?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12766'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.496'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run2
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_three", "type": "python", "source":
{"type": "code", "path": "mod_three.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_three.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_three.py", "type": "python",
"inputs": {"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_three.py", "function": "mod_three",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_three.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run2/flowRuns/run2",
"flowRunId": "run2", "flowRunDisplayName": "run2", "batchDataInput": {}, "flowRunType":
"FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping":
{"number": "${run.outputs.output}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run2/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "a25bab13-d2d7-4c36-83bf-96979de95507",
"studioPortalEndpoint": "https://ml.azure.com/runs/run2?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12766'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.245'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run2
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_three", "type": "python", "source":
{"type": "code", "path": "mod_three.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_three.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_three.py", "type": "python",
"inputs": {"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_three.py", "function": "mod_three",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_three.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run2/flowRuns/run2",
"flowRunId": "run2", "flowRunDisplayName": "run2", "batchDataInput": {}, "flowRunType":
"FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping":
{"number": "${run.outputs.output}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run2/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "a25bab13-d2d7-4c36-83bf-96979de95507",
"studioPortalEndpoint": "https://ml.azure.com/runs/run2?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12766'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.381'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run2
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_three", "type": "python", "source":
{"type": "code", "path": "mod_three.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_three.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_three.py", "type": "python",
"inputs": {"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_three.py", "function": "mod_three",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_three.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run2/flowRuns/run2",
"flowRunId": "run2", "flowRunDisplayName": "run2", "batchDataInput": {}, "flowRunType":
"FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping":
{"number": "${run.outputs.output}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run2/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "a25bab13-d2d7-4c36-83bf-96979de95507",
"studioPortalEndpoint": "https://ml.azure.com/runs/run2?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12766'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.410'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run2
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_three", "type": "python", "source":
{"type": "code", "path": "mod_three.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_three.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_three.py", "type": "python",
"inputs": {"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_three.py", "function": "mod_three",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_three.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run2/flowRuns/run2",
"flowRunId": "run2", "flowRunDisplayName": "run2", "batchDataInput": {}, "flowRunType":
"FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping":
{"number": "${run.outputs.output}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run2/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "a25bab13-d2d7-4c36-83bf-96979de95507",
"studioPortalEndpoint": "https://ml.azure.com/runs/run2?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12766'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.408'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run2
response:
body:
string: '{"flowGraph": {"nodes": [{"name": "mod_three", "type": "python", "source":
{"type": "code", "path": "mod_three.py"}, "inputs": {"number": "${inputs.number}"},
"tool": "mod_three.py", "reduce": false}], "tools": [{"name": "Content Safety
(Text Analyze)", "type": "python", "inputs": {"connection": {"type": ["AzureContentSafetyConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"hate_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"self_harm_category": {"type": ["string"], "default": "medium_sensitivity",
"enum": ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"sexual_category": {"type": ["string"], "default": "medium_sensitivity", "enum":
["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"text": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "violence_category": {"type": ["string"],
"default": "medium_sensitivity", "enum": ["disable", "low_sensitivity", "medium_sensitivity",
"high_sensitivity"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Use Azure Content Safety to detect
harmful content.", "module": "promptflow.tools.azure_content_safety", "function":
"analyze_text", "is_builtin": true, "package": "promptflow-tools", "package_version":
"0.0.216", "enable_kwargs": false, "deprecated_tools": ["content_safety_text.tools.content_safety_text_tool.analyze_text"],
"tool_state": "stable"}, {"name": "Embedding", "type": "python", "inputs":
{"connection": {"type": ["AzureOpenAIConnection", "OpenAIConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "deployment_name":
{"type": ["string"], "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"],
"model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"],
"capabilities": {"completion": false, "chat_completion": false, "embeddings":
true}, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "input": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model": {"type": ["string"], "enum": ["text-embedding-ada-002",
"text-search-ada-doc-001", "text-search-ada-query-001"], "enabled_by": "connection",
"enabled_by_type": ["OpenAIConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Open AI''s embedding
model to create an embedding vector representing the input text.", "module":
"promptflow.tools.embedding", "function": "embedding", "is_builtin": true,
"package": "promptflow-tools", "package_version": "0.0.216", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Open Source LLM", "type": "custom_llm",
"inputs": {"api": {"type": ["string"], "enum": ["chat", "completion"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CustomConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "deployment_name": {"type": ["string"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "endpoint_name":
{"type": ["string"], "default": "-- please enter an endpoint name --", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_new_tokens":
{"type": ["int"], "default": 500, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "model_kwargs": {"type": ["object"], "default":
"{}", "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default", "advanced": true}, "temperature": {"type": ["double"], "default":
1.0, "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "top_p": {"type": ["double"], "default": 1.0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default", "advanced": true}},
"description": "Use an Open Source model from the Azure Model catalog, deployed
to an AzureML Online Endpoint for LLM Chat or Completion API calls.", "module":
"promptflow.tools.open_source_llm", "class_name": "OpenSourceLLM", "function":
"call", "icon": "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==",
"is_builtin": true, "package": "promptflow-tools", "package_version": "0.0.216",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "OpenAI GPT-4V",
"type": "custom_llm", "inputs": {"connection": {"type": ["OpenAIConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"frequency_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "max_tokens": {"type":
["int"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "model": {"type": ["string"], "enum": ["gpt-4-vision-preview"],
"allow_manual_entry": true, "is_multi_select": false, "input_type": "default"},
"presence_penalty": {"type": ["double"], "default": 0, "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "stop": {"type":
["list"], "default": "", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "temperature": {"type": ["double"], "default": 1,
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"top_p": {"type": ["double"], "default": 1, "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use OpenAI GPT-4V to leverage
vision ability.", "module": "promptflow.tools.openai_gpt4v", "class_name":
"OpenAI", "function": "chat", "is_builtin": true, "package": "promptflow-tools",
"package_version": "0.0.216", "default_prompt": "# system:\nAs an AI assistant,
your task involves interpreting images and responding to questions about the
image.\nRemember to provide accurate answers based on the information present
in the image.\n\n# user:\nCan you tell me what the image depicts?\n\n",
"enable_kwargs": false, "tool_state": "stable"}, {"name": "Serp API", "type":
"python", "inputs": {"connection": {"type": ["SerpConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "engine": {"type":
["string"], "default": "google", "enum": ["google", "bing"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "location": {"type":
["string"], "default": "", "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "num": {"type": ["int"], "default": "10",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"query": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "safe": {"type": ["string"], "default": "off",
"enum": ["active", "off"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Use Serp API to obtain search
results from a specific search engine.", "module": "promptflow.tools.serpapi",
"class_name": "SerpAPI", "function": "search", "is_builtin": true, "package":
"promptflow-tools", "package_version": "0.0.216", "enable_kwargs": false,
"tool_state": "stable"}, {"name": "Faiss Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "top_k": {"type": ["int"], "default": "3",
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"vector": {"type": ["list"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "description": "Search vector based query
from the FAISS index file.", "module": "promptflow_vectordb.tool.faiss_index_lookup",
"class_name": "FaissIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector DB Lookup", "type": "python",
"inputs": {"class_name": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["WeaviateConnection"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "collection_name": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["QdrantConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "connection": {"type":
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "index_name": {"type":
["string"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection"],
"allow_manual_entry": false, "is_multi_select": false, "input_type": "default"},
"search_filters": {"type": ["object"], "enabled_by": "connection", "enabled_by_type":
["CognitiveSearchConnection", "QdrantConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}, "search_params": {"type":
["object"], "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection",
"QdrantConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "text_field": {"type": ["string"], "enabled_by":
"connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection",
"WeaviateConnection"], "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}, "top_k": {"type": ["int"], "default": "3", "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "vector": {"type":
["list"], "allow_manual_entry": false, "is_multi_select": false, "input_type":
"default"}, "vector_field": {"type": ["string"], "enabled_by": "connection",
"enabled_by_type": ["CognitiveSearchConnection"], "allow_manual_entry": false,
"is_multi_select": false, "input_type": "default"}}, "description": "Search
vector based query from existing Vector Database.", "module": "promptflow_vectordb.tool.vector_db_lookup",
"class_name": "VectorDBLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "Vector Index Lookup", "type": "python",
"inputs": {"path": {"type": ["string"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}, "query": {"type": ["object"], "allow_manual_entry":
false, "is_multi_select": false, "input_type": "default"}, "top_k": {"type":
["int"], "default": "3", "allow_manual_entry": false, "is_multi_select": false,
"input_type": "default"}}, "description": "Search text or vector based query
from AzureML Vector Index.", "module": "promptflow_vectordb.tool.vector_index_lookup",
"class_name": "VectorIndexLookup", "function": "search", "is_builtin": true,
"package": "promptflow-vectordb", "package_version": "0.0.1", "enable_kwargs":
false, "tool_state": "stable"}, {"name": "mod_three.py", "type": "python",
"inputs": {"number": {"type": ["int"], "allow_manual_entry": false, "is_multi_select":
false, "input_type": "default"}}, "source": "mod_three.py", "function": "mod_three",
"is_builtin": false, "enable_kwargs": false, "tool_state": "stable"}], "inputs":
{"number": {"type": "int", "is_chat_input": false}}, "outputs": {"output":
{"type": "int", "reference": "${mod_three.output.value}", "evaluation_only":
false, "is_chat_output": false}}}, "flowRunResourceId": "azureml://locations/eastus/workspaces/00000/flows/run2/flowRuns/run2",
"flowRunId": "run2", "flowRunDisplayName": "run2", "batchDataInput": {}, "flowRunType":
"FlowRun", "flowType": "Default", "runtimeName": "test-runtime-ci", "inputsMapping":
{"number": "${run.outputs.output}"}, "outputDatastoreName": "workspaceblobstore",
"childRunBasePath": "promptflow/PromptFlowArtifacts/run2/flow_artifacts",
"flowDagFileRelativePath": "flow.dag.yaml", "flowSnapshotId": "a25bab13-d2d7-4c36-83bf-96979de95507",
"studioPortalEndpoint": "https://ml.azure.com/runs/run2?wsid=/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000"}'
headers:
connection:
- keep-alive
content-length:
- '12766'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.451'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run2/childRuns?endIndex=24&startIndex=0
response:
body:
string: '[{"run_id": "run2_0", "status": "Completed", "error": null, "inputs":
{"number": 0, "line_number": 0}, "output": {"output": 0}, "metrics": null,
"request": null, "parent_run_id": "run2", "root_run_id": "run2", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:55:31.465237Z",
"end_time": "2024-01-12T08:55:31.475743Z", "index": 0, "api_calls": [{"name":
"mod_three", "type": "Tool", "inputs": {"number": 0}, "output": {"value":
0}, "start_time": 1705049731.472262, "end_time": 1705049731.473128, "error":
null, "children": null, "node_name": "mod_three"}], "variant_id": "", "name":
"", "description": "", "tags": null, "system_metrics": {"duration": 0.010506,
"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}, "result":
{"output": 0}, "upload_metrics": false}, {"run_id": "run2_12", "status": "Completed",
"error": null, "inputs": {"number": 12, "line_number": 12}, "output": {"output":
12}, "metrics": null, "request": null, "parent_run_id": "run2", "root_run_id":
"run2", "source_run_id": null, "flow_id": "default_flow_id", "start_time":
"2024-01-12T08:55:31.656075Z", "end_time": "2024-01-12T08:55:31.661754Z",
"index": 12, "api_calls": [{"name": "mod_three", "type": "Tool", "inputs":
{"number": 12}, "output": {"value": 12}, "start_time": 1705049731.658932,
"end_time": 1705049731.659931, "error": null, "children": null, "node_name":
"mod_three"}], "variant_id": "", "name": "", "description": "", "tags": null,
"system_metrics": {"duration": 0.005679, "prompt_tokens": 0, "completion_tokens":
0, "total_tokens": 0}, "result": {"output": 12}, "upload_metrics": false},
{"run_id": "run2_2", "status": "Failed", "error": {"message": "Execution failure
in ''mod_three'': (Exception) cannot mod 3!", "messageFormat": "Execution
failure in ''{node_name}'': {error_type_and_message}", "messageParameters":
{"node_name": "mod_three", "error_type_and_message": "(Exception) cannot mod
3!"}, "referenceCode": "Tool/__pf_main__", "code": "UserError", "innerError":
{"code": "ToolExecutionError", "innerError": null}, "additionalInfo": [{"type":
"ToolExecutionErrorDetails", "info": {"type": "Exception", "message": "cannot
mod 3!", "traceback": "Traceback (most recent call last):\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n", "filename": "/mnt/host/service/app/39649/requests/run2/mod_three.py",
"lineno": 7, "name": "mod_three"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_three'': (Exception) cannot mod 3!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 3!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\n", "innerException":
null}}}, "inputs": {"number": 2, "line_number": 2}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run2", "root_run_id": "run2", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:55:31.490355Z",
"end_time": "2024-01-12T08:55:31.575384Z", "index": 2, "api_calls": [{"name":
"mod_three", "type": "Tool", "inputs": {"number": 2}, "output": null, "start_time":
1705049731.497823, "end_time": 1705049731.498797, "error": {"message": "cannot
mod 3!", "type": "Exception"}, "children": null, "node_name": "mod_three"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.085029, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run2_4", "status":
"Failed", "error": {"message": "Execution failure in ''mod_three'': (Exception)
cannot mod 3!", "messageFormat": "Execution failure in ''{node_name}'': {error_type_and_message}",
"messageParameters": {"node_name": "mod_three", "error_type_and_message":
"(Exception) cannot mod 3!"}, "referenceCode": "Tool/__pf_main__", "code":
"UserError", "innerError": {"code": "ToolExecutionError", "innerError": null},
"additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 3!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n", "filename": "/mnt/host/service/app/39649/requests/run2/mod_three.py",
"lineno": 7, "name": "mod_three"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_three'': (Exception) cannot mod 3!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 3!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\n", "innerException":
null}}}, "inputs": {"number": 4, "line_number": 4}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run2", "root_run_id": "run2", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:55:31.535972Z",
"end_time": "2024-01-12T08:55:31.773764Z", "index": 4, "api_calls": [{"name":
"mod_three", "type": "Tool", "inputs": {"number": 4}, "output": null, "start_time":
1705049731.548995, "end_time": 1705049731.550238, "error": {"message": "cannot
mod 3!", "type": "Exception"}, "children": null, "node_name": "mod_three"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.237792, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run2_8", "status":
"Failed", "error": {"message": "Execution failure in ''mod_three'': (Exception)
cannot mod 3!", "messageFormat": "Execution failure in ''{node_name}'': {error_type_and_message}",
"messageParameters": {"node_name": "mod_three", "error_type_and_message":
"(Exception) cannot mod 3!"}, "referenceCode": "Tool/__pf_main__", "code":
"UserError", "innerError": {"code": "ToolExecutionError", "innerError": null},
"additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 3!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n", "filename": "/mnt/host/service/app/39649/requests/run2/mod_three.py",
"lineno": 7, "name": "mod_three"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_three'': (Exception) cannot mod 3!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 3!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\n", "innerException":
null}}}, "inputs": {"number": 8, "line_number": 8}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run2", "root_run_id": "run2", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:55:31.559578Z",
"end_time": "2024-01-12T08:55:31.789137Z", "index": 8, "api_calls": [{"name":
"mod_three", "type": "Tool", "inputs": {"number": 8}, "output": null, "start_time":
1705049731.584465, "end_time": 1705049731.585312, "error": {"message": "cannot
mod 3!", "type": "Exception"}, "children": null, "node_name": "mod_three"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.229559, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run2_6", "status":
"Completed", "error": null, "inputs": {"number": 6, "line_number": 6}, "output":
{"output": 6}, "metrics": null, "request": null, "parent_run_id": "run2",
"root_run_id": "run2", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:55:31.555822Z", "end_time": "2024-01-12T08:55:31.564102Z",
"index": 6, "api_calls": [{"name": "mod_three", "type": "Tool", "inputs":
{"number": 6}, "output": {"value": 6}, "start_time": 1705049731.561458, "end_time":
1705049731.562356, "error": null, "children": null, "node_name": "mod_three"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.00828, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": {"output": 6}, "upload_metrics": false}, {"run_id": "run2_16",
"status": "Failed", "error": {"message": "Execution failure in ''mod_three'':
(Exception) cannot mod 3!", "messageFormat": "Execution failure in ''{node_name}'':
{error_type_and_message}", "messageParameters": {"node_name": "mod_three",
"error_type_and_message": "(Exception) cannot mod 3!"}, "referenceCode": "Tool/__pf_main__",
"code": "UserError", "innerError": {"code": "ToolExecutionError", "innerError":
null}, "additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 3!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n", "filename": "/mnt/host/service/app/39649/requests/run2/mod_three.py",
"lineno": 7, "name": "mod_three"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_three'': (Exception) cannot mod 3!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 3!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\n", "innerException":
null}}}, "inputs": {"number": 16, "line_number": 16}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run2", "root_run_id": "run2", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:55:31.744907Z",
"end_time": "2024-01-12T08:55:31.817842Z", "index": 16, "api_calls": [{"name":
"mod_three", "type": "Tool", "inputs": {"number": 16}, "output": null, "start_time":
1705049731.747285, "end_time": 1705049731.747465, "error": {"message": "cannot
mod 3!", "type": "Exception"}, "children": null, "node_name": "mod_three"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.072935, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run2_10", "status":
"Failed", "error": {"message": "Execution failure in ''mod_three'': (Exception)
cannot mod 3!", "messageFormat": "Execution failure in ''{node_name}'': {error_type_and_message}",
"messageParameters": {"node_name": "mod_three", "error_type_and_message":
"(Exception) cannot mod 3!"}, "referenceCode": "Tool/__pf_main__", "code":
"UserError", "innerError": {"code": "ToolExecutionError", "innerError": null},
"additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 3!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n", "filename": "/mnt/host/service/app/39649/requests/run2/mod_three.py",
"lineno": 7, "name": "mod_three"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_three'': (Exception) cannot mod 3!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 3!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\n", "innerException":
null}}}, "inputs": {"number": 10, "line_number": 10}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run2", "root_run_id": "run2", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:55:31.59072Z",
"end_time": "2024-01-12T08:55:31.604864Z", "index": 10, "api_calls": [{"name":
"mod_three", "type": "Tool", "inputs": {"number": 10}, "output": null, "start_time":
1705049731.59849, "end_time": 1705049731.600113, "error": {"message": "cannot
mod 3!", "type": "Exception"}, "children": null, "node_name": "mod_three"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.014144, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run2_14", "status":
"Failed", "error": {"message": "Execution failure in ''mod_three'': (Exception)
cannot mod 3!", "messageFormat": "Execution failure in ''{node_name}'': {error_type_and_message}",
"messageParameters": {"node_name": "mod_three", "error_type_and_message":
"(Exception) cannot mod 3!"}, "referenceCode": "Tool/__pf_main__", "code":
"UserError", "innerError": {"code": "ToolExecutionError", "innerError": null},
"additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 3!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n", "filename": "/mnt/host/service/app/39649/requests/run2/mod_three.py",
"lineno": 7, "name": "mod_three"}}], "debugInfo": {"type": "ToolExecutionError",
"message": "Execution failure in ''mod_three'': (Exception) cannot mod 3!",
"stackTrace": "\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 3!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\n", "innerException":
null}}}, "inputs": {"number": 14, "line_number": 14}, "output": null, "metrics":
null, "request": null, "parent_run_id": "run2", "root_run_id": "run2", "source_run_id":
null, "flow_id": "default_flow_id", "start_time": "2024-01-12T08:55:31.736285Z",
"end_time": "2024-01-12T08:55:31.745117Z", "index": 14, "api_calls": [{"name":
"mod_three", "type": "Tool", "inputs": {"number": 14}, "output": null, "start_time":
1705049731.739545, "end_time": 1705049731.740525, "error": {"message": "cannot
mod 3!", "type": "Exception"}, "children": null, "node_name": "mod_three"}],
"variant_id": "", "name": "", "description": "", "tags": null, "system_metrics":
{"duration": 0.008832, "prompt_tokens": 0, "completion_tokens": 0, "total_tokens":
0}, "result": null, "upload_metrics": false}, {"run_id": "run2_18", "status":
"Completed", "error": null, "inputs": {"number": 18, "line_number": 18}, "output":
{"output": 18}, "metrics": null, "request": null, "parent_run_id": "run2",
"root_run_id": "run2", "source_run_id": null, "flow_id": "default_flow_id",
"start_time": "2024-01-12T08:55:31.890045Z", "end_time": "2024-01-12T08:55:31.891993Z",
"index": 18, "api_calls": [{"name": "mod_three", "type": "Tool", "inputs":
{"number": 18}, "output": {"value": 18}, "start_time": 1705049731.891255,
"end_time": 1705049731.891333, "error": null, "children": null, "node_name":
"mod_three"}], "variant_id": "", "name": "", "description": "", "tags": null,
"system_metrics": {"duration": 0.001948, "prompt_tokens": 0, "completion_tokens":
0, "total_tokens": 0}, "result": {"output": 18}, "upload_metrics": false}]'
headers:
connection:
- keep-alive
content-length:
- '32873'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.930'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run2/childRuns?endIndex=49&startIndex=25
response:
body:
string: '[]'
headers:
connection:
- keep-alive
content-length:
- '2'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
x-content-type-options:
- nosniff
x-request-time:
- '0.727'
status:
code: 200
message: OK
- request:
body: '{"runId": "run1", "selectRunMetadata": true, "selectRunDefinition": true,
"selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705049621, "rootRunId": "run1", "createdUtc":
"2024-01-12T08:53:41.7311265+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": {"error": {"code": "UserError", "severity":
null, "message": "Execution failure in ''mod_two'': (Exception) cannot mod
2!", "messageFormat": "{\"totalChildRuns\": 20, \"userErrorChildRuns\": 10,
\"systemErrorChildRuns\": 0, \"errorDetails\": [{\"code\": \"UserError/ToolExecutionError\",
\"messageFormat\": \"Execution failure in ''{node_name}'': {error_type_and_message}\",
\"count\": 10}]}", "messageParameters": {"node_name": "mod_two", "error_type_and_message":
"(Exception) cannot mod 2!"}, "referenceCode": "Tool/__pf_main__", "detailsUri":
null, "target": null, "details": [], "innerError": {"code": "ToolExecutionError",
"innerError": null}, "debugInfo": {"type": "ToolExecutionError", "message":
"Execution failure in ''mod_two'': (Exception) cannot mod 2!", "stackTrace":
"\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 2!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\n", "innerException":
null, "data": null, "errorResponse": null}, "data": null, "errorResponse":
null}, "additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 2!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\nException: cannot
mod 2!\n", "filename": "/mnt/host/service/app/39649/requests/run1/mod_two.py",
"lineno": 7, "name": "mod_two"}}]}, "correlation": null, "environment": null,
"location": null, "time": "2024-01-12T08:54:32.157997+00:00", "componentName":
"promptflow-runtime/20231204.v4 Designer/1.0 promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0) promptflow/1.2.0rc1"}, "warnings":
null, "revision": 7, "statusRevision": 3, "runUuid": "08457cff-a0cf-4b93-8b58-24b47e6e2f06",
"parentRunUuid": null, "rootRunUuid": "08457cff-a0cf-4b93-8b58-24b47e6e2f06",
"lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:34.5206194",
"effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "18a66f5f-dbdf-4c17-9dd7-1634712a9cbe",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:54:31.4291957+00:00", "duration":
"00:00:34.5206194", "cancelationReason": null, "currentAttemptId": 1, "runId":
"run1", "parentRunId": null, "experimentId": "f65cb39a-0d28-4b06-9ef9-b962ed9df8d0",
"status": "Completed", "startTimeUtc": "2024-01-12T08:53:57.8643652+00:00",
"endTimeUtc": "2024-01-12T08:54:32.3849846+00:00", "scheduleId": null, "displayName":
"run1", "name": null, "dataContainerId": "dcid.run1", "description": null,
"hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator":
null, "traits": [], "attribution": "PromptFlow", "computeType": "AmlcDsi"},
"properties": {"azureml.promptflow.runtime_name": "test-runtime-ci", "azureml.promptflow.runtime_version":
"20231204.v4", "azureml.promptflow.definition_file_name": "flow.dag.yaml",
"azureml.promptflow.session_id": "357876c0a66919ba9791ba9723d3eb045181f10b65c806ea",
"azureml.promptflow.flow_lineage_id": "3df9ed48cf83e1a38799b0a91f06e0825173b507d07391d1d91ec0253c1cda5c",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/ceb856845f8689bdee2da5a26bd95bab/two/flow.dag.yaml",
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/7e5ac781513436b66626132fefb20d1f/numbers.jsonl",
"azureml.promptflow.inputs_mapping": "{\"number\":\"${data.value}\"}", "_azureml.evaluation_run":
"promptflow.BatchRun", "azureml.promptflow.snapshot_id": "d15d3732-36a4-45ac-b53b-e1fe695b2e77",
"azureml.promptflow.total_tokens": "0", "_azureml.evaluate_artifacts": "[{\"path\":
\"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters": {}, "actionUris":
{}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [],
"tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets":
[], "runDefinition": null, "jobSpecification": null, "primaryMetricName":
null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri":
null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId":
"azureml://locations/eastus/workspaces/00000/data/azureml_run1_output_data_debug_info/versions/1",
"type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_run1_output_data_flow_outputs/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-length:
- '9907'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.053'
status:
code: 200
message: OK
- request:
body: '{"runId": "run2", "selectRunMetadata": true, "selectRunDefinition": true,
"selectJobSpecification": true}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '137'
Content-Type:
- application/json
User-Agent:
- python-requests/2.31.0
method: POST
uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata
response:
body:
string: '{"runMetadata": {"runNumber": 1705049714, "rootRunId": "run2", "createdUtc":
"2024-01-12T08:55:14.362818+00:00", "createdBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587",
"upn": null}, "userId": "00000000-0000-0000-0000-000000000000", "token": null,
"tokenExpiryTimeUtc": null, "error": {"error": {"code": "UserError", "severity":
null, "message": "Execution failure in ''mod_three'': (Exception) cannot mod
3!", "messageFormat": "{\"totalChildRuns\": 10, \"userErrorChildRuns\": 6,
\"systemErrorChildRuns\": 0, \"errorDetails\": [{\"code\": \"UserError/ToolExecutionError\",
\"messageFormat\": \"Execution failure in ''{node_name}'': {error_type_and_message}\",
\"count\": 6}]}", "messageParameters": {"node_name": "mod_three", "error_type_and_message":
"(Exception) cannot mod 3!"}, "referenceCode": "Tool/__pf_main__", "detailsUri":
null, "target": null, "details": [], "innerError": {"code": "ToolExecutionError",
"innerError": null}, "debugInfo": {"type": "ToolExecutionError", "message":
"Execution failure in ''mod_three'': (Exception) cannot mod 3!", "stackTrace":
"\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 451, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/_base.py\",
line 403, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/concurrent/futures/thread.py\",
line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 111, in _exec_single_node_in_thread\n result = context.invoke_tool(node,
f, kwargs=kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\n", "innerException": {"type": "Exception", "message":
"cannot mod 3!", "stackTrace": "Traceback (most recent call last):\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\n", "innerException":
null, "data": null, "errorResponse": null}, "data": null, "errorResponse":
null}, "additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type":
"Exception", "message": "cannot mod 3!", "traceback": "Traceback (most recent
call last):\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n", "filename": "/mnt/host/service/app/39649/requests/run2/mod_three.py",
"lineno": 7, "name": "mod_three"}}]}, "correlation": null, "environment":
null, "location": null, "time": "2024-01-12T08:56:05.377066+00:00", "componentName":
"promptflow-runtime/20231204.v4 Designer/1.0 promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0) promptflow/1.2.0rc1"}, "warnings":
null, "revision": 7, "statusRevision": 3, "runUuid": "b80a9962-ed21-4dfb-85b0-2548b1649f39",
"parentRunUuid": null, "rootRunUuid": "b80a9962-ed21-4dfb-85b0-2548b1649f39",
"lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:34.5231338",
"effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "00000000-0000-0000-0000-000000000000",
"userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/",
"userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "18a66f5f-dbdf-4c17-9dd7-1634712a9cbe",
"upn": null}, "lastModifiedUtc": "2024-01-12T08:56:04.2209326+00:00", "duration":
"00:00:34.5231338", "cancelationReason": null, "currentAttemptId": 1, "runId":
"run2", "parentRunId": null, "experimentId": "3a00e270-37b9-49be-a74e-ac675487979e",
"status": "Completed", "startTimeUtc": "2024-01-12T08:55:31.0651672+00:00",
"endTimeUtc": "2024-01-12T08:56:05.588301+00:00", "scheduleId": null, "displayName":
"run2", "name": null, "dataContainerId": "dcid.run2", "description": null,
"hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator":
null, "traits": [], "attribution": "PromptFlow", "computeType": "AmlcDsi"},
"properties": {"azureml.promptflow.runtime_name": "test-runtime-ci", "azureml.promptflow.runtime_version":
"20231204.v4", "azureml.promptflow.definition_file_name": "flow.dag.yaml",
"azureml.promptflow.session_id": "c9399af7028d644e85f3624a0b026432068432621519ab8f",
"azureml.promptflow.flow_lineage_id": "77a36a2606b22ee30674046884962374e57e822acdeccac7750905d98e944580",
"azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore",
"azureml.promptflow.flow_definition_blob_path": "LocalUpload/f0722e3fb27e86b101670dbb5e85554c/three/flow.dag.yaml",
"azureml.promptflow.input_run_id": "run1", "azureml.promptflow.inputs_mapping":
"{\"number\":\"${run.outputs.output}\"}", "_azureml.evaluation_run": "promptflow.BatchRun",
"azureml.promptflow.snapshot_id": "a25bab13-d2d7-4c36-83bf-96979de95507",
"azureml.promptflow.total_tokens": "0", "_azureml.evaluate_artifacts": "[{\"path\":
\"instance_results.jsonl\", \"type\": \"table\"}]"}, "parameters": {}, "actionUris":
{}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [],
"tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets":
[], "runDefinition": null, "jobSpecification": null, "primaryMetricName":
null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri":
null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace":
false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId":
"azureml://locations/eastus/workspaces/00000/data/azureml_run2_output_data_debug_info/versions/1",
"type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_run2_output_data_flow_outputs/versions/1",
"type": "UriFolder"}}}, "runDefinition": null, "jobSpecification": null, "systemSettings":
null}'
headers:
connection:
- keep-alive
content-length:
- '9865'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.037'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run1/logContent
response:
body:
string: '"2024-01-12 08:53:45 +0000 49 promptflow-runtime INFO [run1]
Receiving v2 bulk run request e51d6436-3ed9-4576-b848-1967710c148c: {\"flow_id\":
\"run1\", \"flow_run_id\": \"run1\", \"flow_source\": {\"flow_source_type\":
1, \"flow_source_info\": {\"snapshot_id\": \"d15d3732-36a4-45ac-b53b-e1fe695b2e77\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.run1/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T08%3A43%3A40Z&ske=2024-01-13T16%3A53%3A40Z&sks=b&skv=2019-07-07&st=2024-01-12T08%3A43%3A44Z&se=2024-01-12T16%3A53%3A44Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"data\": \"azureml://datastores/workspaceblobstore/paths/LocalUpload/7e5ac781513436b66626132fefb20d1f/numbers.jsonl\"},
\"inputs_mapping\": {\"number\": \"${data.value}\"}, \"azure_storage_setting\":
{\"azure_storage_mode\": 1, \"storage_account_name\": \"promptfloweast4063704120\",
\"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\",
\"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/run1\", \"blob_container_sas_token\":
\"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T08%3A53%3A45Z&ske=2024-01-19T08%3A53%3A45Z&sks=b&skv=2019-07-07&se=2024-01-19T08%3A53%3A45Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 08:53:45 +0000 49
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:53:45 +0000 49 promptflow-runtime INFO Updating
run1 to Status.Preparing...\n2024-01-12 08:53:45 +0000 49 promptflow-runtime
INFO Downloading snapshot to /mnt/host/service/app/39649/requests/run1\n2024-01-12
08:53:45 +0000 49 promptflow-runtime INFO Get snapshot sas url for
d15d3732-36a4-45ac-b53b-e1fe695b2e77...\n2024-01-12 08:53:52 +0000 49
promptflow-runtime INFO Downloading snapshot d15d3732-36a4-45ac-b53b-e1fe695b2e77
from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/d15d3732-36a4-45ac-b53b-e1fe695b2e77.zip...\n2024-01-12
08:53:52 +0000 49 promptflow-runtime INFO Downloaded file /mnt/host/service/app/39649/requests/run1/d15d3732-36a4-45ac-b53b-e1fe695b2e77.zip
with size 509 for snapshot d15d3732-36a4-45ac-b53b-e1fe695b2e77.\n2024-01-12
08:53:52 +0000 49 promptflow-runtime INFO Download snapshot d15d3732-36a4-45ac-b53b-e1fe695b2e77
completed.\n2024-01-12 08:53:52 +0000 49 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/39649/requests/run1\n2024-01-12
08:53:52 +0000 49 promptflow-runtime INFO About to execute a python
flow.\n2024-01-12 08:53:52 +0000 49 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-12 08:53:52 +0000 49 promptflow-runtime
INFO Starting to check process 6280 status for run run1\n2024-01-12 08:53:52
+0000 49 promptflow-runtime INFO Start checking run status for run
run1\n2024-01-12 08:53:56 +0000 6280 promptflow-runtime INFO [49--6280]
Start processing flowV2......\n2024-01-12 08:53:56 +0000 6280 promptflow-runtime
INFO Runtime version: 20231204.v4. PromptFlow version: 1.2.0rc1\n2024-01-12
08:53:56 +0000 6280 promptflow-runtime INFO Setting mlflow tracking
uri...\n2024-01-12 08:53:56 +0000 6280 promptflow-runtime INFO Validating
''AzureML Data Scientist'' user authentication...\n2024-01-12 08:53:56 +0000 6280
promptflow-runtime INFO Successfully validated ''AzureML Data Scientist''
user authentication.\n2024-01-12 08:53:56 +0000 6280 promptflow-runtime
INFO Using AzureMLRunStorageV2\n2024-01-12 08:53:56 +0000 6280 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:53:56 +0000 6280 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-12 08:53:56 +0000 6280 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:53:57 +0000 6280 promptflow-runtime INFO Resolve data from url finished
in 0.6618335284292698 seconds\n2024-01-12 08:53:57 +0000 6280 promptflow-runtime
INFO Starting the aml run ''run1''...\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Using fork, process count: 16\n2024-01-12 08:53:58
+0000 6335 execution.bulk INFO Process 6335 started.\n2024-01-12
08:53:58 +0000 6351 execution.bulk INFO Process 6351 started.\n2024-01-12
08:53:58 +0000 6345 execution.bulk INFO Process 6345 started.\n2024-01-12
08:53:58 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:2,
Process id: 6335, Line number: 0 start execution.\n2024-01-12 08:53:58 +0000 6379
execution.bulk INFO Process 6379 started.\n2024-01-12 08:53:58 +0000 6382
execution.bulk INFO Process 6382 started.\n2024-01-12 08:53:58 +0000 6387
execution.bulk INFO Process 6387 started.\n2024-01-12 08:53:58 +0000 6351
execution ERROR Node mod_two in line 1 failed. Exception: Execution
failure in ''mod_two'': (Exception) cannot mod 2!.\nTraceback (most recent
call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\nException: cannot
mod 2!\n\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\npromptflow._core._errors.ToolExecutionError: Execution
failure in ''mod_two'': (Exception) cannot mod 2!\n2024-01-12 08:53:58 +0000 6362
execution.bulk INFO Process 6362 started.\n2024-01-12 08:53:58 +0000 6369
execution.bulk INFO Process 6369 started.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:6, Process id: 6351,
Line number: 1 start execution.\n2024-01-12 08:53:58 +0000 6351 execution ERROR Execution
of one node has failed. Cancelling all running nodes: mod_two.\n2024-01-12
08:53:58 +0000 6367 execution.bulk INFO Process 6367 started.\n2024-01-12
08:53:58 +0000 6398 execution.bulk INFO Process 6398 started.\n2024-01-12
08:53:58 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:3,
Process id: 6345, Line number: 2 start execution.\n2024-01-12 08:53:58 +0000 6422
execution.bulk INFO Process 6422 started.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:8, Process id: 6379,
Line number: 3 start execution.\n2024-01-12 08:53:58 +0000 6369 execution ERROR Node
mod_two in line 7 failed. Exception: Execution failure in ''mod_two'': (Exception)
cannot mod 2!.\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\nException: cannot
mod 2!\n\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\npromptflow._core._errors.ToolExecutionError: Execution
failure in ''mod_two'': (Exception) cannot mod 2!\n2024-01-12 08:53:58 +0000 6391
execution.bulk INFO Process 6391 started.\n2024-01-12 08:53:58 +0000 6367
execution ERROR Node mod_two in line 9 failed. Exception: Execution
failure in ''mod_two'': (Exception) cannot mod 2!.\nTraceback (most recent
call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\nException: cannot
mod 2!\n\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\npromptflow._core._errors.ToolExecutionError: Execution
failure in ''mod_two'': (Exception) cannot mod 2!\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:9, Process id: 6382,
Line number: 4 start execution.\n2024-01-12 08:53:58 +0000 6369 execution ERROR Execution
of one node has failed. Cancelling all running nodes: mod_two.\n2024-01-12
08:53:58 +0000 6439 execution.bulk INFO Process 6439 started.\n2024-01-12
08:53:58 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:10,
Process id: 6387, Line number: 5 start execution.\n2024-01-12 08:53:58 +0000 6367
execution ERROR Execution of one node has failed. Cancelling all
running nodes: mod_two.\n2024-01-12 08:53:58 +0000 6403 execution.bulk INFO Process
6403 started.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:7, Process id: 6362, Line number: 6 start execution.\n2024-01-12
08:53:58 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:4,
Process id: 6369, Line number: 7 start execution.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:11, Process id: 6398,
Line number: 8 start execution.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:5, Process id: 6367, Line number: 9 start execution.\n2024-01-12
08:53:58 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:14,
Process id: 6422, Line number: 10 start execution.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:12, Process id: 6391,
Line number: 11 start execution.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:3, Process id: 6345, Line number: 2 completed.\n2024-01-12
08:53:58 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:6,
Process id: 6351, Line number: 1 completed.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:9, Process id: 6382,
Line number: 4 completed.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:8, Process id: 6379, Line number: 3 completed.\n2024-01-12
08:53:58 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:2,
Process id: 6335, Line number: 0 completed.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:4, Process id: 6369,
Line number: 7 completed.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Finished
6 / 20 lines.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Finished
6 / 20 lines.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:7, Process id: 6362, Line number: 6 completed.\n2024-01-12
08:53:58 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:11,
Process id: 6398, Line number: 8 completed.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:5, Process id: 6367,
Line number: 9 completed.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:9, Process id: 6382, Line number: 12 start execution.\n2024-01-12
08:53:58 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:14,
Process id: 6422, Line number: 10 completed.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Finished 10 / 20 lines.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Finished 10 / 20 lines.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Finished 10 / 20 lines.\n2024-01-12 08:53:58 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:12, Process id: 6391,
Line number: 11 completed.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.11 seconds. Estimated time for incomplete
lines: 1.54 seconds.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:10, Process id: 6387, Line number: 5 completed.\n2024-01-12
08:53:58 +0000 6280 execution.bulk INFO Average execution time
for completed lines: 0.11 seconds. Estimated time for incomplete lines: 1.54
seconds.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Finished
12 / 20 lines.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Finished
12 / 20 lines.\n2024-01-12 08:53:58 +0000 6280 execution.bulk INFO Finished
12 / 20 lines.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Finished
12 / 20 lines.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.7 seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.7 seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.7 seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:9, Process id: 6382, Line number: 12 completed.\n2024-01-12
08:53:59 +0000 6369 execution ERROR Node mod_two in line 19
failed. Exception: Execution failure in ''mod_two'': (Exception) cannot mod
2!.\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run1/mod_two.py\",
line 7, in mod_two\n raise Exception(\"cannot mod 2!\")\nException: cannot
mod 2!\n\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\npromptflow._core._errors.ToolExecutionError: Execution
failure in ''mod_two'': (Exception) cannot mod 2!\n2024-01-12 08:53:59 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:12, Process id: 6391,
Line number: 13 start execution.\n2024-01-12 08:53:59 +0000 6369 execution ERROR Execution
of one node has failed. Cancelling all running nodes: mod_two.\n2024-01-12
08:53:59 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:3,
Process id: 6345, Line number: 14 start execution.\n2024-01-12 08:53:59 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:10, Process id: 6387,
Line number: 15 start execution.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:6, Process id: 6351, Line number: 16 start execution.\n2024-01-12
08:53:59 +0000 6280 execution.bulk INFO Average execution time
for completed lines: 0.07 seconds. Estimated time for incomplete lines: 0.56
seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.56 seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.56 seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.56 seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:8, Process id: 6379, Line number: 17 start execution.\n2024-01-12
08:53:59 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:2,
Process id: 6335, Line number: 18 start execution.\n2024-01-12 08:53:59 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:4, Process id: 6369,
Line number: 19 start execution.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:12, Process id: 6391, Line number: 13 completed.\n2024-01-12
08:53:59 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:3,
Process id: 6345, Line number: 14 completed.\n2024-01-12 08:53:59 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:10, Process id: 6387,
Line number: 15 completed.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:6, Process id: 6351, Line number: 16 completed.\n2024-01-12
08:53:59 +0000 6280 execution.bulk INFO Process name: ForkProcess-62:8,
Process id: 6379, Line number: 17 completed.\n2024-01-12 08:53:59 +0000 6280
execution.bulk INFO Finished 18 / 20 lines.\n2024-01-12 08:53:59 +0000 6280
execution.bulk INFO Process name: ForkProcess-62:2, Process id: 6335,
Line number: 18 completed.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Process
name: ForkProcess-62:4, Process id: 6369, Line number: 19 completed.\n2024-01-12
08:53:59 +0000 6280 execution.bulk INFO Finished 20 / 20 lines.\n2024-01-12
08:53:59 +0000 6280 execution.bulk INFO Finished 20 / 20 lines.\n2024-01-12
08:53:59 +0000 6280 execution.bulk INFO Finished 20 / 20 lines.\n2024-01-12
08:53:59 +0000 6280 execution.bulk INFO Average execution time
for completed lines: 0.07 seconds. Estimated time for incomplete lines: 0.14
seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Finished
20 / 20 lines.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Finished
20 / 20 lines.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.0 seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.0 seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.0 seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.0 seconds.\n2024-01-12 08:53:59 +0000 6280 execution.bulk INFO Average
execution time for completed lines: 0.07 seconds. Estimated time for incomplete
lines: 0.0 seconds.\n2024-01-12 08:54:29 +0000 6280 execution ERROR 10/20
flow run failed, indexes: [1,3,5,7,9,11,13,15,17,19], exception of index 1:
Execution failure in ''mod_two'': (Exception) cannot mod 2!\n2024-01-12 08:54:31
+0000 6280 execution.bulk INFO Upload status summary metrics for
run run1 finished in 1.6117610009387136 seconds\n2024-01-12 08:54:31 +0000 6280
promptflow-runtime INFO Successfully write run properties {\"azureml.promptflow.total_tokens\":
0, \"_azureml.evaluate_artifacts\": \"[{\\\"path\\\": \\\"instance_results.jsonl\\\",
\\\"type\\\": \\\"table\\\"}]\"} with run id ''run1''\n2024-01-12 08:54:31
+0000 6280 execution.bulk INFO Upload RH properties for run run1
finished in 0.08309784904122353 seconds\n2024-01-12 08:54:31 +0000 6280
promptflow-runtime INFO Creating unregistered output Asset for Run run1...\n2024-01-12
08:54:31 +0000 6280 promptflow-runtime INFO Created debug_info Asset:
azureml://locations/eastus/workspaces/00000/data/azureml_run1_output_data_debug_info/versions/1\n2024-01-12
08:54:31 +0000 6280 promptflow-runtime INFO Creating unregistered output
Asset for Run run1...\n2024-01-12 08:54:31 +0000 6280 promptflow-runtime
INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_run1_output_data_flow_outputs/versions/1\n2024-01-12
08:54:31 +0000 6280 promptflow-runtime INFO Creating Artifact for Run
run1...\n2024-01-12 08:54:32 +0000 6280 promptflow-runtime INFO Created
instance_results.jsonl Artifact.\n2024-01-12 08:54:32 +0000 6280 promptflow-runtime
INFO Patching run1...\n2024-01-12 08:54:32 +0000 6280 promptflow-runtime
WARNING [run1] Run failed. Execution stackTrace: Traceback (most recent call
last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n [REDACTED:
External StackTrace]\n\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n [REDACTED:
External StackTrace]\n\n2024-01-12 08:54:32 +0000 6280 promptflow-runtime
INFO Ending the aml run ''run1'' with status ''Completed''...\n2024-01-12
08:54:33 +0000 49 promptflow-runtime INFO Process 6280 finished\n2024-01-12
08:54:33 +0000 49 promptflow-runtime INFO [49] Child process finished!\n2024-01-12
08:54:33 +0000 49 promptflow-runtime INFO [run1] End processing bulk
run\n2024-01-12 08:54:33 +0000 49 promptflow-runtime INFO Cleanup
working dir /mnt/host/service/app/39649/requests/run1 for bulk run\n"'
headers:
connection:
- keep-alive
content-length:
- '26914'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '1.063'
status:
code: 200
message: OK
- request:
body: null
headers:
Accept:
- application/json
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Type:
- application/json
User-Agent:
- promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown
Python/3.10.13 (Windows-10-10.0.22631-SP0)
method: GET
uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/run2/logContent
response:
body:
string: '"2024-01-12 08:55:18 +0000 49 promptflow-runtime INFO [run2]
Receiving v2 bulk run request fb3450a2-5971-497b-9704-9f15f2716d12: {\"flow_id\":
\"run2\", \"flow_run_id\": \"run2\", \"flow_source\": {\"flow_source_type\":
1, \"flow_source_info\": {\"snapshot_id\": \"a25bab13-d2d7-4c36-83bf-96979de95507\"},
\"flow_dag_file\": \"flow.dag.yaml\"}, \"log_path\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/ExperimentRun/dcid.run2/logs/azureml/executionlogs.txt?sv=2019-07-07&sr=b&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T07%3A44%3A44Z&ske=2024-01-13T15%3A54%3A44Z&sks=b&skv=2019-07-07&st=2024-01-12T08%3A45%3A18Z&se=2024-01-12T16%3A55%3A18Z&sp=rcw\",
\"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\",
\"data_inputs\": {\"run.outputs\": \"azureml:/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/data/azureml_run1_output_data_flow_outputs/versions/1\"},
\"inputs_mapping\": {\"number\": \"${run.outputs.output}\"}, \"azure_storage_setting\":
{\"azure_storage_mode\": 1, \"storage_account_name\": \"promptfloweast4063704120\",
\"blob_container_name\": \"azureml-blobstore-3e123da1-f9a5-4c91-9234-8d9ffbb39ff5\",
\"flow_artifacts_root_path\": \"promptflow/PromptFlowArtifacts/run2\", \"blob_container_sas_token\":
\"?sv=2019-07-07&sr=c&sig=**data_scrubbed**&skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2024-01-12T08%3A55%3A18Z&ske=2024-01-19T08%3A55%3A18Z&sks=b&skv=2019-07-07&se=2024-01-19T08%3A55%3A18Z&sp=racwl\",
\"output_datastore_name\": \"workspaceblobstore\"}}\n2024-01-12 08:55:18 +0000 49
promptflow-runtime INFO Runtime version: 20231204.v4. PromptFlow version:
1.2.0rc1\n2024-01-12 08:55:18 +0000 49 promptflow-runtime INFO Updating
run2 to Status.Preparing...\n2024-01-12 08:55:19 +0000 49 promptflow-runtime
INFO Downloading snapshot to /mnt/host/service/app/39649/requests/run2\n2024-01-12
08:55:19 +0000 49 promptflow-runtime INFO Get snapshot sas url for
a25bab13-d2d7-4c36-83bf-96979de95507...\n2024-01-12 08:55:25 +0000 49
promptflow-runtime INFO Downloading snapshot a25bab13-d2d7-4c36-83bf-96979de95507
from uri https://promptfloweast4063704120.blob.core.windows.net/snapshotzips/promptflow-eastus:3e123da1-f9a5-4c91-9234-8d9ffbb39ff5:snapshotzip/a25bab13-d2d7-4c36-83bf-96979de95507.zip...\n2024-01-12
08:55:25 +0000 49 promptflow-runtime INFO Downloaded file /mnt/host/service/app/39649/requests/run2/a25bab13-d2d7-4c36-83bf-96979de95507.zip
with size 515 for snapshot a25bab13-d2d7-4c36-83bf-96979de95507.\n2024-01-12
08:55:25 +0000 49 promptflow-runtime INFO Download snapshot a25bab13-d2d7-4c36-83bf-96979de95507
completed.\n2024-01-12 08:55:25 +0000 49 promptflow-runtime INFO Successfully
download snapshot to /mnt/host/service/app/39649/requests/run2\n2024-01-12
08:55:25 +0000 49 promptflow-runtime INFO About to execute a python
flow.\n2024-01-12 08:55:25 +0000 49 promptflow-runtime INFO Use spawn
method to start child process.\n2024-01-12 08:55:25 +0000 49 promptflow-runtime
INFO Starting to check process 6515 status for run run2\n2024-01-12 08:55:25
+0000 49 promptflow-runtime INFO Start checking run status for run
run2\n2024-01-12 08:55:29 +0000 6515 promptflow-runtime INFO [49--6515]
Start processing flowV2......\n2024-01-12 08:55:29 +0000 6515 promptflow-runtime
INFO Runtime version: 20231204.v4. PromptFlow version: 1.2.0rc1\n2024-01-12
08:55:29 +0000 6515 promptflow-runtime INFO Setting mlflow tracking
uri...\n2024-01-12 08:55:29 +0000 6515 promptflow-runtime INFO Validating
''AzureML Data Scientist'' user authentication...\n2024-01-12 08:55:29 +0000 6515
promptflow-runtime INFO Successfully validated ''AzureML Data Scientist''
user authentication.\n2024-01-12 08:55:30 +0000 6515 promptflow-runtime
INFO Using AzureMLRunStorageV2\n2024-01-12 08:55:30 +0000 6515 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:55:30 +0000 6515 promptflow-runtime INFO Initialized blob service
client for AzureMLRunTracker.\n2024-01-12 08:55:30 +0000 6515 promptflow-runtime
INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2024-01-12
08:55:30 +0000 6515 promptflow-runtime INFO Resolve data from url finished
in 0.5992864752188325 seconds\n2024-01-12 08:55:30 +0000 6515 promptflow-runtime
INFO Starting the aml run ''run2''...\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Using fork, process count: 10\n2024-01-12 08:55:31
+0000 6565 execution.bulk INFO Process 6565 started.\n2024-01-12
08:55:31 +0000 6570 execution.bulk INFO Process 6570 started.\n2024-01-12
08:55:31 +0000 6579 execution.bulk INFO Process 6579 started.\n2024-01-12
08:55:31 +0000 6585 execution.bulk INFO Process 6585 started.\n2024-01-12
08:55:31 +0000 6592 execution.bulk INFO Process 6592 started.\n2024-01-12
08:55:31 +0000 6570 execution ERROR Node mod_three in line
2 failed. Exception: Execution failure in ''mod_three'': (Exception) cannot
mod 3!.\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\npromptflow._core._errors.ToolExecutionError: Execution
failure in ''mod_three'': (Exception) cannot mod 3!\n2024-01-12 08:55:31 +0000 6570
execution ERROR Execution of one node has failed. Cancelling all
running nodes: mod_three.\n2024-01-12 08:55:31 +0000 6515 execution.bulk INFO Process
name: ForkProcess-64:2, Process id: 6565, Line number: 0 start execution.\n2024-01-12
08:55:31 +0000 6605 execution.bulk INFO Process 6605 started.\n2024-01-12
08:55:31 +0000 6515 execution.bulk INFO Process name: ForkProcess-64:4,
Process id: 6570, Line number: 2 start execution.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Process name: ForkProcess-64:3, Process id: 6579,
Line number: 4 start execution.\n2024-01-12 08:55:31 +0000 6515 execution.bulk INFO Process
name: ForkProcess-64:5, Process id: 6585, Line number: 6 start execution.\n2024-01-12
08:55:31 +0000 6515 execution.bulk INFO Process name: ForkProcess-64:6,
Process id: 6592, Line number: 8 start execution.\n2024-01-12 08:55:31 +0000 6622
execution.bulk INFO Process 6622 started.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Process name: ForkProcess-64:9, Process id: 6605,
Line number: 10 start execution.\n2024-01-12 08:55:31 +0000 6515 execution.bulk INFO Process
name: ForkProcess-64:10, Process id: 6622, Line number: 12 start execution.\n2024-01-12
08:55:31 +0000 6515 execution.bulk INFO Process name: ForkProcess-64:2,
Process id: 6565, Line number: 0 completed.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Finished 1 / 10 lines.\n2024-01-12 08:55:31 +0000 6614
execution.bulk INFO Process 6614 started.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Average execution time for completed lines: 0.36
seconds. Estimated time for incomplete lines: 3.24 seconds.\n2024-01-12 08:55:31
+0000 6579 execution ERROR Node mod_three in line 4 failed.
Exception: Execution failure in ''mod_three'': (Exception) cannot mod 3!.\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\npromptflow._core._errors.ToolExecutionError: Execution
failure in ''mod_three'': (Exception) cannot mod 3!\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Process name: ForkProcess-64:11, Process id: 6614,
Line number: 14 start execution.\n2024-01-12 08:55:31 +0000 6592 execution ERROR Node
mod_three in line 8 failed. Exception: Execution failure in ''mod_three'':
(Exception) cannot mod 3!.\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\npromptflow._core._errors.ToolExecutionError: Execution
failure in ''mod_three'': (Exception) cannot mod 3!\n2024-01-12 08:55:31 +0000 6579
execution ERROR Execution of one node has failed. Cancelling all
running nodes: mod_three.\n2024-01-12 08:55:31 +0000 6515 execution.bulk INFO Process
name: ForkProcess-64:2, Process id: 6565, Line number: 16 start execution.\n2024-01-12
08:55:31 +0000 6592 execution ERROR Execution of one node has
failed. Cancelling all running nodes: mod_three.\n2024-01-12 08:55:31 +0000 6565
execution ERROR Node mod_three in line 16 failed. Exception: Execution
failure in ''mod_three'': (Exception) cannot mod 3!.\nTraceback (most recent
call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n File \"/mnt/host/service/app/39649/requests/run2/mod_three.py\",
line 7, in mod_three\n raise Exception(\"cannot mod 3!\")\nException: cannot
mod 3!\n\nThe above exception was the direct cause of the following exception:\n\nTraceback
(most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 89, in invoke_tool\n result = self._invoke_tool_with_timer(node, f,
kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 196, in _invoke_tool_with_timer\n raise ToolExecutionError(node_name=node_name,
module=module) from e\npromptflow._core._errors.ToolExecutionError: Execution
failure in ''mod_three'': (Exception) cannot mod 3!\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Process name: ForkProcess-64:10, Process id: 6622,
Line number: 12 completed.\n2024-01-12 08:55:31 +0000 6565 execution ERROR Execution
of one node has failed. Cancelling all running nodes: mod_three.\n2024-01-12
08:55:31 +0000 6515 execution.bulk INFO Process name: ForkProcess-64:4,
Process id: 6570, Line number: 2 completed.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Finished 3 / 10 lines.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Finished 3 / 10 lines.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Average execution time for completed lines: 0.15
seconds. Estimated time for incomplete lines: 1.05 seconds.\n2024-01-12 08:55:31
+0000 6515 execution.bulk INFO Process name: ForkProcess-64:6,
Process id: 6592, Line number: 8 completed.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Process name: ForkProcess-64:3, Process id: 6579,
Line number: 4 completed.\n2024-01-12 08:55:31 +0000 6515 execution.bulk INFO Average
execution time for completed lines: 0.16 seconds. Estimated time for incomplete
lines: 1.12 seconds.\n2024-01-12 08:55:31 +0000 6515 execution.bulk INFO Process
name: ForkProcess-64:5, Process id: 6585, Line number: 6 completed.\n2024-01-12
08:55:31 +0000 6515 execution.bulk INFO Process name: ForkProcess-64:10,
Process id: 6622, Line number: 18 start execution.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Finished 6 / 10 lines.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Finished 6 / 10 lines.\n2024-01-12 08:55:31 +0000 6515
execution.bulk INFO Process name: ForkProcess-64:9, Process id: 6605,
Line number: 10 completed.\n2024-01-12 08:55:31 +0000 6515 execution.bulk INFO Process
name: ForkProcess-64:2, Process id: 6565, Line number: 16 completed.\n2024-01-12
08:55:31 +0000 6515 execution.bulk INFO Finished 8 / 10 lines.\n2024-01-12
08:55:31 +0000 6515 execution.bulk INFO Average execution time
for completed lines: 0.09 seconds. Estimated time for incomplete lines: 0.36
seconds.\n2024-01-12 08:55:31 +0000 6515 execution.bulk INFO Average
execution time for completed lines: 0.1 seconds. Estimated time for incomplete
lines: 0.4 seconds.\n2024-01-12 08:55:32 +0000 6515 execution.bulk INFO Finished
8 / 10 lines.\n2024-01-12 08:55:32 +0000 6515 execution.bulk INFO Finished
8 / 10 lines.\n2024-01-12 08:55:32 +0000 6515 execution.bulk INFO Average
execution time for completed lines: 0.08 seconds. Estimated time for incomplete
lines: 0.16 seconds.\n2024-01-12 08:55:32 +0000 6515 execution.bulk INFO Process
name: ForkProcess-64:11, Process id: 6614, Line number: 14 completed.\n2024-01-12
08:55:32 +0000 6515 execution.bulk INFO Process name: ForkProcess-64:10,
Process id: 6622, Line number: 18 completed.\n2024-01-12 08:55:32 +0000 6515
execution.bulk INFO Average execution time for completed lines: 0.08
seconds. Estimated time for incomplete lines: 0.16 seconds.\n2024-01-12 08:55:32
+0000 6515 execution.bulk INFO Average execution time for completed
lines: 0.09 seconds. Estimated time for incomplete lines: 0.18 seconds.\n2024-01-12
08:55:32 +0000 6515 execution.bulk INFO Finished 10 / 10 lines.\n2024-01-12
08:55:32 +0000 6515 execution.bulk INFO Finished 10 / 10 lines.\n2024-01-12
08:55:32 +0000 6515 execution.bulk INFO Average execution time
for completed lines: 0.08 seconds. Estimated time for incomplete lines: 0.0
seconds.\n2024-01-12 08:55:32 +0000 6515 execution.bulk INFO Average
execution time for completed lines: 0.08 seconds. Estimated time for incomplete
lines: 0.0 seconds.\n2024-01-12 08:56:02 +0000 6515 execution ERROR 6/10
flow run failed, indexes: [1,2,4,5,7,8], exception of index 1: Execution failure
in ''mod_three'': (Exception) cannot mod 3!\n2024-01-12 08:56:04 +0000 6515
execution.bulk INFO Upload status summary metrics for run run2 finished
in 1.3678363300859928 seconds\n2024-01-12 08:56:04 +0000 6515 promptflow-runtime
INFO Successfully write run properties {\"azureml.promptflow.total_tokens\":
0, \"_azureml.evaluate_artifacts\": \"[{\\\"path\\\": \\\"instance_results.jsonl\\\",
\\\"type\\\": \\\"table\\\"}]\"} with run id ''run2''\n2024-01-12 08:56:04
+0000 6515 execution.bulk INFO Upload RH properties for run run2
finished in 0.07642840500921011 seconds\n2024-01-12 08:56:04 +0000 6515
promptflow-runtime INFO Creating unregistered output Asset for Run run2...\n2024-01-12
08:56:04 +0000 6515 promptflow-runtime INFO Created debug_info Asset:
azureml://locations/eastus/workspaces/00000/data/azureml_run2_output_data_debug_info/versions/1\n2024-01-12
08:56:04 +0000 6515 promptflow-runtime INFO Creating unregistered output
Asset for Run run2...\n2024-01-12 08:56:05 +0000 6515 promptflow-runtime
INFO Created flow_outputs output Asset: azureml://locations/eastus/workspaces/00000/data/azureml_run2_output_data_flow_outputs/versions/1\n2024-01-12
08:56:05 +0000 6515 promptflow-runtime INFO Creating Artifact for Run
run2...\n2024-01-12 08:56:05 +0000 6515 promptflow-runtime INFO Created
instance_results.jsonl Artifact.\n2024-01-12 08:56:05 +0000 6515 promptflow-runtime
INFO Patching run2...\n2024-01-12 08:56:05 +0000 6515 promptflow-runtime
WARNING [run2] Run failed. Execution stackTrace: Traceback (most recent call
last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/flow_execution_context.py\",
line 185, in _invoke_tool_with_timer\n return f(**kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/_core/tool.py\",
line 106, in decorated_tool\n output = func(*args, **kwargs)\n [REDACTED:
External StackTrace]\n\nThe above exception was the direct cause of the following
exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 804, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs,
context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 890, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context,
inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/flow_executor.py\",
line 910, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager,
inputs, nodes, self._node_concurrency, context).execute()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 69, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 58, in execute\n self._dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File
\"/azureml-envs/prompt-flow/runtime/lib/python3.10/site-packages/promptflow/executor/_flow_nodes_scheduler.py\",
line 90, in _collect_outputs\n each_node_result = each_future.result()\n [REDACTED:
External StackTrace]\n\n2024-01-12 08:56:05 +0000 6515 promptflow-runtime
INFO Ending the aml run ''run2'' with status ''Completed''...\n2024-01-12
08:56:06 +0000 49 promptflow-runtime INFO Process 6515 finished\n2024-01-12
08:56:06 +0000 49 promptflow-runtime INFO [49] Child process finished!\n2024-01-12
08:56:06 +0000 49 promptflow-runtime INFO [run2] End processing bulk
run\n2024-01-12 08:56:06 +0000 49 promptflow-runtime INFO Cleanup
working dir /mnt/host/service/app/39649/requests/run2 for bulk run\n"'
headers:
connection:
- keep-alive
content-length:
- '22442'
content-type:
- application/json; charset=utf-8
strict-transport-security:
- max-age=15724800; includeSubDomains; preload
transfer-encoding:
- chunked
vary:
- Accept-Encoding
x-content-type-options:
- nosniff
x-request-time:
- '0.427'
status:
code: 200
message: OK
version: 1
| promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_get_details_against_partial_completed_run.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_get_details_against_partial_completed_run.yaml",
"repo_id": "promptflow",
"token_count": 175043
} | 82 |
name: flow_run_20230629_101205
description: sample bulk run
# invalid remote flow format should not be supported.
flow: invalid_remote_flow
data: ../datas/webClassification1.jsonl
column_mapping:
url: "${data.url}"
variant: ${summarize_text_content.variant_0}
# run config: env related
environment_variables: env_file
| promptflow/src/promptflow/tests/test_configs/runs/bulk_run_invalid_remote_flow_str.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/runs/bulk_run_invalid_remote_flow_str.yaml",
"repo_id": "promptflow",
"token_count": 105
} | 83 |
{
"test_tool.tool_with_generated_by_input.my_tool": {
"name": "Tool with Generated By Input",
"type": "python",
"inputs": {
"index_json": {
"type": [
"string"
],
"generated_by": {
"func_path": "test_tool.tool_with_generated_by_input.generate_index_json",
"func_kwargs": [
{
"name": "index_type",
"type": [
"string"
],
"reference": "${inputs.index_type}",
"optional": false
},
{
"name": "index",
"type": [
"string"
],
"reference": "${inputs.index}",
"optional": true
},
{
"name": "index_connection",
"type": [
"CognitiveSearchConnection"
],
"reference": "${inputs.index_connection}",
"optional": true
},
{
"name": "index_name",
"type": [
"string"
],
"reference": "${inputs.index_name}",
"optional": true
},
{
"name": "content_field",
"type": [
"string"
],
"reference": "${inputs.content_field}",
"optional": true
},
{
"name": "embedding_field",
"type": [
"string"
],
"reference": "${inputs.embedding_field}",
"optional": true
},
{
"name": "metadata_field",
"type": [
"string"
],
"reference": "${inputs.metadata_field}",
"optional": true
},
{
"name": "semantic_configuration",
"type": [
"string"
],
"reference": "${inputs.semantic_configuration}",
"optional": true
},
{
"name": "embedding_connection",
"type": [
"AzureOpenAIConnection",
"OpenAIConnection"
],
"reference": "${inputs.embedding_connection}",
"optional": true
},
{
"name": "embedding_deployment",
"type": [
"string"
],
"reference": "${inputs.embedding_deployment}",
"optional": true
}
],
"reverse_func_path": "test_tool.tool_with_generated_by_input.reverse_generate_index_json"
}
},
"queries": {
"type": [
"string"
]
},
"top_k": {
"type": [
"int"
]
},
"index_type": {
"dynamic_list": {
"func_path": "test_tool.tool_with_generated_by_input.list_index_types"
},
"type": [
"string"
],
"input_type": "uionly_hidden"
},
"index": {
"enabled_by": "index_type",
"enabled_by_value": [
"Workspace MLIndex"
],
"dynamic_list": {
"func_path": "test_tool.tool_with_generated_by_input.list_indexes"
},
"type": [
"string"
],
"input_type": "uionly_hidden"
},
"index_connection": {
"enabled_by": "index_type",
"enabled_by_value": [
"Azure Cognitive Search"
],
"type": [
"CognitiveSearchConnection"
],
"input_type": "uionly_hidden"
},
"index_name": {
"enabled_by": "index_type",
"enabled_by_value": [
"Azure Cognitive Search"
],
"type": [
"string"
],
"input_type": "uionly_hidden"
},
"content_field": {
"enabled_by": "index_type",
"enabled_by_value": [
"Azure Cognitive Search"
],
"dynamic_list": {
"func_path": "test_tool.tool_with_generated_by_input.list_fields"
},
"type": [
"string"
],
"input_type": "uionly_hidden"
},
"embedding_field": {
"enabled_by": "index_type",
"enabled_by_value": [
"Azure Cognitive Search"
],
"dynamic_list": {
"func_path": "test_tool.tool_with_generated_by_input.list_fields"
},
"type": [
"string"
],
"input_type": "uionly_hidden"
},
"metadata_field": {
"enabled_by": "index_type",
"enabled_by_value": [
"Azure Cognitive Search"
],
"dynamic_list": {
"func_path": "test_tool.tool_with_generated_by_input.list_fields"
},
"type": [
"string"
],
"input_type": "uionly_hidden"
},
"semantic_configuration": {
"enabled_by": "index_type",
"enabled_by_value": [
"Azure Cognitive Search"
],
"dynamic_list": {
"func_path": "test_tool.tool_with_generated_by_input.list_semantic_configuration"
},
"type": [
"string"
],
"input_type": "uionly_hidden"
},
"embedding_connection": {
"enabled_by": "index_type",
"enabled_by_value": [
"Azure Cognitive Search"
],
"type": [
"AzureOpenAIConnection",
"OpenAIConnection"
],
"input_type": "uionly_hidden"
},
"embedding_deployment": {
"enabled_by": "index_type",
"enabled_by_value": [
"Azure Cognitive Search"
],
"dynamic_list": {
"func_path": "test_tool.tool_with_generated_by_input.list_embedding_deployment",
"func_kwargs": [
{
"name": "embedding_connection",
"type": [
"string"
],
"reference": "${inputs.embedding_connection}",
"optional": false
}
]
},
"type": [
"string"
],
"input_type": "uionly_hidden"
}
},
"description": "This is a tool with generated by input",
"module": "test_tool.tool_with_generated_by_input",
"function": "my_tool"
}
} | promptflow/src/promptflow/tests/test_configs/tools/expected_generated_by_meta.json/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/tools/expected_generated_by_meta.json",
"repo_id": "promptflow",
"token_count": 3786
} | 84 |
inputs:
num:
type: int
outputs:
content:
type: string
reference: ${divide_num.output}
nodes:
- name: divide_num
type: python
source:
type: code
path: divide_num.py
inputs:
num: ${inputs.num}
- name: divide_num_1
type: python
source:
type: code
path: divide_num.py
inputs:
num: ${divide_num.output}
- name: divide_num_2
type: python
source:
type: code
path: divide_num.py
inputs:
num: ${divide_num_3.output}
| promptflow/src/promptflow/tests/test_configs/wrong_flows/node_reference_not_found/flow.dag.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/wrong_flows/node_reference_not_found/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 209
} | 85 |
# Run prompt flow in Azure AI
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../../how-to-guides/faq.md#stable-vs-experimental).
:::
This guide assumes you have learned how to create and run a flow by following [Quick start](../../how-to-guides/quick-start.md). It will walk you through the main process of submitting a prompt flow run to [Azure AI](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/overview-what-is-prompt-flow?view=azureml-api-2).
Benefits of using Azure AI compared to running locally:
- **Designed for team collaboration**: The portal UI is a better fit for sharing and presenting your flows and runs, and a workspace can better organize team-shared resources like connections.
- **Enterprise Readiness Solutions**: prompt flow leverages Azure AI's robust enterprise readiness solutions, providing a secure, scalable, and reliable foundation for the development, experimentation, and deployment of flows.
## Prerequisites
1. An Azure account with an active subscription - [Create an account for free](https://azure.microsoft.com/free/?WT.mc_id=A261C142F)
2. An Azure AI ML workspace - [Create workspace resources you need to get started with Azure AI](https://learn.microsoft.com/en-us/azure/machine-learning/quickstart-create-resources).
3. A Python environment; `python=3.9` or a higher version such as 3.10 is recommended.
4. Install `promptflow` with extra dependencies and `promptflow-tools`.
```sh
pip install promptflow[azure] promptflow-tools
```
5. Clone the sample repo and check flows in folder [examples/flows](https://github.com/microsoft/promptflow/tree/main/examples/flows).
```sh
git clone https://github.com/microsoft/promptflow.git
```
## Create necessary connections
Connections help securely store and manage secret keys or other sensitive credentials required for interacting with LLMs and other external tools, for example Azure Content Safety.
In this guide, we will use the flow `web-classification`, which uses the connection `open_ai_connection` inside. We need to set up the connection if we haven't added it before.
Please go to workspace portal, click `Prompt flow` -> `Connections` -> `Create`, then follow the instruction to create your own connections. Learn more on [connections](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/concept-connections?view=azureml-api-2).
## Submit a run to workspace
The following steps assume you are in the working directory `<path-to-the-sample-repo>/examples/flows/standard/`.
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
Use `az login` to log in so promptflow can get your credential.
```sh
az login
```
Submit a run to workspace.
```sh
pfazure run create --subscription <my_sub> -g <my_resource_group> -w <my_workspace> --flow web-classification --data web-classification/data.jsonl --stream
```
**Default subscription/resource-group/workspace**
Note `--subscription`, `-g` and `-w` can be omitted if you have installed the [Azure CLI](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli) and [set the default configurations](https://learn.microsoft.com/en-us/cli/azure/azure-cli-configuration).
```sh
az account set --subscription <my-sub>
az configure --defaults group=<my_resource_group> workspace=<my_workspace>
```
**Serverless runtime and named runtime**
Runtimes serve as the computing resources on which a flow is executed in the workspace. The command above does not specify a runtime, which means it will run in serverless mode. In this mode the workspace automatically creates a runtime, and you can use it as the default runtime for any later flow run.
Instead, you can also [create a runtime](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/how-to-create-manage-runtime?view=azureml-api-2) and use it with `--runtime <my-runtime>`:
```sh
pfazure run create --flow web-classification --data web-classification/data.jsonl --stream --runtime <my-runtime>
```
**Specify run name and view a run**
You can also name the run by specifying `--name my_first_cloud_run` in the run create command; otherwise a run name will be generated following a pattern that includes a timestamp.
With a run name, you can easily stream or view the run details using the commands below:
```sh
pfazure run stream -n my_first_cloud_run # same as "--stream" in command "run create"
pfazure run show-details -n my_first_cloud_run
pfazure run visualize -n my_first_cloud_run
```
More details can be found in [CLI reference: pfazure](../../reference/pfazure-command-reference.md)
:::
:::{tab-item} SDK
:sync: SDK
1. Import the required libraries
```python
from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
# azure version promptflow apis
from promptflow.azure import PFClient
```
2. Get credential
```python
try:
credential = DefaultAzureCredential()
# Check if given credential can get token successfully.
credential.get_token("https://management.azure.com/.default")
except Exception as ex:
    # Fall back to InteractiveBrowserCredential in case DefaultAzureCredential does not work
credential = InteractiveBrowserCredential()
```
3. Get a handle to the workspace
```python
# Get a handle to workspace
pf = PFClient(
credential=credential,
subscription_id="<SUBSCRIPTION_ID>", # this will look like xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
resource_group_name="<RESOURCE_GROUP>",
workspace_name="<AML_WORKSPACE_NAME>",
)
```
4. Submit the flow run
```python
# load flow
flow = "web-classification"
data = "web-classification/data.jsonl"
runtime = "example-runtime-ci" # assume you have an existing runtime provisioned with this name
# runtime = None # un-comment to use automatic runtime
# create run
base_run = pf.run(
flow=flow,
data=data,
runtime=runtime,
)
pf.stream(base_run)
```
5. View the run info
```python
details = pf.get_details(base_run)
details.head(10)
pf.visualize(base_run)
```
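If the run logs metrics (for example, when it is an evaluation flow), you can also retrieve them once the run completes. A minimal sketch:
```python
# Retrieve aggregated metrics logged by the run, if any.
metrics = pf.get_metrics(base_run)
print(metrics)
```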
:::
::::
## View the run in workspace
At the end of stream logs, you can find the `portal_url` of the submitted run, click it to view the run in the workspace.

### Run snapshot of the flow with additional includes
Flows with [additional includes](../../how-to-guides/develop-a-flow/referencing-external-files-or-folders-in-a-flow.md) enabled can also be submitted for execution in the workspace. Please note that the additional include files or folders will be uploaded and organized within the **Files** folder of the run snapshot in the cloud.

## Next steps
Learn more about:
- [CLI reference: pfazure](../../reference/pfazure-command-reference.md)
| promptflow/docs/cloud/azureai/quick-start.md/0 | {
"file_path": "promptflow/docs/cloud/azureai/quick-start.md",
"repo_id": "promptflow",
"token_count": 2002
} | 0 |
# Deploy a flow using Kubernetes
:::{admonition} Experimental feature
This is an experimental feature, and may change at any time. Learn [more](../faq.md#stable-vs-experimental).
:::
There are four steps to deploy a flow using Kubernetes:
1. Build the flow as docker format.
2. Build the docker image.
3. Create Kubernetes deployment yaml.
4. Apply the deployment.
## Build a flow as docker format
::::{tab-set}
:::{tab-item} CLI
:sync: CLI
Note that all dependent connections must be created before building as docker.
```bash
# create connection if not created before
pf connection create --file ../../../examples/connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Use the command below to build a flow as docker format:
```bash
pf flow build --source <path-to-your-flow-folder> --output <your-output-dir> --format docker
```
:::
:::{tab-item} VS Code Extension
:sync: VSC
Click the button below to build a flow as docker format:

:::
::::
Note that all dependent connections must be created before exporting as docker.
### Docker format folder structure
Exported Dockerfile & its dependencies are located in the same folder. The structure is as below:
- flow: the folder contains all the flow files
- ...
- connections: the folder contains yaml files to create all related connections
- ...
- Dockerfile: the dockerfile to build the image
- start.sh: the script used in `CMD` of `Dockerfile` to start the service
- runit: the folder contains all the runit scripts
- ...
- settings.json: a json file to store the settings of the docker image
- README.md: Simple introduction of the files
## Deploy with Kubernetes
We are going to use the [web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification/) as
an example to show how to deploy with Kubernetes.
Please ensure you have [created the connection](../manage-connections.md#create-a-connection) required by the flow; if not, you can
refer to [Setup connection for web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification).
Additionally, please ensure that you have installed all the required dependencies. You can refer to the "Prerequisites" section in the README of the [web-classification](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/web-classification/) for a comprehensive list of prerequisites and installation instructions.
### Build Docker image
As with any Dockerfile, you need to build the image first. You can tag the image with any name you want. In this example, we use `web-classification-serve`.
Then run the command below:
```bash
cd <your-output-dir>
docker build . -t web-classification-serve
```
### Create Kubernetes deployment yaml.
The Kubernetes deployment yaml file acts as a guide for managing your docker container in a Kubernetes pod. It clearly specifies important information like the container image, port configurations, environment variables, and various settings. Below, you'll find a simple deployment template that you can easily customize to meet your needs.
**Note**: You need to encode the secret using base64 first and use the `<encoded_secret>` as 'open-ai-connection-api-key' in the deployment configuration. For example, you can run the command below on Linux:
```bash
encoded_secret=$(echo -n <your_api_key> | base64)
```
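Alternatively, if you prefer not to embed the encoded value in the yaml, `kubectl` can create the secret for you and handle the base64 encoding itself (a minimal sketch; it uses the same namespace and key name as the deployment below, and the namespace must already exist):
```bash
kubectl create secret generic open-ai-connection-api-key \
  --from-literal=open-ai-connection-api-key=<your_api_key> \
  -n <your-namespace>
```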
```yaml
---
kind: Namespace
apiVersion: v1
metadata:
name: <your-namespace>
---
apiVersion: v1
kind: Secret
metadata:
name: open-ai-connection-api-key
namespace: <your-namespace>
type: Opaque
data:
open-ai-connection-api-key: <encoded_secret>
---
apiVersion: v1
kind: Service
metadata:
name: web-classification-service
namespace: <your-namespace>
spec:
type: NodePort
ports:
- name: http
port: 8080
targetPort: 8080
nodePort: 30123
selector:
app: web-classification-serve-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: web-classification-serve-app
namespace: <your-namespace>
spec:
selector:
matchLabels:
app: web-classification-serve-app
template:
metadata:
labels:
app: web-classification-serve-app
spec:
containers:
- name: web-classification-serve-container
image: <your-docker-image>
imagePullPolicy: Never
ports:
- containerPort: 8080
env:
- name: OPEN_AI_CONNECTION_API_KEY
valueFrom:
secretKeyRef:
name: open-ai-connection-api-key
key: open-ai-connection-api-key
```
### Apply the deployment.
Before you can deploy your application, ensure that you have a Kubernetes cluster set up and that [kubectl](https://kubernetes.io/docs/reference/kubectl/) is installed. In this documentation, we will use [Minikube](https://minikube.sigs.k8s.io/docs/) as an example. To start the cluster, execute the following command:
```bash
minikube start
```
Once your Kubernetes cluster is up and running, you can proceed to deploy your application by using the following command:
```bash
kubectl apply -f deployment.yaml
```
This command will create the necessary pods to run your application within the cluster.
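You can check that the pods reach the `Running` state before moving on:
```bash
kubectl get pods -n <your-namespace>
```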
**Note**: In the following sections, you need to replace <pod_name> with your specific pod name, which you can find in the output of the `kubectl get pods` command above.
### Retrieve flow service logs of the container
The kubectl logs command is used to retrieve the logs of a container running within a pod, which can be useful for debugging, monitoring, and troubleshooting applications deployed in a Kubernetes cluster.
```bash
kubectl -n <your-namespace> logs <pod-name>
```
#### Connections
If the service involves connections, all related connections will be exported as yaml files and recreated in containers.
Secrets in connections won't be exported directly. Instead, we will export them as a reference to environment variables:
```yaml
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/OpenAIConnection.schema.json
type: open_ai
name: open_ai_connection
module: promptflow.connections
api_key: ${env:OPEN_AI_CONNECTION_API_KEY} # env reference
```
You'll need to set up the environment variables in the container to make the connections work.
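If you want a quick sanity check outside Kubernetes, you can run the built image directly with the required environment variable set (a minimal sketch; the variable name and port follow the deployment example above):
```bash
docker run -d -p 8080:8080 \
  -e OPEN_AI_CONNECTION_API_KEY=<your_api_key> \
  web-classification-serve
```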
### Test the endpoint
- Option1:
Once you've started the service, you can establish a connection between a local port and a port on the pod. This allows you to conveniently test the endpoint from your local terminal.
To achieve this, execute the following command:
```bash
kubectl port-forward <pod_name> <local_port>:<container_port> -n <your-namespace>
```
With the port forwarding in place, you can use the curl command to initiate the endpoint test:
```bash
curl http://localhost:<local_port>/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
- Option2:
`minikube service web-classification-service --url -n <your-namespace>` runs as a process, creating a tunnel to the cluster. The command exposes the service directly to any program running on the host operating system.
The command above will retrieve the URL of a service running within a Minikube Kubernetes cluster (e.g. http://<ip>:<assigned_port>), which you can click to interact with the flow service in your web browser. Alternatively, you can use the following command to test the endpoint:
**Note**: Minikube will use its own external port instead of nodePort to listen to the service. So please substitute <assigned_port> with the port obtained above.
```bash
curl http://localhost:<assigned_port>/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
## Next steps
- Try the example [here](https://github.com/microsoft/promptflow/tree/main/examples/tutorials/flow-deploy/kubernetes). | promptflow/docs/how-to-guides/deploy-a-flow/deploy-using-kubernetes.md/0 | {
"file_path": "promptflow/docs/how-to-guides/deploy-a-flow/deploy-using-kubernetes.md",
"repo_id": "promptflow",
"token_count": 2398
} | 1 |
# Using File Path as Tool Input
Users sometimes need to reference local files within a tool to implement specific logic. To simplify this, we've introduced the `FilePath` input type. This input type enables users to either select an existing file or create a new one, then pass it to a tool, allowing the tool to access the file's content.
In this guide, we will provide a detailed walkthrough on how to use `FilePath` as a tool input. We will also demonstrate the user experience when utilizing this type of tool within a flow.
## Prerequisites
- Please install the promptflow package and ensure that its version is 0.1.0b8 or later.
```
pip install "promptflow>=0.1.0b8"
```
- Please ensure that your [Prompt flow for VS Code](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow) is updated to version 1.1.0 or later.
## Using File Path as Package Tool Input
### How to create a package tool with file path input
Here we use [an existing tool package](https://github.com/microsoft/promptflow/tree/main/examples/tools/tool-package-quickstart/my_tool_package) as an example. If you want to create your own tool, please refer to [create and use tool package](create-and-use-tool-package.md#create-custom-tool-package).
1. Add a `FilePath` input for your tool, like in [this example](https://github.com/microsoft/promptflow/blob/main/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_file_path_input.py).
```python
import importlib
from pathlib import Path
from promptflow import tool
# 1. import the FilePath type
from promptflow.contracts.types import FilePath
# 2. add a FilePath input for your tool method
@tool
def my_tool(input_file: FilePath, input_text: str) -> str:
# 3. customise your own code to handle and use the input_file here
new_module = importlib.import_module(Path(input_file).stem)
return new_module.hello(input_text)
```
2. `FilePath` input format in a tool YAML, like in [this example](https://github.com/microsoft/promptflow/blob/main/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_file_path_input.yaml).
```yaml
my_tool_package.tools.tool_with_file_path_input.my_tool:
function: my_tool
inputs:
# yaml format for FilePath input
input_file:
type:
- file_path
input_text:
type:
- string
module: my_tool_package.tools.tool_with_file_path_input
name: Tool with FilePath Input
description: This is a tool to demonstrate the usage of FilePath input
type: python
```
> [!Note] The tool YAML file can be generated using a Python script. For further details, please refer to [create custom tool package](create-and-use-tool-package.md#create-custom-tool-package).
### Use tool with a file path input in VS Code extension
Follow steps to [build and install your tool package](create-and-use-tool-package.md#build-and-share-the-tool-package) and [use your tool from VS Code extension](create-and-use-tool-package.md#use-your-tool-from-vscode-extension).
Here we use an existing flow to demonstrate the experience, open [this flow](https://github.com/microsoft/promptflow/blob/main/examples/tools/use-cases/filepath-input-tool-showcase/flow.dag.yaml) in VS Code extension:
- There is a node named "Tool_with_FilePath_Input" with a `file_path` type input called `input_file`.
- Click the picker icon to open the UI for selecting an existing file or creating a new file to use as input.

## Using File Path as Script Tool Input
We can also utilize the `FilePath` input type directly in a script tool, eliminating the need to create a package tool.
1. Create an empty flow in the VS Code extension and add a Python node titled 'python_node_with_filepath' to it on the Visual Editor page.
2. Select the link `python_node_with_filepath.py` in the node to modify the python method to include a `FilePath` input as shown below, and save the code change.
```python
import importlib
from pathlib import Path
from promptflow import tool
# 1. import the FilePath type
from promptflow.contracts.types import FilePath
# 2. add a FilePath input for your tool method
@tool
def my_tool(input_file: FilePath, input_text: str) -> str:
# 3. customise your own code to handle and use the input_file here
new_module = importlib.import_module(Path(input_file).stem)
return new_module.hello(input_text)
```
3. Return to the flow Visual Editor page, click the picker icon to launch the UI for selecting an existing file or creating a new file to use as input, here we select [this file](https://github.com/microsoft/promptflow/blob/main/examples/tools/use-cases/filepath-input-tool-showcase/hello_method.py) as an example.

## FAQ
### What are some practical use cases for this feature?
The `FilePath` input enables several useful workflows:
1. **Dynamically load modules** - As shown in the demo, you can load a Python module from a specific script file selected by the user. This allows flexible custom logic.
2. **Load arbitrary data files** - The tool can load data from files like .csv, .txt, .json, etc. This provides an easy way to inject external data into a tool.
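For instance, a minimal sketch of the second use case, a tool that loads a user-selected JSON data file (the function name and lack of error handling are illustrative):
```python
import json
from promptflow import tool
from promptflow.contracts.types import FilePath

@tool
def load_json_data(data_file: FilePath) -> dict:
    # FilePath behaves like a path string, so it can be opened directly.
    with open(data_file, "r", encoding="utf-8") as f:
        return json.load(f)
```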
So in summary, `FilePath` input gives tools flexible access to external files provided by users at runtime. This unlocks many useful scenarios like the ones above.
| promptflow/docs/how-to-guides/develop-a-tool/use-file-path-as-tool-input.md/0 | {
"file_path": "promptflow/docs/how-to-guides/develop-a-tool/use-file-path-as-tool-input.md",
"repo_id": "promptflow",
"token_count": 1804
} | 2 |
# Alternative LLMs
This section provides tutorials on incorporating alternative large language models into prompt flow.
```{toctree}
:maxdepth: 1
:hidden:
``` | promptflow/docs/integrations/llms/index.md/0 | {
"file_path": "promptflow/docs/integrations/llms/index.md",
"repo_id": "promptflow",
"token_count": 43
} | 3 |
# Python
## Introduction
The Python tool empowers users to provide customized code snippets as self-contained executable nodes in PromptFlow.
Users can easily create Python tools, edit the code, and verify the results.
## Inputs
| Name | Type | Description | Required |
|--------|--------|------------------------------------------------------|---------|
| Code | string | Python code snippet | Yes |
| Inputs | - | List of tool function parameters and its assignments | - |
### Types
| Type | Python example | Description |
|-----------------------------------------------------|---------------------------------|--------------------------------------------|
| int | param: int | Integer type |
| bool | param: bool | Boolean type |
| string | param: str | String type |
| double | param: float | Double type |
| list | param: list or param: List[T] | List type |
| object | param: dict or param: Dict[K, V] | Object type |
| [Connection](../../concepts/concept-connections.md) | param: CustomConnection | Connection type, will be handled specially |
Parameters with `Connection` type annotation will be treated as connection inputs, which means:
- Promptflow extension will show a selector to select the connection.
- During execution, promptflow will try to find the connection whose name matches the parameter value passed in.
Note that `Union[...]` type annotation is supported **ONLY** for connection type,
for example, `param: Union[CustomConnection, OpenAIConnection]`.
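A hypothetical tool that accepts either connection type could be declared like this (a minimal sketch; the function body is illustrative):
```python
from typing import Union
from promptflow import tool
from promptflow.connections import CustomConnection, OpenAIConnection

@tool
def my_union_tool(message: str, conn: Union[CustomConnection, OpenAIConnection]) -> str:
    # The runtime resolves the connection name from the flow inputs to the matching connection object.
    return f"Got a {type(conn).__name__} for message: {message}"
```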
## Outputs
The return of the python tool function.
## How to write Python Tool?
### Guidelines
1. Python tool code should consist of complete Python code, including any necessary module imports.
2. Python tool code must contain a function decorated with @tool (the tool function), serving as the entry point for execution. The @tool decorator should be applied only once within the snippet.
_The sample below defines the python tool "my_python_tool", decorated with @tool_
3. Python tool function parameters must be assigned in the 'Inputs' section.
_The sample below defines the input "message" and assigns it the value "world"_
4. The Python tool function must have a return value.
_The sample below returns a concatenated string_
### Code
The snippet below shows the basic structure of a tool function. Promptflow will read the function and extract inputs
from function parameters and type annotations.
```python
from promptflow import tool
from promptflow.connections import CustomConnection
# The inputs section will change based on the arguments of the tool function, after you save the code
# Adding type to arguments and return value will help the system show the types properly
# Please update the function name/signature per need
@tool
def my_python_tool(message: str, my_conn: CustomConnection) -> str:
my_conn_dict = dict(my_conn)
# Do some function call with my_conn_dict...
return 'hello ' + message
```
### Inputs
| Name | Type | Sample Value in Flow Yaml | Value passed to function|
|---------|--------|-------------------------| ------------------------|
| message | string | "world" | "world" |
| my_conn | CustomConnection | "my_conn" | CustomConnection object |
Promptflow will try to find the connection named 'my_conn' during execution time.
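For reference, the corresponding node in `flow.dag.yaml` might look like the following sketch (node and file names are illustrative):
```yaml
- name: my_python_tool_node
  type: python
  source:
    type: code
    path: my_python_tool.py
  inputs:
    message: "world"
    my_conn: my_conn
```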
### Outputs
```python
"hello world"
```
### Keyword Arguments Support
Starting from version 1.0.0 of PromptFlow and version 1.4.0 of [Prompt flow for VS Code](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow),
we have introduced support for keyword arguments (kwargs) in the Python tool.
```python
from promptflow import tool
@tool
def print_test(normal_input: str, **kwargs):
for key, value in kwargs.items():
print(f"Key {key}'s value is {value}")
return len(kwargs)
```
When you add `kwargs` to your Python tool as in the code above, you can insert a variable number of inputs using the `+Add input` button.
 | promptflow/docs/reference/tools-reference/python-tool.md/0 | {
"file_path": "promptflow/docs/reference/tools-reference/python-tool.md",
"repo_id": "promptflow",
"token_count": 1821
} | 4 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/SerpConnection.schema.json
name: serp_connection
type: serp
api_key: "<to-be-replaced>"
| promptflow/examples/connections/serp.yml/0 | {
"file_path": "promptflow/examples/connections/serp.yml",
"repo_id": "promptflow",
"token_count": 59
} | 5 |
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
PDF_DIR = os.path.join(BASE_DIR, ".pdfs")
INDEX_DIR = os.path.join(BASE_DIR, ".index/.pdfs/")
| promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/constants.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/chat_with_pdf/constants.py",
"repo_id": "promptflow",
"token_count": 73
} | 6 |
import unittest
import os
import time
import traceback
class BaseTest(unittest.TestCase):
def setUp(self):
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../")
self.flow_path = os.path.join(root, "chat-with-pdf")
self.data_path = os.path.join(
self.flow_path, "data/bert-paper-qna-3-line.jsonl"
)
self.eval_groundedness_flow_path = os.path.join(
root, "../evaluation/eval-groundedness"
)
self.eval_perceived_intelligence_flow_path = os.path.join(
root, "../evaluation/eval-perceived-intelligence"
)
self.all_runs_generated = []
self.config_3k_context = {
"EMBEDDING_MODEL_DEPLOYMENT_NAME": "text-embedding-ada-002",
"CHAT_MODEL_DEPLOYMENT_NAME": "gpt-35-turbo",
"PROMPT_TOKEN_LIMIT": 3000,
"MAX_COMPLETION_TOKENS": 256,
"VERBOSE": True,
"CHUNK_SIZE": 1024,
"CHUNK_OVERLAP": 64,
}
self.config_2k_context = {
"EMBEDDING_MODEL_DEPLOYMENT_NAME": "text-embedding-ada-002",
"CHAT_MODEL_DEPLOYMENT_NAME": "gpt-35-turbo",
"PROMPT_TOKEN_LIMIT": 2000,
"MAX_COMPLETION_TOKENS": 256,
"VERBOSE": True,
"CHUNK_SIZE": 1024,
"CHUNK_OVERLAP": 64,
}
# Switch current working directory to the folder of this file
self.cwd = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
def tearDown(self):
# Switch back to the original working directory
os.chdir(self.cwd)
for run in self.all_runs_generated:
try:
self.pf.runs.archive(run.name)
except Exception as e:
print(e)
traceback.print_exc()
def create_chat_run(
self,
data=None,
column_mapping=None,
connections=None,
display_name="chat_run",
stream=True,
):
if column_mapping is None:
column_mapping = {
"chat_history": "${data.chat_history}",
"pdf_url": "${data.pdf_url}",
"question": "${data.question}",
"config": self.config_2k_context,
}
data = self.data_path if data is None else data
run = self.pf.run(
flow=self.flow_path,
data=data,
column_mapping=column_mapping,
connections=connections,
display_name=display_name,
tags={"unittest": "true"},
stream=stream,
)
self.all_runs_generated.append(run)
self.check_run_basics(run, display_name)
return run
def create_eval_run(
self,
eval_flow_path,
base_run,
column_mapping,
connections=None,
display_name_postfix="",
):
display_name = eval_flow_path.split("/")[-1] + display_name_postfix
eval = self.pf.run(
flow=eval_flow_path,
run=base_run,
column_mapping=column_mapping,
connections=connections,
display_name=display_name,
tags={"unittest": "true"},
stream=True,
)
self.all_runs_generated.append(eval)
self.check_run_basics(eval, display_name)
return eval
def check_run_basics(self, run, display_name=None):
self.assertTrue(run is not None)
if display_name is not None:
self.assertTrue(run.display_name.find(display_name) != -1)
self.assertEqual(run.tags["unittest"], "true")
def run_eval_with_config(self, config: dict, display_name: str = None):
run = self.create_chat_run(
column_mapping={
"question": "${data.question}",
"pdf_url": "${data.pdf_url}",
"chat_history": "${data.chat_history}",
"config": config,
},
display_name=display_name,
)
self.pf.stream(run) # wait for completion
self.check_run_basics(run)
eval_groundedness = self.create_eval_run(
self.eval_groundedness_flow_path,
run,
{
"question": "${run.inputs.question}",
"answer": "${run.outputs.answer}",
"context": "${run.outputs.context}",
},
display_name_postfix="_" + display_name,
)
self.pf.stream(eval_groundedness) # wait for completion
self.check_run_basics(eval_groundedness)
details = self.pf.get_details(eval_groundedness)
self.assertGreater(details.shape[0], 2)
metrics, elapsed = self.wait_for_metrics(eval_groundedness)
self.assertGreaterEqual(metrics["groundedness"], 0.0)
self.assertLessEqual(elapsed, 5) # metrics should be available within 5 seconds
eval_pi = self.create_eval_run(
self.eval_perceived_intelligence_flow_path,
run,
{
"question": "${run.inputs.question}",
"answer": "${run.outputs.answer}",
"context": "${run.outputs.context}",
},
display_name_postfix="_" + display_name,
)
self.pf.stream(eval_pi) # wait for completion
self.check_run_basics(eval_pi)
details = self.pf.get_details(eval_pi)
self.assertGreater(details.shape[0], 2)
metrics, elapsed = self.wait_for_metrics(eval_pi)
self.assertGreaterEqual(metrics["perceived_intelligence_score"], 0.0)
self.assertLessEqual(elapsed, 5) # metrics should be available within 5 seconds
return run, eval_groundedness, eval_pi
def wait_for_metrics(self, run):
start = time.time()
metrics = self.pf.get_metrics(run)
cnt = 3
while len(metrics) == 0 and cnt > 0:
time.sleep(5)
metrics = self.pf.get_metrics(run)
cnt -= 1
end = time.time()
return metrics, end - start
| promptflow/examples/flows/chat/chat-with-pdf/tests/base_test.py/0 | {
"file_path": "promptflow/examples/flows/chat/chat-with-pdf/tests/base_test.py",
"repo_id": "promptflow",
"token_count": 3126
} | 7 |
from typing import List
from promptflow import tool
@tool
def aggregate(groundedness_scores: List[float]):
"""
This tool aggregates the processed result of all lines to the variant level and log metric for each variant.
:param processed_results: List of the output of line_process node.
:param variant_ids: List of variant ids that can be used to group the results by variant.
:param line_numbers: List of line numbers of the variants. If provided, this can be used to
group the results by line number.
"""
aggregated_results = {"groundedness": 0.0, "count": 0}
# Calculate average groundedness score for each variant
for i in range(len(groundedness_scores)):
aggregated_results["groundedness"] += groundedness_scores[i]
aggregated_results["count"] += 1
aggregated_results["groundedness"] /= aggregated_results["count"]
# Log metric for each variant
from promptflow import log_metric
log_metric(key="groundedness", value=aggregated_results["groundedness"])
return aggregated_results
| promptflow/examples/flows/evaluation/eval-groundedness/aggregate.py/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-groundedness/aggregate.py",
"repo_id": "promptflow",
"token_count": 350
} | 8 |
from promptflow import tool
import numpy as np
import re
@tool
def concat_results(gpt_coherence_score: str = None,
gpt_similarity_score: str = None,
gpt_fluency_score: str = None,
gpt_relevance_score: str = None,
gpt_groundedness_score: str = None,
f1_score: float = None,
ada_cosine_similarity: float = None):
load_list = [{'name': 'gpt_coherence', 'score': gpt_coherence_score},
{'name': 'gpt_similarity', 'score': gpt_similarity_score},
{'name': 'gpt_fluency', 'score': gpt_fluency_score},
{'name': 'gpt_relevance', 'score': gpt_relevance_score},
{'name': 'gpt_groundedness', 'score': gpt_groundedness_score},
{'name': 'f1_score', 'score': f1_score},
{'name': 'ada_similarity', 'score': ada_cosine_similarity}]
scalar_metrics = ["f1_score", "ada_similarity"]
score_list = []
errors = []
for item in load_list:
if item["name"] in scalar_metrics:
try:
score = float(item["score"])
except Exception as e:
score = np.nan
errors.append({"name": item["name"], "msg": str(e), "data": item["score"]})
else:
if item['score']:
try:
score = item["score"]
match = re.search(r'\d', score)
if match:
score = float(match.group())
else:
score = np.nan
except Exception as e:
score = np.nan
errors.append({"name": item["name"], "msg": str(e), "data": item["score"]})
else:
score = np.nan
score_list.append({"name": item["name"], "score": score})
variant_level_result = {}
for item in score_list:
item_name = str(item["name"])
variant_level_result[item_name] = item["score"]
if 'gpt' in item_name:
variant_level_result[item_name + '_pass_rate'] = 1 if item["score"] > 3 else 0
return variant_level_result
| promptflow/examples/flows/evaluation/eval-qna-non-rag/concat_scores.py/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-qna-non-rag/concat_scores.py",
"repo_id": "promptflow",
"token_count": 1177
} | 9 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
metrics:
type: string
default: gpt_groundedness,gpt_relevance,gpt_retrieval_score
is_chat_input: false
answer:
type: string
default: Of the tents mentioned in the retrieved documents, the Alpine Explorer
Tent has the highest waterproof rating of 3000mm for its rainfly.
is_chat_input: false
question:
type: string
default: Which tent is the most waterproof?
is_chat_input: false
documents:
type: string
default: "{\"documents\": [{\"content\":\"<h1
id=\\\"information-about-product-item_number-1\\\">Information about
product item_number: 1</h1>\\n<p>TrailMaster X4 Tent, price
$250,</p>\\n<h2 id=\\\"brand\\\">Brand</h2>\\n<p>OutdoorLiving</p>\\n<h2
id=\\\"category\\\">Category</h2>\\n<p>Tents</p>\\n<h2
id=\\\"features\\\">Features</h2>\\n<ul>\\n<li>Polyester material for
durability</li>\\n<li>Spacious interior to accommodate multiple
people</li>\\n<li>Easy setup with included
instructions</li>\\n<li>Water-resistant construction to withstand light
rain</li>\\n<li>Mesh panels for ventilation and insect
protection</li>\\n<li>Rainfly included for added weather
protection</li>\\n<li>Multiple doors for convenient entry and
exit</li>\\n<li>Interior pockets for organizing small
items</li>\\n<li>Reflective guy lines for improved visibility at
night</li>\\n<li>Freestanding design for easy setup and
relocation</li>\\n<li>Carry bag included for convenient storage and
transportation</li>\\n</ul>\\n<h2 id=\\\"technical-specs\\\">Technical
Specs</h2>\\n<p><strong>Best Use</strong>: Camping<br
/>\\n<strong>Capacity</strong>: 4-person<br />\\n<strong>Season
Rating</strong>: 3-season<br />\\n<strong>Setup</strong>: Freestanding<br
/>\\n<strong>Material</strong>: Polyester<br
/>\\n<strong>Waterproof</strong>: Yes<br />\\n<strong>Floor Area</strong>:
80 square feet<br />\\n<strong>Peak Height</strong>: 6 feet<br
/>\\n<strong>Number of Doors</strong>: 2<br />\\n<strong>Color</strong>:
Green<br />\\n<strong>Rainfly</strong>: Included<br />\\n<strong>Rainfly
Waterproof Rating</strong>: 2000mm<br />\\n<strong>Tent Poles</strong>:
Aluminum<br />\\n<strong>Pole Diameter</strong>: 9mm<br
/>\\n<strong>Ventilation</strong>: Mesh panels and adjustable vents<br
/>\\n<strong>Interior Pockets</strong>: Yes (4 pockets)<br
/>\\n<strong>Gear Loft</strong>: Included<br
/>\\n<strong>Footprint</strong>: Sold separately<br />\\n<strong>Guy
Lines</strong>: Reflective<br />\\n<strong>Stakes</strong>: Aluminum<br
/>\\n<strong>Carry Bag</strong>: Included<br
/>\\n<strong>Dimensions</strong>: 10ft x 8ft x 6ft (length x width x peak
height)<br />\\n<strong>Packed Size</strong>: 24 inches x 8 inches<br
/>\\n<strong>Weight</strong>: 12 lbs </p>\\n<h2
id=\\\"trailmaster-x4-tent-user-guide\\\">TrailMaster X4 Tent User
Guide</h2>\\n<h3 id=\\\"introduction\\\">Introduction</h3>\\n<p>Thank you
for choosing the TrailMaster X4 Tent. This user guide provides
instructions on how to set up, use, and maintain your tent effectively.
Please read this guide thoroughly before using the tent.</p>\\n<h3
id=\\\"package-contents\\\">Package Contents</h3>\\n<p>Ensure that the
package includes the following components:</p>\\n<ul>\\n<li>TrailMaster X4
Tent body</li>\\n<li>Tent poles</li>\\n<li>Rainfly (if
applicable)</li>\\n<li>Stakes and guy lines</li>\\n<li>Carry
bag</li>\\n<li>User Guide</li>\\n</ul>\\n<p>If any components are missing
or damaged, please contact our customer support immediately.</p>\\n<h3
id=\\\"tent-setup\\\">Tent Setup</h3>\\n<h4
id=\\\"step-1-selecting-a-suitable-location\\\">Step 1: Selecting a
Suitable Location</h4>\\n<ul>\\n<li>Find a level and clear area for
pitching the tent.</li>\\n<li>Remove any sharp objects or debris that
could damage the tent floor.</li>\\n</ul>\\n<h4
id=\\\"step-2-unpacking-and-organizing-components\\\">Step 2: Unpacking
and Organizing Components</h4>\\n<ul>\\n<li>Lay out all the tent
components on the ground.</li>\\n<li>Familiarize yourself with each part,
including the tent body, poles, rainfly, stakes, and guy
lines.</li>\\n</ul>\\n<h4 id=\\\"step-3-assembling-the-tent-poles\\\">Step
3: Assembling the Tent Poles</h4>\\n<ul>\\n<li>Connect the tent poles
according to their designated color codes or numbering.</li>\\n<li>Slide
the connected poles through the pole sleeves or attach them to the tent
body clips.</li>\\n</ul>\\n<h4
id=\\\"step-4-setting-up-the-tent-body\\\">Step 4: Setting up the Tent
Body</h4>\\n<ul>\\n<li>Begin at one end and raise the tent body by pushing
up the poles.</li>\\n<li>Ensure that the tent body is evenly stretched and
centered.</li>\\n<li>Secure the tent body to the ground using stakes and
guy lines as needed.</li>\\n</ul>\\n<h4
id=\\\"step-5-attaching-the-rainfly-if-applicable\\\">Step 5: Attaching
the Rainfly (if applicable)</h4>\\n<ul>\\n<li>If your tent includes a
rainfly, spread it over the tent body.</li>\\n<li>Attach the rainfly to
the tent corners and secure it with the provided buckles or
clips.</li>\\n<li>Adjust the tension of the rainfly to ensure proper
airflow and weather protection.</li>\\n</ul>\\n<h4
id=\\\"step-6-securing-the-tent\\\">Step 6: Securing the
Tent</h4>\\n<ul>\\n<li>Stake down the tent corners and guy out the guy
lines for additional stability.</li>\\n<li>Adjust the tension of the guy
lines to provide optimal stability and wind resistance.</li>\\n</ul>\\n<h3
id=\\\"tent-takedown-and-storage\\\">Tent Takedown and Storage</h3>\\n<h4
id=\\\"step-1-removing-stakes-and-guy-lines\\\">Step 1: Removing Stakes
and Guy Lines</h4>\\n<ul>\\n<li>Remove all stakes from the
ground.</li>\\n<li>Untie or disconnect the guy lines from the tent and
store them separately.</li>\\n</ul>\",\"id\":null,\"title\":\"Information
about product item_number:
1\",\"filepath\":\"product_info_1.md\",\"url\":\"https://amipateldemo.blo\
b.core.windows.net/fileupload-my-product-info/product_info_1.md\",\"metad\
ata\":{\"chunking\":\"orignal document size=1544. Scores=3.739763Org
Highlight count=75.\"},\"chunk_id\":\"1\"},{\"content\":\"<h1
id=\\\"information-about-product-item_number-8\\\">Information about
product item_number: 8</h1>\\n<p>Alpine Explorer Tent, price
$350,</p>\\n<h2 id=\\\"brand\\\">Brand</h2>\\n<p>AlpineGear</p>\\n<h2
id=\\\"category\\\">Category</h2>\\n<p>Tents</p>\\n<h3
id=\\\"features\\\">Features</h3>\\n<ul>\\n<li>Waterproof: Provides
reliable protection against rain and moisture.</li>\\n<li>Easy Setup:
Simple and quick assembly process, making it convenient for
camping.</li>\\n<li>Room Divider: Includes a detachable divider to create
separate living spaces within the tent.</li>\\n<li>Excellent Ventilation:
Multiple mesh windows and vents promote airflow and reduce
condensation.</li>\\n<li>Gear Loft: Built-in gear loft or storage pockets
for organizing and storing camping gear.</li>\\n</ul>\\n<h2
id=\\\"technical-specs\\\">Technical Specs</h2>\\n<p><strong>Best
Use</strong>: Camping<br />\\n<strong>Capacity</strong>: 8-person<br
/>\\n<strong>Season Rating</strong>: 3-season<br
/>\\n<strong>Setup</strong>: Freestanding<br
/>\\n<strong>Material</strong>: Polyester<br
/>\\n<strong>Waterproof</strong>: Yes<br />\\n<strong>Floor Area</strong>:
120 square feet<br />\\n<strong>Peak Height</strong>: 6.5 feet<br
/>\\n<strong>Number of Doors</strong>: 2<br />\\n<strong>Color</strong>:
Orange<br />\\n<strong>Rainfly</strong>: Included<br />\\n<strong>Rainfly
Waterproof Rating</strong>: 3000mm<br />\\n<strong>Tent Poles</strong>:
Aluminum<br />\\n<strong>Pole Diameter</strong>: 12mm<br
/>\\n<strong>Ventilation</strong>: Mesh panels and adjustable vents<br
/>\\n<strong>Interior Pockets</strong>: 4 pockets<br />\\n<strong>Gear
Loft</strong>: Included<br />\\n<strong>Footprint</strong>: Sold
separately<br />\\n<strong>Guy Lines</strong>: Reflective<br
/>\\n<strong>Stakes</strong>: Aluminum<br />\\n<strong>Carry Bag</strong>:
Included<br />\\n<strong>Dimensions</strong>: 12ft x 10ft x 7ft (Length x
Width x Peak Height)<br />\\n<strong>Packed Size</strong>: 24 inches x 10
inches<br />\\n<strong>Weight</strong>: 17 lbs</p>\\n<h2
id=\\\"alpine-explorer-tent-user-guide\\\">Alpine Explorer Tent User
Guide</h2>\\n<p>Thank you for choosing the Alpine Explorer Tent. This user
guide provides instructions on how to set up, use, and maintain your tent
effectively. Please read this guide thoroughly before using the
tent.</p>\\n<h3 id=\\\"package-contents\\\">Package
Contents</h3>\\n<p>Ensure that the package includes the following
components:</p>\\n<ul>\\n<li>Alpine Explorer Tent body</li>\\n<li>Tent
poles</li>\\n<li>Rainfly</li>\\n<li>Stakes and guy lines</li>\\n<li>Carry
bag</li>\\n<li>User Guide</li>\\n</ul>\\n<p>If any components are missing
or damaged, please contact our customer support immediately.</p>\\n<h3
id=\\\"tent-setup\\\">Tent Setup</h3>\\n<p><strong>Step 1: Selecting a
Suitable Location</strong></p>\\n<ul>\\n<li>Find a level and clear area
for pitching the tent.</li>\\n<li>Remove any sharp objects or debris that
could damage the tent floor.</li>\\n</ul>\\n<p><strong>Step 2: Unpacking
and Organizing Components</strong></p>\\n<ul>\\n<li>Lay out all the tent
components on the ground.</li>\\n<li>Familiarize yourself with each part,
including the tent body, poles, rainfly, stakes, and guy
lines.</li>\\n</ul>\\n<p><strong>Step 3: Assembling the Tent
Poles</strong></p>\\n<ul>\\n<li>Connect the tent poles according to their
designated color codes or numbering.</li>\\n<li>Slide the connected poles
through the pole sleeves or attach them to the tent body
clips.</li>\\n</ul>\\n<p><strong>Step 4: Setting up the Tent
Body</strong></p>\\n<ul>\\n<li>Begin at one end and raise the tent body by
pushing up the poles.</li>\\n<li>Ensure that the tent body is evenly
stretched and centered.</li>\\n<li>Secure the tent body to the ground
using stakes and guy lines as needed.</li>\\n</ul>\\n<p><strong>Step 5:
Attaching the Rainfly</strong></p>\\n<ul>\\n<li>Spread the rainfly over
the tent body.</li>\\n<li>Attach the rainfly to the tent corners and
secure it with the provided buckles or clips.</li>\\n<li>Adjust the
tension of the rainfly to ensure proper airflow and weather
protection.</li>\\n</ul>\\n<p><strong>Step 6: Securing the
Tent</strong></p>\\n<ul>\\n<li>Stake down the tent corners and guy out the
guy lines for additional stability.</li>\\n<li>Adjust the tension of the
guy lines to provide optimal stability and wind
resistance.</li>\\n</ul>\\n<h3 id=\\\"tent-takedown-and-storage\\\">Tent
Takedown and Storage</h3>\\n<p><strong>Step 1: Removing Stakes and Guy
Lines</strong></p>\\n<ul>\\n<li>Remove all stakes from the
ground.</li>\\n<li>Untie or disconnect the guy lines from the tent and
store them separately.</li>\\n</ul>\\n<p><strong>Step 2: Taking Down the
Tent Body</strong></p>\\n<ul>\\n<li>Start by collapsing the tent poles
carefully.</li>\\n<li>Remove the poles from the pole sleeves or
clips.</li>\\n</ul>\",\"id\":null,\"title\":\"Information about product
item_number:
8\",\"filepath\":\"product_info_8.md\",\"url\":\"https://amipateldemo.blo\
b.core.windows.net/fileupload-my-product-info/product_info_8.md\",\"metad\
ata\":{\"chunking\":\"orignal document size=1419. Scores=3.8508284Org
Highlight count=77.\"},\"chunk_id\":\"1\"},{\"content\":\"<h1
id=\\\"information-about-product-item_number-15\\\">Information about
product item_number: 15</h1>\\n<p>SkyView 2-Person Tent, price
$200,</p>\\n<h2 id=\\\"brand\\\">Brand</h2>\\n<p>OutdoorLiving</p>\\n<h2
id=\\\"category\\\">Category</h2>\\n<p>Tents</p>\\n<h2
id=\\\"features\\\">Features</h2>\\n<ul>\\n<li>Spacious interior
comfortably accommodates two people</li>\\n<li>Durable and waterproof
materials for reliable protection against the elements</li>\\n<li>Easy and
quick setup with color-coded poles and intuitive design</li>\\n<li>Two
large doors for convenient entry and exit</li>\\n<li>Vestibules provide
extra storage space for gear</li>\\n<li>Mesh panels for enhanced
ventilation and reduced condensation</li>\\n<li>Rainfly included for added
weather protection</li>\\n<li>Freestanding design allows for versatile
placement</li>\\n<li>Multiple interior pockets for organizing small
items</li>\\n<li>Reflective guy lines and stake points for improved
visibility at night</li>\\n<li>Compact and lightweight for easy
transportation and storage</li>\\n<li>Double-stitched seams for increased
durability</li>\\n<li>Comes with a carrying bag for convenient
portability</li>\\n</ul>\\n<h2 id=\\\"technical-specs\\\">Technical
Specs</h2>\\n<ul>\\n<li><strong>Best Use</strong>: Camping,
Hiking</li>\\n<li><strong>Capacity</strong>:
2-person</li>\\n<li><strong>Seasons</strong>:
3-season</li>\\n<li><strong>Packed Weight</strong>: Approx. 8
lbs</li>\\n<li><strong>Number of Doors</strong>:
2</li>\\n<li><strong>Number of Vestibules</strong>:
2</li>\\n<li><strong>Vestibule Area</strong>: Approx. 8 square feet per
vestibule</li>\\n<li><strong>Rainfly</strong>:
Included</li>\\n<li><strong>Pole Material</strong>: Lightweight
aluminum</li>\\n<li><strong>Freestanding</strong>:
Yes</li>\\n<li><strong>Footprint Included</strong>:
No</li>\\n<li><strong>Tent Bag Dimensions</strong>: 7ft x 5ft x
4ft</li>\\n<li><strong>Packed Size</strong>:
Compact</li>\\n<li><strong>Color:</strong>
Blue</li>\\n<li><strong>Warranty</strong>: Manufacturer's warranty
included</li>\\n</ul>\\n<h2 id=\\\"user-guidemanual\\\">User
Guide/Manual</h2>\\n<ol>\\n<li>Tent Components</li>\\n</ol>\\n<p>The
SkyView 2-Person Tent includes the following components:\\n- Tent body\\n-
Rainfly\\n- Aluminum tent poles\\n- Tent stakes\\n- Guy lines\\n- Tent
bag</p>\\n<ol start=\\\"2\\\">\\n<li>Tent Setup</li>\\n</ol>\\n<p>Follow
these steps to set up your SkyView 2-Person Tent:</p>\\n<p>Step 1: Find a
suitable camping site with a level ground and clear of debris.\\nStep 2:
Lay out the tent body on the ground, aligning the doors and vestibules as
desired.\\nStep 3: Assemble the tent poles and insert them into the
corresponding pole sleeves or grommets on the tent body.\\nStep 4: Attach
the rainfly over the tent body, ensuring a secure fit.\\nStep 5: Stake
down the tent and rainfly using the provided tent stakes, ensuring a taut
pitch.\\nStep 6: Adjust the guy lines as needed to enhance stability and
ventilation.\\nStep 7: Once the tent is properly set up, organize your
gear inside and enjoy your camping experience.</p>\\n<ol
start=\\\"3\\\">\\n<li>Tent Takedown</li>\\n</ol>\\n<p>To dismantle and
pack up your SkyView 2-Person Tent, follow these steps:</p>\\n<p>Step 1:
Remove all gear and belongings from the tent.\\nStep 2: Remove the stakes
and guy lines from the ground.\\nStep 3: Detach the rainfly from the tent
body.\\nStep 4: Disassemble the tent poles and remove them from the tent
body.\\nStep 5: Fold and roll up the tent body, rainfly, and poles
separately.\\nStep 6: Place all components back into the tent bag,
ensuring a compact and organized packing.</p>\\n<ol
start=\\\"4\\\">\\n<li>Tent Care and Maintenance</li>\\n</ol>\\n<p>To
extend the lifespan of your SkyView 2-Person Tent, follow these care and
maintenance guidelines:</p>\\n<ul>\\n<li>Always clean and dry the tent
before storing it.</li>\\n<li>Avoid folding or storing the tent when it is
wet or damp to prevent mold or mildew growth.</li>\\n<li>Use a mild soap
and water solution to clean the tent if necessary, and avoid using harsh
chemicals or solvents.</li>\\n<li>Inspect the tent regularly for any
damages such as tears, punctures, or broken components. Repair or replace
as needed.</li>\\n<li>Store the tent in a cool, dry place away from direct
sunlight and extreme temperatures.</li>\\n<li>Avoid placing sharp objects
or excessive weight on the tent, as this may cause
damage.</li>\\n<li>Follow the manufacturer's recommendations for seam
sealing or re-waterproofing the tent if necessary.</li>\\n</ul>\\n<ol
start=\\\"5\\\">\\n<li>Safety Precautions</li>\\n</ol>\\n<ul>\\n<li>Always
choose a safe and suitable camping location, considering factors such as
terrain, weather conditions, and potential
hazards.</li>\\n</ul>\",\"id\":null,\"title\":\"Information about product
item_number:
15\",\"filepath\":\"product_info_15.md\",\"url\":\"https://amipateldemo.b\
lob.core.windows.net/fileupload-my-product-info/product_info_15.md\",\"me\
tadata\":{\"chunking\":\"orignal document size=1342. Scores=3.4607773Org
Highlight
count=70.\"},\"chunk_id\":\"1\"},{\"content\":\"<ul>\\n<li><strong>If
Membership status \\\"None \\\":</strong> Returns are accepted within 30
days of purchase, provided the tent is unused, undamaged and in its
original packaging. Customer is responsible for the cost of return
shipping. Once the returned item is received, a refund will be issued for
the cost of the item minus a 10% restocking fee. If the item was damaged
during shipping or if there is a defect, the customer should contact
customer service within 7 days of receiving the
item.</li>\\n<li><strong>If Membership status \\\"Gold\\\":</strong>
Returns are accepted within 60 days of purchase, provided the tent is
unused, undamaged and in its original packaging. Free return shipping is
provided. Once the returned item is received, a full refund will be
issued. If the item was damaged during shipping or if there is a defect,
the customer should contact customer service within 7 days of receiving
the item.</li>\\n<li><strong>If Membership status
\\\"Platinum\\\":</strong> Returns are accepted within 90 days of
purchase, provided the tent is unused, undamaged and in its original
packaging. Free return shipping is provided, and a full refund will be
issued. If the item was damaged during shipping or if there is a defect,
the customer should contact customer service within 7 days of receiving
the item.</li>\\n</ul>\\n<h2 id=\\\"reviews\\\">Reviews</h2>\\n<p>36)
<strong>Rating:</strong> 5\\n <strong>Review:</strong> The Alpine Explorer
Tent is amazing! It's easy to set up, has excellent ventilation, and the
room divider is a great feature for added privacy. Highly recommend it for
family camping trips!</p>\\n<p>37) <strong>Rating:</strong> 4\\n
<strong>Review:</strong> I bought the Alpine Explorer Tent, and while it's
waterproof and spacious, I wish it had more storage pockets. Overall, it's
a good tent for camping.</p>\\n<p>38) <strong>Rating:</strong> 5\\n
<strong>Review:</strong> The Alpine Explorer Tent is perfect for my
family's camping adventures. It's easy to set up, has great ventilation,
and the gear loft is an excellent addition. Love it!</p>\\n<p>39)
<strong>Rating:</strong> 4\\n <strong>Review:</strong> I like the Alpine
Explorer Tent, but I wish it came with a footprint. It's comfortable and
has many useful features, but a footprint would make it even better.
Overall, it's a great tent.</p>\\n<p>40) <strong>Rating:</strong> 5\\n
<strong>Review:</strong> This tent is perfect for our family camping
trips. It's spacious, easy to set up, and the room divider is a great
feature for added privacy. The gear loft is a nice bonus for extra
storage.</p>\\n<h2 id=\\\"faq\\\">FAQ</h2>\\n<p>34) How easy is it to set
up the Alpine Explorer Tent?\\n The Alpine Explorer Tent features a quick
and easy setup, thanks to color-coded poles and intuitive design. Most
users can set it up in just a few minutes.</p>\\n<p>35) Can the Alpine
Explorer Tent accommodate two queen-sized air mattresses?\\n Yes, the
Alpine Explorer Tent is spacious enough to accommodate two queen-sized air
mattresses, making it an ideal choice for comfortable family
camping.</p>\\n<p>36) What is the purpose of the room divider in the
Alpine Explorer Tent?\\n The room divider in the Alpine Explorer Tent
allows you to create separate sleeping and living spaces, providing
privacy and organization for your camping experience.</p>\\n<p>37) How
does the gear loft in the Alpine Explorer Tent work?\\n The gear loft in
the Alpine Explorer Tent is a suspended mesh shelf that provides
additional storage space for small items, keeping them organized and
easily accessible.</p>\\n<p>38) Can the Alpine Explorer Tent be used in
snowy conditions?\\n The Alpine Explorer Tent is designed primarily for
three-season use. While it can withstand light snowfall, it may not
provide adequate structural support and insulation during heavy snow or
extreme winter conditions.</p>\",\"id\":null,\"title\":\"Information about
product item_number:
8\",\"filepath\":\"product_info_8.md\",\"url\":\"https://amipateldemo.blo\
b.core.windows.net/fileupload-my-product-info/product_info_8.md\",\"metad\
ata\":{\"chunking\":\"orignal document size=906. Scores=5.568323Org
Highlight count=85.\"},\"chunk_id\":\"0\"},{\"content\":\"<p>If you have
any questions or need further assistance, please contact our customer
support:</p>\\n<ul>\\n<li>Customer Support Phone:
+1-800-123-4567</li>\\n<li>Customer Support Email:
[email protected]</li>\\n</ul>\\n<h2 id=\\\"return-policy\\\">Return
Policy</h2>\\n<ul>\\n<li><strong>If Membership status \\\"None
\\\":</strong> Returns are accepted within 30 days of purchase, provided
the tent is unused, undamaged and in its original packaging. Customer is
responsible for the cost of return shipping. Once the returned item is
received, a refund will be issued for the cost of the item minus a 10%
restocking fee. If the item was damaged during shipping or if there is a
defect, the customer should contact customer service within 7 days of
receiving the item.</li>\\n<li><strong>If Membership status
\\\"Gold\\\":</strong> Returns are accepted within 60 days of purchase,
provided the tent is unused, undamaged and in its original packaging. Free
return shipping is provided. Once the returned item is received, a full
refund will be issued. If the item was damaged during shipping or if there
is a defect, the customer should contact customer service within 7 days of
receiving the item.</li>\\n<li><strong>If Membership status
\\\"Platinum\\\":</strong> Returns are accepted within 90 days of
purchase, provided the tent is unused, undamaged and in its original
packaging. Free return shipping is provided, and a full refund will be
issued. If the item was damaged during shipping or if there is a defect,
the customer should contact customer service within 7 days of receiving
the item.</li>\\n</ul>\\n<h2 id=\\\"reviews\\\">Reviews</h2>\\n<p>1)
<strong>Rating:</strong> 5\\n <strong>Review:</strong> I am extremely
happy with my TrailMaster X4 Tent! It's spacious, easy to set up, and kept
me dry during a storm. The UV protection is a great addition too. Highly
recommend it to anyone who loves camping!</p>\\n<p>2)
<strong>Rating:</strong> 3\\n <strong>Review:</strong> I bought the
TrailMaster X4 Tent, and while it's waterproof and has a spacious
interior, I found it a bit difficult to set up. It's a decent tent, but I
wish it were easier to assemble.</p>\\n<p>3) <strong>Rating:</strong> 5\\n
<strong>Review:</strong> The TrailMaster X4 Tent is a fantastic investment
for any serious camper. The easy setup and spacious interior make it
perfect for extended trips, and the waterproof design kept us dry in heavy
rain.</p>\\n<p>4) <strong>Rating:</strong> 4\\n <strong>Review:</strong> I
like the TrailMaster X4 Tent, but I wish it came in more colors. It's
comfortable and has many useful features, but the green color just isn't
my favorite. Overall, it's a good tent.</p>\\n<p>5)
<strong>Rating:</strong> 5\\n <strong>Review:</strong> This tent is
perfect for my family camping trips. The spacious interior and convenient
storage pocket make it easy to stay organized. It's also super easy to set
up, making it a great addition to our gear.</p>\\n<h2
id=\\\"faq\\\">FAQ</h2>\\n<p>1) Can the TrailMaster X4 Tent be used in
winter conditions?\\n The TrailMaster X4 Tent is designed for 3-season use
and may not be suitable for extreme winter conditions with heavy snow and
freezing temperatures.</p>\\n<p>2) How many people can comfortably sleep
in the TrailMaster X4 Tent?\\n The TrailMaster X4 Tent can comfortably
accommodate up to 4 people with room for their gear.</p>\\n<p>3) Is there
a warranty on the TrailMaster X4 Tent?\\n Yes, the TrailMaster X4 Tent
comes with a 2-year limited warranty against manufacturing
defects.</p>\\n<p>4) Are there any additional accessories included with
the TrailMaster X4 Tent?\\n The TrailMaster X4 Tent includes a rainfly,
tent stakes, guy lines, and a carry bag for easy transport.</p>\\n<p>5)
Can the TrailMaster X4 Tent be easily carried during hikes?\\n Yes, the
TrailMaster X4 Tent weighs just 12lbs, and when packed in its carry bag,
it can be comfortably carried during
hikes.</p>\",\"id\":null,\"title\":\"Information about product
item_number:
1\",\"filepath\":\"product_info_1.md\",\"url\":\"https://amipateldemo.blo\
b.core.windows.net/fileupload-my-product-info/product_info_1.md\",\"metad\
ata\":{\"chunking\":\"orignal document size=981. Scores=4.0350547Org
Highlight count=74.\"},\"chunk_id\":\"0\"}]}"
is_chat_input: false
outputs:
gpt_relevance:
type: string
reference: ${concat_scores.output.gpt_relevance}
gpt_groundedness:
type: string
reference: ${concat_scores.output.gpt_groundedness}
gpt_retrieval_score:
type: string
reference: ${concat_scores.output.gpt_retrieval_score}
nodes:
- name: concat_scores
type: python
source:
type: code
path: concat_scores.py
inputs:
rag_generation_score: ${parse_generation_score.output}
rag_grounding_score: ${parse_grounding_score.output}
rag_retrieval_score: ${parse_retrieval_score.output}
use_variants: false
- name: aggregate_variants_results
type: python
source:
type: code
path: aggregate_variants_results.py
inputs:
metrics: ${inputs.metrics}
results: ${concat_scores.output}
aggregation: true
use_variants: false
- name: gpt_groundedness
type: llm
source:
type: code
path: rag_groundedness_prompt.jinja2
inputs:
deployment_name: gpt-4
temperature: 0
top_p: 1
stop: ""
max_tokens: 1000
presence_penalty: 0
frequency_penalty: 0
logit_bias: ""
FullBody: ${inputs.documents}
answer: ${inputs.answer}
question: ${inputs.question}
provider: AzureOpenAI
connection: open_ai_connection
api: chat
module: promptflow.tools.aoai
activate:
when: ${validate_input.output.gpt_groundedness}
is: true
use_variants: false
- name: gpt_retrieval_score
type: llm
source:
type: code
path: rag_retrieval_prompt.jinja2
inputs:
deployment_name: gpt-4
temperature: 0
top_p: 1
stop: ""
max_tokens: 1000
presence_penalty: 0
frequency_penalty: 0
logit_bias: ""
FullBody: ${inputs.documents}
question: ${inputs.question}
provider: AzureOpenAI
connection: open_ai_connection
api: chat
module: promptflow.tools.aoai
activate:
when: ${validate_input.output.gpt_retrieval_score}
is: true
use_variants: false
- name: gpt_relevance
type: llm
source:
type: code
path: rag_generation_prompt.jinja2
inputs:
deployment_name: gpt-4
temperature: 0
top_p: 1
stop: ""
max_tokens: 1000
presence_penalty: 0
frequency_penalty: 0
logit_bias: ""
FullBody: ${inputs.documents}
answer: ${inputs.answer}
question: ${inputs.question}
provider: AzureOpenAI
connection: open_ai_connection
api: chat
module: promptflow.tools.aoai
activate:
when: ${validate_input.output.gpt_relevance}
is: true
use_variants: false
- name: parse_generation_score
type: python
source:
type: code
path: parse_generation_score.py
inputs:
rag_generation_score: ${gpt_relevance.output}
use_variants: false
- name: parse_retrieval_score
type: python
source:
type: code
path: parse_retrival_score.py
inputs:
retrieval_output: ${gpt_retrieval_score.output}
use_variants: false
- name: parse_grounding_score
type: python
source:
type: code
path: parse_groundedness_score.py
inputs:
rag_grounding_score: ${gpt_groundedness.output}
use_variants: false
- name: select_metrics
type: python
source:
type: code
path: select_metrics.py
inputs:
metrics: ${inputs.metrics}
use_variants: false
- name: validate_input
type: python
source:
type: code
path: validate_input.py
inputs:
answer: ${inputs.answer}
documents: ${inputs.documents}
question: ${inputs.question}
selected_metrics: ${select_metrics.output}
use_variants: false
node_variants: {}
environment:
python_requirements_txt: requirements.txt
| promptflow/examples/flows/evaluation/eval-qna-rag-metrics/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/evaluation/eval-qna-rag-metrics/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 11138
} | 10 |
from promptflow import tool
@tool
def parse_translation(translation_results: dict, language: str) -> str:
return translation_results[language]
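# Example (illustrative): parse_translation({"fr": "Bonjour"}, "fr") -> "Bonjour"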
| promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/parse_translation.py/0 | {
"file_path": "promptflow/examples/flows/integrations/azure-ai-language/analyze_documents/parse_translation.py",
"repo_id": "promptflow",
"token_count": 40
} | 11 |
from promptflow import tool
@tool
def generate_goal(items: list = []) -> str:
"""
    Generate a numbered list from the given items.
Args:
items (list): A list of items to be numbered.
Returns:
str: The formatted numbered list.
"""
return "\n".join(f"{i + 1}. {item}" for i, item in enumerate(items))
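# Example (illustrative): generate_goal(["Buy milk", "Walk the dog"]) returns "1. Buy milk\n2. Walk the dog"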
| promptflow/examples/flows/standard/autonomous-agent/generate_goal.py/0 | {
"file_path": "promptflow/examples/flows/standard/autonomous-agent/generate_goal.py",
"repo_id": "promptflow",
"token_count": 132
} | 12 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
text:
type: string
default: Hello World!
outputs:
output:
type: string
reference: ${llm.output}
nodes:
- name: hello_prompt
type: prompt
source:
type: code
path: hello.jinja2
inputs:
text: ${inputs.text}
- name: llm
type: python
source:
type: code
path: hello.py
inputs:
connection: basic_custom_connection
deployment_name: text-davinci-003
max_tokens: "120"
prompt: ${hello_prompt.output}
environment:
python_requirements_txt: requirements.txt
| promptflow/examples/flows/standard/basic-with-connection/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/standard/basic-with-connection/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 243
} | 13 |
{"question": "What is Prompt flow?"}
{"question": "What is ChatGPT?"} | promptflow/examples/flows/standard/conditional-flow-for-if-else/data.jsonl/0 | {
"file_path": "promptflow/examples/flows/standard/conditional-flow-for-if-else/data.jsonl",
"repo_id": "promptflow",
"token_count": 22
} | 14 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json
inputs:
question:
type: string
default: Please describe this image.
input_image:
type: image
default: https://developer.microsoft.com/_devcom/images/logo-ms-social.png
outputs:
answer:
type: string
reference: ${question_on_image.output}
output_image:
type: string
reference: ${flip_image.output}
nodes:
- name: flip_image
type: python
source:
type: code
path: flip_image.py
inputs:
input_image: ${inputs.input_image}
- name: question_on_image
type: custom_llm
source:
type: package_with_prompt
tool: promptflow.tools.aoai_gpt4v.AzureOpenAI.chat
path: question_on_image.jinja2
inputs:
connection: aoai_gpt4v_connection
deployment_name: gpt-4v
max_tokens: 512
question: ${inputs.question}
test_image: ${flip_image.output}
| promptflow/examples/flows/standard/describe-image/flow.dag.yaml/0 | {
"file_path": "promptflow/examples/flows/standard/describe-image/flow.dag.yaml",
"repo_id": "promptflow",
"token_count": 359
} | 15 |
import asyncio
import logging
import time
import uuid
from typing import List
from openai.version import VERSION as OPENAI_VERSION
import os
from abc import ABC, abstractmethod
import tiktoken
from dotenv import load_dotenv
from prompt import PromptLimitException
class AOAI(ABC):
def __init__(self, **kwargs):
if OPENAI_VERSION.startswith("0."):
raise Exception(
"Please upgrade your OpenAI package to version >= 1.0.0 or "
"using the command: pip install --upgrade openai."
)
init_params = {}
api_type = os.environ.get("API_TYPE")
if os.getenv("OPENAI_API_VERSION") is not None:
init_params["api_version"] = os.environ.get("OPENAI_API_VERSION")
if os.getenv("OPENAI_ORG_ID") is not None:
init_params["organization"] = os.environ.get("OPENAI_ORG_ID")
if os.getenv("OPENAI_API_KEY") is None:
raise ValueError("OPENAI_API_KEY is not set in environment variables")
if os.getenv("OPENAI_API_BASE") is not None:
if api_type == "azure":
init_params["azure_endpoint"] = os.environ.get("OPENAI_API_BASE")
else:
init_params["base_url"] = os.environ.get("OPENAI_API_BASE")
init_params["api_key"] = os.environ.get("OPENAI_API_KEY")
# A few sanity checks
if api_type == "azure":
if init_params.get("azure_endpoint") is None:
raise ValueError(
"OPENAI_API_BASE is not set in environment variables, this is required when api_type==azure"
)
if init_params.get("api_version") is None:
raise ValueError(
"OPENAI_API_VERSION is not set in environment variables, this is required when api_type==azure"
)
if init_params["api_key"].startswith("sk-"):
raise ValueError(
"OPENAI_API_KEY should not start with sk- when api_type==azure, "
"are you using openai key by mistake?"
)
from openai import AzureOpenAI as Client
from openai import AsyncAzureOpenAI as AsyncClient
else:
from openai import OpenAI as Client
            from openai import AsyncClient
self.client = Client(**init_params)
self.async_client = AsyncClient(**init_params)
self.default_engine = None
self.engine = kwargs.pop('model', None) or os.environ.get("MODEL")
self.total_tokens = 4000
        # env values are strings; cast so the comparison against total_tokens below works
        self.max_tokens = int(kwargs.pop('max_tokens', None) or os.environ.get("MAX_TOKENS") or 1200)
if self.engine == "gpt-4-32k":
self.total_tokens = 31000
if self.engine == "gpt-4":
self.total_tokens = 7000
if self.engine == "gpt-3.5-turbo-16k":
self.total_tokens = 15000
if self.max_tokens > self.total_tokens:
raise ValueError(f"max_tokens must be less than total_tokens, "
f"total_tokens is {self.total_tokens}, max_tokens is {self.max_tokens}")
self.tokens_limit = self.total_tokens - self.max_tokens
def count_tokens(self, text: str) -> int:
try:
encoding = tiktoken.encoding_for_model(self.engine)
except KeyError:
encoding = tiktoken.encoding_for_model(self.default_engine)
return len(encoding.encode(text))
def query(self, text, **kwargs):
stream = kwargs.pop("stream", False)
for i in range(3):
try:
if not stream:
return self.query_with_no_stream(text, **kwargs)
else:
return "".join(self.query_with_stream(text, **kwargs))
except Exception as e:
logging.error(f"Query failed, message={e}, "
f"will retry request llm after {(i + 1) * (i + 1)} seconds.")
time.sleep((i + 1) * (i + 1))
raise Exception("Query failed, and retry 3 times, but still failed.")
async def async_query(self, text, **kwargs):
stream = kwargs.pop("stream", False)
for i in range(3):
try:
if not stream:
res = await self.async_query_with_no_stream(text, **kwargs)
return res
else:
                    # `async_query_with_stream` is an async generator; collect its chunks.
                    res = [chunk async for chunk in self.async_query_with_stream(text, **kwargs)]
                    return "".join(res)
except Exception as e:
logging.error(f"llm response error, message={e}, "
f"will retry request llm after {(i + 1) * (i + 1)} seconds.")
await asyncio.sleep((i + 1) * (i + 1))
raise Exception("llm response error, and retry 3 times, but still failed.")
@abstractmethod
def query_with_no_stream(self, text, **kwargs):
pass
@abstractmethod
def query_with_stream(self, text, **kwargs):
pass
@abstractmethod
async def async_query_with_no_stream(self, text, **kwargs):
pass
@abstractmethod
async def async_query_with_stream(self, text, **kwargs):
pass
class ChatLLM(AOAI):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.default_engine = "gpt-3.5-turbo"
self.engine = self.engine or self.default_engine
self.system_prompt = "You are a Python engineer."
self.conversation = dict()
def query_with_no_stream(self, text, **kwargs):
conversation_id = kwargs.pop('conversation', None)
messages = self.create_prompt(text, conversation_id)
self.validate_tokens(messages)
temperature = kwargs.pop("temperature", 0.1)
response = self.client.chat.completions.create(
model=self.engine,
messages=messages,
temperature=temperature,
max_tokens=self.max_tokens,
stream=False,
**kwargs,
)
response_role = response.choices[0].message.role
full_response = response.choices[0].message.content
self.add_to_conversation(text, "user", conversation_id=conversation_id)
self.add_to_conversation(full_response, response_role, conversation_id=conversation_id)
return full_response
def query_with_stream(self, text, **kwargs):
conversation_id = kwargs.pop('conversation', None)
messages = self.create_prompt(text, conversation_id)
self.validate_tokens(messages)
temperature = kwargs.pop("temperature", 0.1)
response = self.client.chat.completions.create(
model=self.engine,
messages=messages,
temperature=temperature,
max_tokens=self.max_tokens,
stream=True,
**kwargs,
)
response_role = None
full_response = ""
for chunk in response:
delta = chunk.choices[0].delta
response_role = delta.role
if delta.content:
content = delta.content
full_response += content
yield content
self.add_to_conversation(text, "user", conversation_id=conversation_id)
self.add_to_conversation(full_response, response_role, conversation_id=conversation_id)
async def async_query_with_no_stream(self, text, **kwargs):
conversation_id = kwargs.pop('conversation', None)
messages = self.create_prompt(text, conversation_id)
self.validate_tokens(messages)
temperature = kwargs.pop("temperature", 0.1)
response = await self.async_client.chat.completions.create(
model=self.engine,
messages=messages,
temperature=temperature,
max_tokens=self.max_tokens,
stream=False,
**kwargs,
)
response_role = response.choices[0].message.role
full_response = response.choices[0].message.content
self.add_to_conversation(text, "user", conversation_id=conversation_id)
self.add_to_conversation(full_response, response_role, conversation_id=conversation_id)
return full_response
async def async_query_with_stream(self, text, **kwargs):
conversation_id = kwargs.pop('conversation', None)
messages = self.create_prompt(text, conversation_id)
self.validate_tokens(messages)
temperature = kwargs.pop("temperature", 0.1)
response = await self.async_client.chat.completions.create(
model=self.engine,
messages=messages,
temperature=temperature,
max_tokens=self.max_tokens,
stream=True,
**kwargs,
)
response_role = None
full_response = ""
        async for chunk in response:
delta = chunk.choices[0].delta
response_role = delta.role
if delta.content:
content = delta.content
full_response += content
yield content
self.add_to_conversation(text, "user", conversation_id=conversation_id)
self.add_to_conversation(full_response, response_role, conversation_id=conversation_id)
def get_unique_conversation_id(self):
return str(uuid.uuid4()).replace('-', '')
def add_to_conversation(self, message: str, role: str, conversation_id: str) -> None:
"""
Add a message to the conversation
"""
if type(conversation_id) is str:
self.conversation[conversation_id].append({"role": role, "content": message})
def del_conversation(self, conversation_id: str) -> None:
if conversation_id in self.conversation:
del self.conversation[conversation_id]
def init_conversation(self, conversation_id: str, system_prompt) -> None:
"""
Init a new conversation
"""
if type(conversation_id) is str:
self.conversation[conversation_id] = [{"role": "system", "content": system_prompt}]
def get_tokens_count(self, messages: List[dict]) -> int:
"""
Get token count
"""
num_tokens = 0
for message in messages:
# every message follows <im_start>{role/name}\n{content}<im_end>\n
num_tokens += 5
for key, value in message.items():
if value:
num_tokens += self.count_tokens(value)
if key == "name": # if there's a name, the role is omitted
num_tokens += 5 # role is always required and always 1 token
num_tokens += 5 # every reply is primed with <im_start>assistant
return num_tokens
def validate_tokens(self, messages: List[dict]) -> None:
total_tokens = self.get_tokens_count(messages)
if total_tokens > self.tokens_limit:
message = f"token count {total_tokens} exceeds limit {self.tokens_limit}"
raise PromptLimitException(message)
def create_prompt(self, text: str, conversation_id: str = None):
unique_conversation_id = self.get_unique_conversation_id()
conversation_id = conversation_id or unique_conversation_id
if conversation_id not in self.conversation:
self.init_conversation(conversation_id=conversation_id, system_prompt=self.system_prompt)
_conversation = self.conversation[conversation_id] + [{"role": "user", "content": text}]
while self.get_tokens_count(_conversation) > self.tokens_limit and len(_conversation) > 2:
_conversation.pop(1)
if unique_conversation_id == conversation_id:
self.del_conversation(conversation_id=unique_conversation_id)
return _conversation
if __name__ == "__main__":
load_dotenv()
llm = ChatLLM()
print(llm.query(text='how are you?'))
res = llm.query_with_stream(text='how are you?')
for item in res:
print(item)
| promptflow/examples/flows/standard/gen-docstring/azure_open_ai.py/0 | {
"file_path": "promptflow/examples/flows/standard/gen-docstring/azure_open_ai.py",
"repo_id": "promptflow",
"token_count": 5578
} | 16 |
# Math to Code
Math to Code is a project that utilizes the power of the chatGPT model to generate code that models math questions and then executes the generated code to obtain the final numerical answer.
> [!NOTE]
>
> Building a system that generates executable code from user input with an LLM is [a complex problem with potential security risks](https://developer.nvidia.com/blog/securing-llm-systems-against-prompt-injection/), so this example is more of a demonstration than something you can use directly in production. To build such a system correctly, you should address key security considerations such as input validation and additional sanitization of the generated code, or, better, run the generated code in a sandbox environment.
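To make the idea concrete, here is a minimal, illustrative sketch (not the flow's actual implementation): the LLM is asked to return Python code for the math question, and that snippet is executed to read back a numeric result. The `answer` variable convention and the hard-coded `generated_code` string are assumptions for illustration only; a real system should sandbox the execution as noted above.
```python
# Illustrative sketch only -- not the flow's actual implementation.
def run_generated_code(code: str) -> float:
    # Execute the generated snippet in a bare namespace and read back `answer`.
    # A production system should run this inside a proper sandbox instead.
    scope = {"__builtins__": {}}
    exec(code, scope)
    return float(scope["answer"])

math_question = "If a rectangle has a length of 10 and width of 5, what is the area?"
generated_code = "answer = 10 * 5"  # stand-in for the code an LLM might return
print(run_generated_code(generated_code))  # 50.0
```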
Tools used in this flow:
- `python` tool
- built-in `llm` tool
Connections used in this flow:
- `open_ai` connection
## Prerequisites
Install promptflow sdk and other dependencies:
```cmd
pip install -r requirements.txt
```
## Setup connection
Prepare your Azure OpenAI resource by following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.
Note in this example, we are using [chat api](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/chatgpt?pivots=programming-language-chat-completions), please use `gpt-35-turbo` or `gpt-4` model deployment.
Create connection if you haven't done that. Ensure you have put your azure open ai endpoint key in [azure_openai.yml](azure_openai.yml) file.
```bash
# Override keys with --set to avoid yaml file changes
pf connection create -f ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base>
```
Ensure you have created `open_ai_connection` connection.
```bash
pf connection show -n open_ai_connection
```
## Run flow in local
### Run locally with single line input
```bash
# test with default input value in flow.dag.yaml
pf flow test --flow .
# test with specific input
pf flow test --flow . --inputs math_question='If a rectangle has a length of 10 and width of 5, what is the area?'
```
### Run with multiple lines data
- create run
```bash
# create a random run name
run_name="math_to_code_"$(openssl rand -hex 12)
pf run create --flow . --data ./math_data.jsonl --column-mapping math_question='${data.question}' --name $run_name --stream
```
### Get the accuracy using evaluation flow
Use [eval-accuracy-maths-to-code](../../evaluation/eval-accuracy-maths-to-code/) to evaluate accuracy and error rate metrics against the math-to-code flow.
- accuracy: if the generated code can be executed and produces a final numerical answer, that answer is compared with the groundtruth in the test data. For a single instance, the result is True if the number equals the groundtruth and False otherwise. Accuracy measures the percentage of correct instances across the test data.
- error_rate: in some cases the flow cannot produce a numerical answer at all, for example when the generated code fails to execute because of a parsing error or because a dependent package is not available in the conda environment. Error rate measures the percentage of such cases across the test data. A minimal sketch of how both metrics could be aggregated is shown below.
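The snippet below is a rough, illustrative sketch of how these two metrics could be aggregated from per-line results; it is not the evaluation flow's actual code, and the `prediction`/`groundtruth` field names are assumptions.
```python
# Illustrative aggregation of accuracy and error_rate; field names are assumed.
def aggregate(results):
    total = len(results)
    # error: the flow produced no numerical answer at all
    errors = sum(1 for r in results if r["prediction"] is None)
    # correct: a numerical answer was produced and it equals the groundtruth
    correct = sum(1 for r in results if r["prediction"] is not None and r["prediction"] == r["groundtruth"])
    return {
        "accuracy": correct / total if total else 0.0,
        "error_rate": errors / total if total else 0.0,
    }

print(aggregate([
    {"prediction": 50, "groundtruth": 50},   # correct
    {"prediction": 42, "groundtruth": 41},   # executed but wrong
    {"prediction": None, "groundtruth": 7},  # no numerical answer produced
]))  # accuracy and error_rate are both one third here
```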
```bash
# create a random eval run name
eval_run_name="math_to_code_eval_run_"$(openssl rand -hex 12)
# invoke accuracy and error rate evaluation against math-to-code batch run
pf run create --flow ../../evaluation/eval-accuracy-maths-to-code/ --data ./math_data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.answer}' --run $run_name --name $eval_run_name --stream
# view the run details
pf run show-details -n $eval_run_name
pf run show-metrics -n $eval_run_name
```
| promptflow/examples/flows/standard/maths-to-code/README.md/0 | {
"file_path": "promptflow/examples/flows/standard/maths-to-code/README.md",
"repo_id": "promptflow",
"token_count": 1060
} | 17 |
import unittest
import traceback
import os
import promptflow.azure as azure
from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential
import promptflow
class BaseTest(unittest.TestCase):
def setUp(self) -> None:
root = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../")
self.flow_path = os.path.join(root, "named-entity-recognition")
self.data_path = os.path.join(self.flow_path, "data.jsonl")
self.eval_match_rate_flow_path = os.path.join(root, "../evaluation/eval-entity-match-rate")
self.all_runs_generated = []
return super().setUp()
def tearDown(self):
for run in self.all_runs_generated:
try:
self.pf.runs.archive(run.name)
except Exception as e:
print(e)
traceback.print_exc()
        return super().tearDown()
def check_run_basics(self, run, name):
self.assertTrue(run is not None)
self.assertEqual(run.display_name, name)
self.assertEqual(run.tags["unittest"], "true")
class TestEvalAzure(BaseTest):
def setUp(self) -> None:
try:
credential = DefaultAzureCredential()
# Check if given credential can get token successfully.
credential.get_token("https://management.azure.com/.default")
except Exception:
# Fall back to InteractiveBrowserCredential in case DefaultAzureCredential not work
credential = InteractiveBrowserCredential()
self.pf = azure.PFClient.from_config(credential=credential)
return super().setUp()
def test_bulk_run_and_eval(self):
run = self.pf.run(
flow=self.flow_path,
data=self.data_path,
column_mapping={
"text": "${data.text}",
"entity_type": "${data.entity_type}"
},
connections={"NER_LLM": {"connection": "open_ai_connection"}},
display_name="ner_bulk_run",
tags={"unittest": "true"},
stream=True)
self.all_runs_generated.append(run)
self.check_run_basics(run, "ner_bulk_run")
eval = self.pf.run(
flow=self.eval_match_rate_flow_path,
run=run,
data=self.data_path,
column_mapping={
"entities": "${run.outputs.entities}",
"ground_truth": "${data.results}"
},
display_name="eval_match_rate",
tags={"unittest": "true"},
stream=True)
self.all_runs_generated.append(eval)
self.check_run_basics(eval, "eval_match_rate")
return eval
class TestEval(BaseTest):
def setUp(self) -> None:
self.pf = promptflow.PFClient()
return super().setUp()
def test_bulk_run_and_eval(self):
run = self.pf.run(
flow=self.flow_path,
data=self.data_path,
column_mapping={
"text": "${data.text}",
"entity_type": "${data.entity_type}"
},
display_name="ner_bulk_run",
tags={"unittest": "true"},
stream=True)
self.all_runs_generated.append(run)
self.check_run_basics(run, "ner_bulk_run")
eval = self.pf.run(
flow=self.eval_match_rate_flow_path,
run=run,
data=self.data_path,
column_mapping={
"entities": "${run.outputs.entities}",
"ground_truth": "${data.results}"
},
display_name="eval_match_rate",
tags={"unittest": "true"},
stream=True)
self.all_runs_generated.append(eval)
self.check_run_basics(eval, "eval_match_rate")
return eval
| promptflow/examples/flows/standard/named-entity-recognition/eval_test.py/0 | {
"file_path": "promptflow/examples/flows/standard/named-entity-recognition/eval_test.py",
"repo_id": "promptflow",
"token_count": 1876
} | 18 |
from pathlib import Path
from ruamel.yaml import YAML
def collect_tools_from_directory(base_dir) -> dict:
tools = {}
yaml = YAML()
for f in Path(base_dir).glob("**/*.yaml"):
with open(f, "r") as f:
tools_in_file = yaml.load(f)
for identifier, tool in tools_in_file.items():
tools[identifier] = tool
return tools
def list_package_tools():
"""List package tools"""
yaml_dir = Path(__file__).parents[1] / "yamls"
return collect_tools_from_directory(yaml_dir)
| promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/utils.py/0 | {
"file_path": "promptflow/examples/tools/tool-package-quickstart/my_tool_package/tools/utils.py",
"repo_id": "promptflow",
"token_count": 236
} | 19 |
from my_tool_package.tools.tool_with_dynamic_list_input import my_tool, my_list_func
def test_my_tool():
result = my_tool(input_text=["apple", "banana"], input_prefix="My")
assert result == 'Hello My apple,banana'
def test_my_list_func():
result = my_list_func(prefix="My")
assert len(result) == 10
assert "value" in result[0]
| promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_dynamic_input.py/0 | {
"file_path": "promptflow/examples/tools/tool-package-quickstart/tests/test_tool_with_dynamic_input.py",
"repo_id": "promptflow",
"token_count": 132
} | 20 |
---
resources: examples/connections/azure_openai.yml, examples/flows/standard/web-classification
---
# Deploy flow using Azure App Service
This example demos how to deploy a flow using Azure App Service.
[Azure App Service](https://learn.microsoft.com/azure/app-service/) is an HTTP-based service for hosting web applications, REST APIs, and mobile back ends.
The scripts (`deploy.sh` for bash and `deploy.ps1` for powershell) under this folder are here to help deploy the docker image to Azure App Service.
We will use [web-classification](../../../flows/standard/web-classification/README.md) as example in this tutorial.
## Build a flow as docker format app
Note that all dependent connections must be created before building as docker.
```bash
# create connection if not created before
pf connection create --file ../../../connections/azure_openai.yml --set api_key=<your_api_key> api_base=<your_api_base> --name open_ai_connection
```
Use the command below to build a flow as docker format app:
```bash
pf flow build --source ../../../flows/standard/web-classification --output dist --format docker
```
## Deploy with Azure App Service
The two scripts will do the following things:
1. Create a resource group if not exists.
2. Build and push the image to docker registry.
3. Create an app service plan with the give sku.
4. Create an app with specified name, set the deployment container image to the pushed docker image.
5. Set up the environment variables for the app.
Example command to use bash script:
```shell
bash deploy.sh --path dist -i <image_tag> --name my_app_23d8m -r <docker registry> -g <resource_group>
```
Example command to use powershell script:
```powershell
.\deploy.ps1 dist -i <image_tag> -n my-app-23d8m -r <docker registry> -g <resource_group>
```
Note that the `name` will produce a unique FQDN as AppName.azurewebsites.net.
See the full parameters by `bash deploy.sh -h` or `.\deploy.ps1 -h`.
## View and test the web app
The web app can be found via [azure portal](https://portal.azure.com/)

After the app is created, go to https://portal.azure.com/, find the app, and set up the environment variables
at (Settings>Configuration) or (Settings>Environment variables), then restart the app.

Browse the app at Overview and see the test page:

You can also test the app by sending a POST request to the app like:
```shell
curl http://<Default-domain-of-app-service>/score --data '{"url":"https://play.google.com/store/apps/details?id=com.twitter.android"}' -X POST -H "Content-Type: application/json"
```
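The same request can also be sent from Python; the snippet below is illustrative only (it is not part of the deployment scripts, and the domain is a placeholder you need to replace):
```python
# Illustrative only: call the deployed scoring endpoint with the requests package.
import requests

url = "http://<Default-domain-of-app-service>/score"  # replace with your app's domain
payload = {"url": "https://play.google.com/store/apps/details?id=com.twitter.android"}
response = requests.post(url, json=payload, timeout=30)
print(response.status_code, response.json())
```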
Tips:
- Reach deployment logs at (Deployment>Deployment Center) and app logs at (Monitoring>Log stream).
- Reach advanced deployment tools at https://$name.scm.azurewebsites.net/.
- Reach more details about app service at https://learn.microsoft.com/azure/app-service/.
| promptflow/examples/tutorials/flow-deploy/azure-app-service/README.md/0 | {
"file_path": "promptflow/examples/tutorials/flow-deploy/azure-app-service/README.md",
"repo_id": "promptflow",
"token_count": 872
} | 21 |
---
kind: Namespace
apiVersion: v1
metadata:
name: web-classification
---
apiVersion: v1
kind: Secret
metadata:
name: open-ai-connection-api-key
namespace: web-classification
type: Opaque
data:
open-ai-connection-api-key: <encoded_secret>
---
apiVersion: v1
kind: Service
metadata:
name: web-classification-service
namespace: web-classification
spec:
type: NodePort
ports:
- name: http
port: 8080
targetPort: 8080
nodePort: 30123
selector:
app: web-classification-serve-app
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: web-classification-serve-app
namespace: web-classification
spec:
selector:
matchLabels:
app: web-classification-serve-app
template:
metadata:
labels:
app: web-classification-serve-app
spec:
containers:
- name: web-classification-serve-container
image: web-classification-serve
imagePullPolicy: Never
ports:
- containerPort: 8080
env:
- name: OPEN_AI_CONNECTION_API_KEY
valueFrom:
secretKeyRef:
name: open-ai-connection-api-key
key: open-ai-connection-api-key | promptflow/examples/tutorials/flow-deploy/kubernetes/deployment.yaml/0 | {
"file_path": "promptflow/examples/tutorials/flow-deploy/kubernetes/deployment.yaml",
"repo_id": "promptflow",
"token_count": 487
} | 22 |
import argparse
import os
import sys
from pathlib import Path
from utils import Color, run_command, print_red
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=Color.RED + "Test Coverage for Promptflow!" + Color.END + "\n")
parser.add_argument("-p", required=True, nargs="+", help="The paths to calculate code coverage")
parser.add_argument("-t", required=True, nargs="+", help="The path to the tests")
parser.add_argument("-l", required=True, help="Location to run tests in")
parser.add_argument(
"-m",
required=True,
help="Pytest marker to identify the tests to run",
default="all",
)
parser.add_argument(
"-o",
required=False,
help="Pytest output file name",
default="test-results.xml",
)
parser.add_argument("-n", help="Pytest number of process to run the tests", default="auto")
parser.add_argument(
"--model-name",
help="The model file name to run the tests",
type=str,
default="",
)
parser.add_argument("--timeout", help="Timeout for individual tests (seconds)", type=str, default="")
parser.add_argument(
"--coverage-config",
help="The path of code coverage config file",
type=str,
default="",
)
parser.add_argument(
"--disable-cov-branch",
action="store_true",
help="Whether to enable branch coverage calculation",
)
parser.add_argument(
"--ignore-glob",
help="The path of ignored test file",
type=str,
default="",
)
args = parser.parse_args()
print("Working directory: " + str(os.getcwd()))
print("Args.p: " + str(args.p))
print("Args.t: " + str(args.t))
print("Args.l: " + str(args.l))
print("Args.m: " + str(args.m))
print("Args.n: " + str(args.n))
print("Args.o: " + str(args.o))
print("Args.model-name: " + str(args.model_name))
print("Args.timeout: " + str(args.timeout))
print("Args.coverage-config: " + str(args.coverage_config))
print("Args.ignore-glob: " + str(args.ignore_glob))
print("Args.disable-cov-branch: " + str(args.disable_cov_branch))
test_paths_list = [str(Path(path).absolute()) for path in args.t]
# display a list of all Python packages installed in the current Python environment
run_command(["pip", "list"])
run_command(["pip", "show", "promptflow", "promptflow-sdk"])
pytest_command = ["pytest", f"--junitxml={args.o}"]
pytest_command += test_paths_list
if args.coverage_config:
if args.p:
cov_path_list = [f"--cov={path}" for path in args.p]
pytest_command += cov_path_list
if not args.disable_cov_branch:
pytest_command += ["--cov-branch"]
pytest_command += [ # noqa: W503
"--cov-report=term",
"--cov-report=html",
"--cov-report=xml",
]
pytest_command = pytest_command + [f"--cov-config={args.coverage_config}"]
if args.ignore_glob:
pytest_command = pytest_command + [f"--ignore-glob={args.ignore_glob}"]
pytest_command += [
"-n",
args.n,
"--dist",
"loadfile",
"--log-level=info",
"--log-format=%(asctime)s %(levelname)s %(message)s",
"--log-date-format=[%Y-%m-%d %H:%M:%S]",
"--durations=5",
"-ra",
"-vv",
]
if args.timeout:
pytest_command = pytest_command + [
"--timeout",
args.timeout,
"--timeout_method",
"thread",
]
if args.m != "all":
pytest_command = pytest_command + ["-m", args.m]
if args.model_name:
pytest_command = pytest_command + ["--model-name", args.model_name]
# pytest --junit-xml=test-results.xml --cov=azure.ai.ml --cov-report=html --cov-report=xml -ra ./tests/*/unittests/
error_code, _ = run_command(pytest_command, throw_on_retcode=False)
# https://docs.pytest.org/en/7.1.x/reference/exit-codes.html
if error_code == 1:
print_red("Tests were collected and run but some of the tests failed.")
elif error_code == 2:
print_red("Test execution was interrupted by the user.")
elif error_code == 3:
print_red("Internal error happened while executing tests.")
elif error_code == 4:
print_red("pytest command line usage error.")
elif error_code == 5:
print_red("No tests were collected.")
sys.exit(error_code)
| promptflow/scripts/building/run_coverage_tests.py/0 | {
"file_path": "promptflow/scripts/building/run_coverage_tests.py",
"repo_id": "promptflow",
"token_count": 1969
} | 23 |
# Curl Install Script Information
The scripts in this directory are used for installing through curl and they point to the packages on PyPI.
## Install or update promptflow
curl https://promptflowartifact.blob.core.windows.net/linux-install-scripts/install | bash
The script can also be downloaded and run locally. You may have to restart your shell in order for the changes to take effect.
## Uninstall promptflow
Uninstall promptflow by deleting the files from the location chosen at the time of installation.
1. Remove the installed CLI files.
```bash
# The default install/executable location is the user's home directory ($HOME).
rm -r $HOME/lib/promptflow
rm $HOME/bin/pf
rm $HOME/bin/pfs
rm $HOME/bin/pfazure
```
2. Modify your `$HOME/.bash_profile` or `$HOME/.bashrc` file to remove the following line:
```text
export PATH=$PATH:$HOME/bin
```
3. If using `bash` or `zsh`, reload your shell's command cache.
```bash
hash -r
``` | promptflow/scripts/installer/curl_install_pypi/README.md/0 | {
"file_path": "promptflow/scripts/installer/curl_install_pypi/README.md",
"repo_id": "promptflow",
"token_count": 305
} | 24 |
DIM objshell
set objshell = wscript.createobject("wscript.shell")
iReturn = objshell.run("pfs.bat start --force", 0, true) | promptflow/scripts/installer/windows/scripts/promptflow_service.vbs/0 | {
"file_path": "promptflow/scripts/installer/windows/scripts/promptflow_service.vbs",
"repo_id": "promptflow",
"token_count": 41
} | 25 |
- name: {{ step_name }}
uses: azure/login@v1
with:
creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }} | promptflow/scripts/readme/ghactions_driver/workflow_steps/step_azure_login.yml.jinja2/0 | {
"file_path": "promptflow/scripts/readme/ghactions_driver/workflow_steps/step_azure_login.yml.jinja2",
"repo_id": "promptflow",
"token_count": 47
} | 26 |
# This code is autogenerated.
# Code is generated by running custom script: python3 readme.py
# Any manual changes to this file may cause incorrect behavior.
# Any manual changes will be overwritten if the code is regenerated.
name: {{ workflow_name }}
on:
schedule:
- cron: "{{ crontab }}" # {{ crontab_comment }}
pull_request:
branches: [ main ]
paths: {{ path_filter }}
workflow_dispatch:
env:
IS_IN_CI_PIPELINE: "true"
jobs:
{{ workflow_name }}:
{%- filter indent(width=4) -%}
{% block steps %}
{% endblock steps %}
{%- endfilter -%}
| promptflow/scripts/readme/ghactions_driver/workflow_templates/workflow_skeleton.yml.jinja2/0 | {
"file_path": "promptflow/scripts/readme/ghactions_driver/workflow_templates/workflow_skeleton.yml.jinja2",
"repo_id": "promptflow",
"token_count": 195
} | 27 |
import argparse
import os
import re
from jinja2 import Environment, FileSystemLoader
def make_pythonic_variable_name(input_string):
variable_name = input_string.strip()
variable_name = re.sub(r'\W|^(?=\d)', '_', variable_name)
if not variable_name[0].isalpha() and variable_name[0] != '_':
variable_name = f'_{variable_name}'
return variable_name
def convert_tool_name_to_class_name(tool_name):
return ''.join(word.title() for word in tool_name.split('_'))
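# Examples (illustrative): make_pythonic_variable_name("my-tool name!") -> "my_tool_name_"
# and convert_tool_name_to_class_name("hello_world_tool") -> "HelloWorldTool".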
def create_file(path):
with open(path, 'w'):
pass
def create_folder(path):
os.makedirs(path, exist_ok=True)
def create_tool_project_structure(destination: str, package_name: str, tool_name: str,
function_name: str, is_class_way=False):
if is_class_way:
class_name = convert_tool_name_to_class_name(tool_name)
# Load templates
templates_abs_path = os.path.join(os.path.dirname(__file__), "templates")
file_loader = FileSystemLoader(templates_abs_path)
env = Environment(loader=file_loader)
# Create new directory
if os.path.exists(destination):
print("Destination already exists. Please choose another one.")
return
os.makedirs(destination, exist_ok=True)
# Generate setup.py
template = env.get_template('setup.py.j2')
output = template.render(package_name=package_name, tool_name=tool_name)
with open(os.path.join(destination, 'setup.py'), 'w') as f:
f.write(output)
# Generate MANIFEST.in
template = env.get_template('MANIFEST.in.j2')
output = template.render(package_name=package_name)
with open(os.path.join(destination, 'MANIFEST.in'), 'w') as f:
f.write(output)
# Create tools folder and __init__.py, tool.py inside it
tools_dir = os.path.join(destination, package_name, 'tools')
create_folder(tools_dir)
create_file(os.path.join(tools_dir, '__init__.py'))
with open(os.path.join(tools_dir, '__init__.py'), 'w') as f:
f.write('__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore\n')
# Generate tool.py
if is_class_way:
template = env.get_template('tool2.py.j2')
output = template.render(class_name=class_name, function_name=function_name)
else:
template = env.get_template('tool.py.j2')
output = template.render(function_name=function_name)
with open(os.path.join(tools_dir, f'{tool_name}.py'), 'w') as f:
f.write(output)
# Generate utils.py
template = env.get_template('utils.py.j2')
output = template.render()
with open(os.path.join(tools_dir, 'utils.py'), 'w') as f:
f.write(output)
create_file(os.path.join(destination, package_name, '__init__.py'))
with open(os.path.join(destination, package_name, '__init__.py'), 'w') as f:
f.write('__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore\n')
# Create yamls folder and __init__.py inside it
yamls_dir = os.path.join(destination, package_name, 'yamls')
create_folder(yamls_dir)
# Create tool yaml
if is_class_way:
template = env.get_template('tool2.yaml.j2')
output = template.render(package_name=package_name, tool_name=tool_name, class_name=class_name,
function_name=function_name)
else:
template = env.get_template('tool.yaml.j2')
output = template.render(package_name=package_name, tool_name=tool_name, function_name=function_name)
with open(os.path.join(yamls_dir, f'{tool_name}.yaml'), 'w') as f:
f.write(output)
# Create test folder and __init__.py inside it
tests_dir = os.path.join(destination, 'tests')
create_folder(tests_dir)
create_file(os.path.join(tests_dir, '__init__.py'))
# Create test_tool.py
if is_class_way:
template = env.get_template('test_tool2.py.j2')
output = template.render(package_name=package_name, tool_name=tool_name, class_name=class_name,
function_name=function_name)
else:
template = env.get_template('test_tool.py.j2')
output = template.render(package_name=package_name, tool_name=tool_name, function_name=function_name)
with open(os.path.join(tests_dir, f'test_{tool_name}.py'), 'w') as f:
f.write(output)
print(f'Generated tool package template for {package_name} at {destination}')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="promptflow tool template generation arguments.")
parser.add_argument("--package-name", "-p", type=str, help="your tool package's name", required=True)
parser.add_argument("--destination", "-d", type=str,
help="target folder you want to place the generated template", required=True)
parser.add_argument("--tool-name", "-t", type=str,
help="your tool's name, by default is hello_world_tool", required=False)
parser.add_argument("--function-name", "-f", type=str,
help="your tool's function name, by default is your tool's name", required=False)
parser.add_argument("--use-class", action='store_true', help="Specify whether to use a class implementation way.")
args = parser.parse_args()
destination = args.destination
package_name = make_pythonic_variable_name(args.package_name)
package_name = package_name.lower()
if args.tool_name:
tool_name = make_pythonic_variable_name(args.tool_name)
else:
tool_name = 'hello_world_tool'
tool_name = tool_name.lower()
if args.function_name:
function_name = make_pythonic_variable_name(args.function_name)
else:
function_name = tool_name
function_name = function_name.lower()
create_tool_project_structure(destination, package_name, tool_name, function_name, args.use_class)
| promptflow/scripts/tool/generate_tool_package_template.py/0 | {
"file_path": "promptflow/scripts/tool/generate_tool_package_template.py",
"repo_id": "promptflow",
"token_count": 2385
} | 28 |
import re
from azure.core.exceptions import HttpResponseError, ResourceExistsError
from azure.identity import ClientSecretCredential
from azure.keyvault.secrets import SecretClient
from exceptions import (
SecretNameAlreadyExistsException,
SecretNameInvalidException,
SecretNoSetPermissionException,
)
key_vault_name = "github-promptflow"
container_name = "tools"
KVUri = f"https://{key_vault_name}.vault.azure.net"
def init_used_secret_names(client: SecretClient):
global reserved_secret_names
reserved_secret_names = list_secret_names(client)
def get_secret_client(
tenant_id: str, client_id: str, client_secret: str
) -> SecretClient:
credential = ClientSecretCredential(tenant_id, client_id, client_secret)
client = SecretClient(vault_url=KVUri, credential=credential)
return client
reserved_secret_names = []
def get_secret(secret_name: str, client: SecretClient):
secret = client.get_secret(secret_name)
return secret.value
def list_secret_names(client: SecretClient) -> list:
secret_properties = client.list_properties_of_secrets()
return [secret.name for secret in secret_properties]
def validate_secret_name(secret_name: str):
# Check if secret name is valid. Secret name can only contain alphanumeric characters and dashes.
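    # e.g. "my-secret-1" is valid, while "my_secret" is rejected because "_" is not allowed.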
pattern = "^[a-zA-Z0-9-]+$"
if not re.match(pattern, secret_name):
raise SecretNameInvalidException(
"Secret name can only contain alphanumeric characters and dashes"
)
# Check if secret name is one of the reserved names
if secret_name in reserved_secret_names:
raise SecretNameAlreadyExistsException(
f"Secret name {secret_name} already exists"
)
def upload_secret(client: SecretClient, secret_name: str, secret_value: str):
try:
client.set_secret(secret_name, secret_value)
except ResourceExistsError as ex:
if "in a deleted but recoverable state" in str(ex):
raise SecretNameAlreadyExistsException(
f"Secret name {secret_name} is deleted but recoverable, and its name cannot be reused"
)
except HttpResponseError as ex:
if (
ex.status_code == 403
and "does not have secrets set permission on key vault" in str(ex)
):
raise SecretNoSetPermissionException(
f"No set permission on key vault {key_vault_name}"
)
print("Done.")
| promptflow/scripts/tool/utils/secret_manager.py/0 | {
"file_path": "promptflow/scripts/tool/utils/secret_manager.py",
"repo_id": "promptflow",
"token_count": 894
} | 29 |
from openai import OpenAIError
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException
openai_error_code_ref_message = "Error reference: https://platform.openai.com/docs/guides/error-codes/api-errors"
def to_openai_error_message(e: Exception) -> str:
ex_type = type(e).__name__
if str(e) == "<empty message>":
msg = "The api key is invalid or revoked. " \
"You can correct or regenerate the api key of your connection."
return f"OpenAI API hits {ex_type}: {msg}"
# for models that do not support the `functions` parameter.
elif "Unrecognized request argument supplied: functions" in str(e):
msg = "Current model does not support the `functions` parameter. If you are using openai connection, then " \
"please use gpt-3.5-turbo, gpt-4, gpt-4-32k, gpt-3.5-turbo-0613 or gpt-4-0613. You can refer to " \
"https://platform.openai.com/docs/guides/gpt/function-calling. If you are using azure openai " \
"connection, then please first go to your Azure OpenAI resource, deploy model 'gpt-35-turbo' or " \
"'gpt-4' with version 0613, then go to prompt flow connection page, upgrade connection api version to " \
"'2023-07-01-preview'. You can refer to " \
"https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/function-calling."
return f"OpenAI API hits {ex_type}: {msg}"
elif "The completion operation does not work with the specified model" in str(e) or \
"logprobs, best_of and echo parameters are not available" in str(e):
msg = "The completion operation does not work with the current model. " \
"Completion API is a legacy api and is going to be deprecated soon. " \
"Please change to use Chat API for current model. " \
"You could refer to guideline at https://aka.ms/pfdoc/chat-prompt " \
"or view the samples in our gallery that contain 'Chat' in the name."
return f"OpenAI API hits {ex_type}: {msg}"
elif "Invalid content type. image_url is only supported by certain models" in str(e):
msg = "Current model does not support the image input. If you are using openai connection, then please use " \
"gpt-4-vision-preview. You can refer to https://platform.openai.com/docs/guides/vision." \
"If you are using azure openai connection, then please first go to your Azure OpenAI resource, " \
"create a GPT-4 Turbo with Vision deployment by selecting model name: \"gpt-4\" and "\
"model version \"vision-preview\". You can refer to " \
"https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/gpt-with-vision"
return f"OpenAI API hits {ex_type}: {msg}"
elif ("\'response_format\' of type" in str(e) and "is not supported with this model." in str(e))\
or ("Additional properties are not allowed" in str(e) and "unexpected) - \'response_format\'" in str(e)):
msg = "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}. " \
"The value associated with the type key should be either 'text' or 'json_object' " \
"If you are using openai connection, you can only set response_format to { \"type\": \"json_object\" } " \
"when calling gpt-3.5-turbo-1106 or gpt-4-1106-preview to enable JSON mode. You can refer to " \
"https://platform.openai.com/docs/guides/text-generation/json-mode. If you are using azure openai " \
"connection, then please first go to your Azure OpenAI resource, deploy model 'gpt-35-turbo-1106' or " \
"'gpt-4-1106-preview'. You can refer to " \
"https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/json-mode?tabs=python."
return f"OpenAI API hits {ex_type}: {msg}"
else:
return f"OpenAI API hits {ex_type}: {str(e)} [{openai_error_code_ref_message}]"
class WrappedOpenAIError(UserErrorException):
"""Refine error messages on top of native openai errors."""
def __init__(self, ex: OpenAIError, **kwargs):
self._ex = ex
super().__init__(target=ErrorTarget.TOOL, **kwargs)
@property
def message(self):
return str(to_openai_error_message(self._ex))
@property
def error_codes(self):
"""The hierarchy of the error codes.
We follow the "Microsoft REST API Guidelines" to define error codes in a hierarchy style.
See the below link for details:
https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses
This list will be converted into an error code hierarchy by the prompt flow framework.
For this case, it will be converted into a data structure that equivalent to:
{
"code": "UserError",
"innerError": {
"code": "OpenAIError",
"innerError": {
"code": self._ex.__class__.__name__,
"innerError": None
}
}
}
"""
return ["UserError", "OpenAIError", self._ex.__class__.__name__]
class ExceedMaxRetryTimes(WrappedOpenAIError):
"""Base exception raised when retry exceeds max times."""
@property
def message(self):
return "Exceed max retry times. " + super().message
class ToolValidationError(UserErrorException):
"""Base exception raised when failed to validate tool."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class LLMError(UserErrorException):
"""Base exception raised when failed to call openai api with non-OpenAIError."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class JinjaTemplateError(ToolValidationError):
"""Base exception raised when failed to render jinja template."""
pass
class ChatAPIInvalidRole(ToolValidationError):
"""Base exception raised when failed to validate chat api role."""
pass
class ChatAPIFunctionRoleInvalidFormat(ToolValidationError):
"""Base exception raised when failed to validate chat api function role format."""
pass
class ChatAPIInvalidFunctions(ToolValidationError):
"""Base exception raised when failed to validate functions when call chat api."""
pass
class FunctionCallNotSupportedInStreamMode(ToolValidationError):
"""Base exception raised when use functions parameter in stream mode when call chat api."""
pass
class InvalidConnectionType(ToolValidationError):
"""Base exception raised when failed to pass invalid connection type."""
pass
class SerpAPISystemError(SystemErrorException):
"""Base exception raised when failed to call serp api with system error."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class SerpAPIUserError(UserErrorException):
"""Base exception raised when failed to call serp api with user error."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class OpenModelLLMOnlineEndpointError(UserErrorException):
"""Base exception raised when the call to an online endpoint failed."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class OpenModelLLMUserError(UserErrorException):
"""Base exception raised when the call to Open Model LLM failed with a user error."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class OpenModelLLMKeyValidationError(ToolValidationError):
"""Base exception raised when failed to validate functions when call chat api."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
class AzureContentSafetyInputValueError(UserErrorException):
"""Base exception raised when the input type of Azure Content Safety is invalid."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
class AzureContentSafetySystemError(SystemErrorException):
"""Base exception raised when failed to call Azure Content Safety api with system error."""
def __init__(self, **kwargs):
super().__init__(**kwargs, target=ErrorTarget.TOOL)
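# Usage sketch (illustrative only; the OpenAI client call below is a placeholder,
# real call sites live in the tool implementations that import this module):
#
#   from openai import OpenAIError
#
#   try:
#       response = client.chat.completions.create(model=deployment, messages=messages)
#   except OpenAIError as e:
#       # Raising WrappedOpenAIError surfaces the refined message from
#       # to_openai_error_message(e) and the error code hierarchy
#       # ["UserError", "OpenAIError", type(e).__name__].
#       raise WrappedOpenAIError(e) from e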
| promptflow/src/promptflow-tools/promptflow/tools/exception.py/0 | {
"file_path": "promptflow/src/promptflow-tools/promptflow/tools/exception.py",
"repo_id": "promptflow",
"token_count": 3068
} | 30 |
import pytest
import json
from promptflow.tools.openai import chat, completion, OpenAI
from promptflow.tools.exception import WrappedOpenAIError
@pytest.fixture
def openai_provider(open_ai_connection) -> OpenAI:
return OpenAI(open_ai_connection)
@pytest.mark.usefixtures("use_secrets_config_file")
@pytest.mark.skip_if_no_api_key("open_ai_connection")
class TestOpenAI:
def test_openai_completion(self, openai_provider):
prompt_template = "please complete this sentence: world war II "
openai_provider.completion(prompt=prompt_template)
def test_openai_stream_completion(self, openai_provider):
prompt_template = "please complete this sentence: world war II "
openai_provider.completion(prompt=prompt_template, stream=True)
def test_openai_completion_api(self, open_ai_connection):
prompt_template = "please complete this sentence: world war II "
completion(open_ai_connection, prompt=prompt_template)
def test_openai_chat(self, openai_provider, example_prompt_template, chat_history):
result = openai_provider.chat(
prompt=example_prompt_template,
model="gpt-3.5-turbo",
max_tokens=32,
temperature=0,
user_input="Fill in more details about trend 2.",
chat_history=chat_history,
)
assert "trend 2" in result.lower()
def test_openai_stream_chat(self, openai_provider, example_prompt_template, chat_history):
result = openai_provider.chat(
prompt=example_prompt_template,
model="gpt-3.5-turbo",
max_tokens=32,
temperature=0,
user_input="Fill in more details about trend 2.",
chat_history=chat_history,
stream=True,
)
answer = ""
while True:
try:
answer += next(result)
except Exception:
break
assert "trend 2" in answer.lower()
def test_openai_chat_api(self, open_ai_connection, example_prompt_template, chat_history):
result = chat(
connection=open_ai_connection,
prompt=example_prompt_template,
model="gpt-3.5-turbo",
max_tokens="inF",
temperature=0,
user_input="Write a slogan for product X",
chat_history=chat_history,
)
assert "Product X".lower() in result.lower()
def test_openai_prompt_with_function(
self, open_ai_connection, example_prompt_template_with_function, functions):
result = chat(
connection=open_ai_connection,
prompt=example_prompt_template_with_function,
model="gpt-3.5-turbo",
temperature=0,
# test input functions.
functions=functions,
# test input prompt containing function role.
name="get_location",
result=json.dumps({"location": "Austin"}),
question="What is the weather in Boston?",
prev_question="Where is Boston?"
)
assert result["function_call"]["name"] == "get_current_weather"
def test_openai_chat_with_response_format(self, open_ai_connection, example_prompt_template, chat_history):
result = chat(
connection=open_ai_connection,
prompt=example_prompt_template,
model="gpt-4-1106-preview",
temperature=0,
user_input="Write a slogan for product X, please reponse with json.",
chat_history=chat_history,
response_format={"type": "json_object"}
)
assert "Product X".lower() in result.lower()
@pytest.mark.parametrize(
"response_format, user_input, error_message, error_codes, exception",
[
({"type": "json"}, "Write a slogan for product X, please reponse with json.",
"\'json\' is not one of [\'json_object\', \'text\']", "UserError/OpenAIError/BadRequestError",
WrappedOpenAIError),
({"type": "json_object"}, "Write a slogan for product X",
"\'messages\' must contain the word \'json\' in some form", "UserError/OpenAIError/BadRequestError",
WrappedOpenAIError),
({"types": "json_object"}, "Write a slogan for product X",
"The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}",
"UserError/OpenAIError/BadRequestError",
WrappedOpenAIError)
]
)
def test_openai_chat_with_invalid_response_format(
self,
open_ai_connection,
example_prompt_template,
chat_history,
response_format,
user_input,
error_message,
error_codes,
exception
):
with pytest.raises(exception) as exc_info:
chat(
connection=open_ai_connection,
prompt=example_prompt_template,
model="gpt-4-1106-preview",
temperature=0,
user_input=user_input,
chat_history=chat_history,
response_format=response_format
)
assert error_message in exc_info.value.message
assert exc_info.value.error_codes == error_codes.split("/")
def test_openai_chat_with_not_support_response_format_json_mode_model(
self,
open_ai_connection,
example_prompt_template,
chat_history
):
with pytest.raises(WrappedOpenAIError) as exc_info:
chat(
connection=open_ai_connection,
prompt=example_prompt_template,
model="gpt-3.5-turbo",
temperature=0,
user_input="Write a slogan for product X, please reponse with json.",
chat_history=chat_history,
response_format={"type": "json_object"}
)
error_message = "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}."
assert error_message in exc_info.value.message
assert exc_info.value.error_codes == "UserError/OpenAIError/BadRequestError".split("/")
def test_openai_chat_with_response_format_text_mode(
self,
open_ai_connection,
example_prompt_template,
chat_history
):
result = chat(
connection=open_ai_connection,
prompt=example_prompt_template,
model="gpt-3.5-turbo",
temperature=0,
user_input="Write a slogan for product X.",
chat_history=chat_history,
response_format={"type": "text"}
)
assert "Product X".lower() in result.lower()
| promptflow/src/promptflow-tools/tests/test_openai.py/0 | {
"file_path": "promptflow/src/promptflow-tools/tests/test_openai.py",
"repo_id": "promptflow",
"token_count": 3123
} | 31 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# pylint: disable=wrong-import-position
import json
import time
from promptflow._cli._pf.help import show_privacy_statement, show_welcome_message
from promptflow._cli._user_agent import USER_AGENT
from promptflow._cli._utils import _get_cli_activity_name, get_client_info_for_cli
from promptflow._sdk._telemetry import ActivityType, get_telemetry_logger, log_activity
# Log the start time
start_time = time.perf_counter()
# E402 module level import not at top of file
import argparse # noqa: E402
import logging # noqa: E402
import sys # noqa: E402
from promptflow._cli._pf_azure._flow import add_parser_flow, dispatch_flow_commands # noqa: E402
from promptflow._cli._pf_azure._run import add_parser_run, dispatch_run_commands # noqa: E402
from promptflow._sdk._utils import ( # noqa: E402
get_promptflow_sdk_version,
print_pf_version,
setup_user_agent_to_operation_context,
)
from promptflow._utils.logger_utils import get_cli_sdk_logger # noqa: E402
# get logger for CLI
logger = get_cli_sdk_logger()
def run_command(args):
# Log the init finish time
init_finish_time = time.perf_counter()
try:
# --verbose, enable info logging
if hasattr(args, "verbose") and args.verbose:
for handler in logger.handlers:
handler.setLevel(logging.INFO)
# --debug, enable debug logging
if hasattr(args, "debug") and args.debug:
for handler in logger.handlers:
handler.setLevel(logging.DEBUG)
if args.version:
print_pf_version()
elif args.action == "run":
dispatch_run_commands(args)
elif args.action == "flow":
dispatch_flow_commands(args)
except KeyboardInterrupt as ex:
logger.debug("Keyboard interrupt is captured.")
raise ex
except SystemExit as ex: # some code directly call sys.exit, this is to make sure command metadata is logged
exit_code = ex.code if ex.code is not None else 1
logger.debug(f"Code directly call sys.exit with code {exit_code}")
raise ex
except Exception as ex:
logger.debug(f"Command {args} execute failed. {str(ex)}")
raise ex
finally:
# Log the invoke finish time
invoke_finish_time = time.perf_counter()
logger.info(
"Command ran in %.3f seconds (init: %.3f, invoke: %.3f)",
invoke_finish_time - start_time,
init_finish_time - start_time,
invoke_finish_time - init_finish_time,
)
def get_parser_args(argv):
parser = argparse.ArgumentParser(
prog="pfazure",
formatter_class=argparse.RawDescriptionHelpFormatter,
description="pfazure: manage prompt flow assets in azure. Learn more: https://microsoft.github.io/promptflow.",
)
parser.add_argument(
"-v", "--version", dest="version", action="store_true", help="show current CLI version and exit"
)
subparsers = parser.add_subparsers()
add_parser_run(subparsers)
add_parser_flow(subparsers)
return parser.prog, parser.parse_args(argv)
def _get_workspace_info(args):
try:
subscription_id, resource_group_name, workspace_name = get_client_info_for_cli(
subscription_id=args.subscription,
resource_group_name=args.resource_group,
workspace_name=args.workspace_name,
)
return {
"subscription_id": subscription_id,
"resource_group_name": resource_group_name,
"workspace_name": workspace_name,
}
except Exception:
# fall back to empty dict if workspace info is not available
return {}
def entry(argv):
"""
Control plane CLI tools for promptflow cloud version.
"""
prog, args = get_parser_args(argv)
if hasattr(args, "user_agent"):
setup_user_agent_to_operation_context(args.user_agent)
logger = get_telemetry_logger()
custom_dimensions = _get_workspace_info(args)
with log_activity(
logger,
_get_cli_activity_name(cli=prog, args=args),
activity_type=ActivityType.PUBLICAPI,
custom_dimensions=custom_dimensions,
):
run_command(args)
def main():
"""Entrance of pf CLI."""
command_args = sys.argv[1:]
if len(command_args) == 1 and command_args[0] == "version":
version_dict = {"promptflow": get_promptflow_sdk_version()}
return json.dumps(version_dict, ensure_ascii=False, indent=2, sort_keys=True, separators=(",", ": ")) + "\n"
if len(command_args) == 0:
# print privacy statement & welcome message like azure-cli
show_privacy_statement()
show_welcome_message()
command_args.append("-h")
elif len(command_args) == 1:
# pfazure only has "pf --version" with 1 layer
if command_args[0] not in ["--version", "-v"]:
command_args.append("-h")
setup_user_agent_to_operation_context(USER_AGENT)
entry(command_args)
if __name__ == "__main__":
main()
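# Example invocations handled by this entry point (illustrative; subcommands are
# the ones registered above via add_parser_run and add_parser_flow):
#
#   pfazure --version
#   pfazure run list
#   pfazure flow list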
| promptflow/src/promptflow/promptflow/_cli/_pf_azure/entry.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/_pf_azure/entry.py",
"repo_id": "promptflow",
"token_count": 2094
} | 32 |
{"groundtruth": "App", "prediction": "App"}
| promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/data.jsonl/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_cli/data/evaluation_flow/data.jsonl",
"repo_id": "promptflow",
"token_count": 15
} | 33 |
from traceback import TracebackException
from promptflow._utils.exception_utils import (
ADDITIONAL_INFO_USER_EXECUTION_ERROR,
is_pf_core_frame,
last_frame_info,
remove_suffix,
)
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException, ValidationException
class UnexpectedError(SystemErrorException):
"""Exception raised for unexpected errors that should not occur under normal circumstances."""
pass
class NotSupported(UserErrorException):
"""This exception should be raised when a feature is not supported by the package or product.
Customers should take action, such as upgrading the package or using the product in the correct way, to resolve it.
"""
pass
class PackageToolNotFoundError(ValidationException):
"""Exception raised when package tool is not found in the current runtime environment."""
pass
class MissingRequiredInputs(ValidationException):
pass
class InputTypeMismatch(ValidationException):
pass
class ToolCanceledError(UserErrorException):
"""Exception raised when tool execution is canceled."""
pass
class InvalidSource(ValidationException):
pass
class ToolLoadError(UserErrorException):
"""Exception raised when tool load failed."""
def __init__(self, module: str = None, **kwargs):
super().__init__(target=ErrorTarget.TOOL, module=module, **kwargs)
class ToolExecutionError(UserErrorException):
"""Exception raised when tool execution failed."""
def __init__(self, *, node_name: str, module: str = None):
self._node_name = node_name
super().__init__(target=ErrorTarget.TOOL, module=module)
@property
def message(self):
if self.inner_exception:
error_type_and_message = f"({self.inner_exception.__class__.__name__}) {self.inner_exception}"
return remove_suffix(self._message, ".") + f": {error_type_and_message}"
else:
return self._message
@property
def message_format(self):
return "Execution failure in '{node_name}'."
@property
def message_parameters(self):
return {"node_name": self._node_name}
@property
def tool_last_frame_info(self):
"""Return the line number inside the tool where the error occurred."""
return last_frame_info(self.inner_exception)
@property
def tool_traceback(self):
"""Return the traceback inside the tool's source code scope.
The traceback inside the promptflow's internal code will be taken off.
"""
exc = self.inner_exception
if exc and exc.__traceback__ is not None:
tb = exc.__traceback__.tb_next
if tb is not None:
# The first frames are always our code invoking the tool.
# We do not want to dump it to user code's traceback.
# So, skip these frames from pf core module.
while is_pf_core_frame(tb.tb_frame) and tb.tb_next is not None:
tb = tb.tb_next
# We don't use traceback.format_exception since its interface differs between 3.8 and 3.10.
# Use this internal class to adapt to different python versions.
te = TracebackException(type(exc), exc, tb)
formatted_tb = "".join(te.format())
return formatted_tb
return None
@property
def additional_info(self):
"""Set the tool exception details as additional info."""
if not self.inner_exception:
# Only populate additional info when inner exception is present.
return None
info = {
"type": self.inner_exception.__class__.__name__,
"message": str(self.inner_exception),
"traceback": self.tool_traceback,
}
info.update(self.tool_last_frame_info)
return {
ADDITIONAL_INFO_USER_EXECUTION_ERROR: info,
}
class GenerateMetaUserError(UserErrorException):
"""Base exception raised when failed to validate tool."""
def __init__(self, **kwargs):
super().__init__(target=ErrorTarget.EXECUTOR, **kwargs)
class MetaFileNotFound(GenerateMetaUserError):
pass
class MetaFileReadError(GenerateMetaUserError):
pass
class RunRecordNotFound(SystemErrorException):
pass
class FlowOutputUnserializable(UserErrorException):
pass
class ProcessPoolError(SystemErrorException):
pass
class DuplicateToolMappingError(ValidationException):
"""Exception raised when multiple tools are linked to the same deprecated tool id."""
pass
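# Usage sketch (illustrative only; `tool_func` and `inputs` are placeholders):
#
#   try:
#       output = tool_func(**inputs)
#   except Exception as e:
#       # Chaining with `from e` lets the message and additional_info defined on
#       # ToolExecutionError pick up the inner exception and render the
#       # user-code traceback.
#       raise ToolExecutionError(node_name="my_node", module=tool_func.__module__) from e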
| promptflow/src/promptflow/promptflow/_core/_errors.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_core/_errors.py",
"repo_id": "promptflow",
"token_count": 1699
} | 34 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
# flake8: noqa
"""Put some imports here for internal packages to minimize the effort of refactoring."""
from promptflow._constants import PROMPTFLOW_CONNECTIONS
from promptflow._core._errors import GenerateMetaUserError, PackageToolNotFoundError, ToolExecutionError
from promptflow._core.cache_manager import AbstractCacheManager, CacheManager, enable_cache
from promptflow._core.connection_manager import ConnectionManager
from promptflow._core.flow_execution_context import FlowExecutionContext
from promptflow._core.log_manager import NodeLogManager, NodeLogWriter
from promptflow._core.metric_logger import add_metric_logger
from promptflow._core.openai_injector import inject_openai_api
from promptflow._core.operation_context import OperationContext
from promptflow._core.run_tracker import RunRecordNotFound, RunTracker
from promptflow._core.tool import ToolInvoker, ToolProvider, tool
from promptflow._core.tool_meta_generator import (
JinjaParsingError,
MultipleToolsDefined,
NoToolDefined,
PythonParsingError,
ReservedVariableCannotBeUsed,
generate_prompt_meta,
generate_python_meta,
generate_tool_meta_dict_by_file,
is_tool,
)
from promptflow._core.tools_manager import (
BuiltinsManager,
CustomPythonToolLoadError,
EmptyCodeInCustomTool,
MissingTargetFunction,
ToolsManager,
builtins,
collect_package_tools,
gen_dynamic_list,
register_apis,
register_builtins,
register_connections,
retrieve_tool_func_result,
)
from promptflow._core.tracer import Tracer
from promptflow._sdk._constants import LOCAL_MGMT_DB_PATH
from promptflow._sdk._serving.response_creator import ResponseCreator
from promptflow._sdk._serving.swagger import generate_swagger
from promptflow._sdk._serving.utils import (
get_output_fields_to_remove,
get_sample_json,
handle_error_to_response,
load_request_data,
streaming_response_required,
validate_request_data,
)
from promptflow._sdk._utils import (
get_used_connection_names_from_environment_variables,
setup_user_agent_to_operation_context,
update_environment_variables_with_connections,
)
from promptflow._utils.context_utils import _change_working_dir, inject_sys_path
from promptflow._utils.credential_scrubber import CredentialScrubber
from promptflow._utils.dataclass_serializer import deserialize_dataclass, serialize
from promptflow._utils.exception_utils import (
ErrorResponse,
ExceptionPresenter,
JsonSerializedPromptflowException,
RootErrorCode,
infer_error_code_from_class,
)
from promptflow._utils.execution_utils import handle_line_failures
from promptflow._utils.feature_utils import Feature, FeatureState, get_feature_list
from promptflow._utils.logger_utils import (
DATETIME_FORMAT,
LOG_FORMAT,
CredentialScrubberFormatter,
FileHandler,
FileHandlerConcurrentWrapper,
LogContext,
bulk_logger,
flow_logger,
get_logger,
logger,
update_log_path,
)
from promptflow._utils.multimedia_data_converter import (
AbstractMultimediaInfoConverter,
MultimediaConverter,
MultimediaInfo,
ResourceType,
)
from promptflow._utils.multimedia_utils import (
_create_image_from_file,
convert_multimedia_data_to_base64,
is_multimedia_dict,
persist_multimedia_data,
resolve_multimedia_data_recursively,
)
from promptflow._utils.utils import (
AttrDict,
camel_to_snake,
count_and_log_progress,
load_json,
reverse_transpose,
set_context,
transpose,
)
from promptflow._version import VERSION
from promptflow.batch._batch_inputs_processor import apply_inputs_mapping
from promptflow.executor._errors import InputNotFound
from promptflow.executor._tool_invoker import DefaultToolInvoker
from promptflow.storage._run_storage import DefaultRunStorage
| promptflow/src/promptflow/promptflow/_internal/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_internal/__init__.py",
"repo_id": "promptflow",
"token_count": 1306
} | 35 |
# Prompt Flow Service
This document describes the usage of the pfs (prompt flow service) CLI.
### Start prompt flow service (optional)
If you don't install pfs as a service, you need to start it manually.
The pfs CLI provides a **start** command to start the service. You can also use this command to specify the service port.
```commandline
usage: pfs [-h] [-p PORT]
Start prompt flow service.
optional arguments:
-h, --help show this help message and exit
-p PORT, --port PORT port of the promptflow service
```
If you don't specify a port, pfs will first try the port recorded in the configuration file "~/.promptflow/pfs.port".
If no port configuration is found, or that port is already in use, pfs will use a random port to start the service.
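For example, to start the service on a specific port (a sketch assuming the **start** command above; any free port works):
```commandline
pfs start -p 8080
```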
### Swagger of service
After starting the service, it provides Swagger UI documentation, served from "http://localhost:your-port/v1.0/swagger.json".
For details, please refer to [swagger.json](./swagger.json).
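For instance, a quick way to check that the service is up (assuming it listens on port 8080):
```commandline
curl http://localhost:8080/v1.0/swagger.json
```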
#### Generate C# client
1. Right click the project, Add -> Rest API Client... -> Generate with OpenAPI Generator
2. A dialog will open; fill in the file name and the swagger URL, and the client will be generated under the project.
For details, please refer to [REST API Client Code Generator](https://marketplace.visualstudio.com/items?itemName=ChristianResmaHelle.ApiClientCodeGenerator2022). | promptflow/src/promptflow/promptflow/_sdk/_service/README.md/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_service/README.md",
"repo_id": "promptflow",
"token_count": 387
} | 36 |
# ---------------------------------------------------------
# Copyright (c) 2013-2022 Caleb P. Burns credits dahlia <https://github.com/dahlia>
# Licensed under the MPLv2 License. See License.txt in the project root for
# license information.
# ---------------------------------------------------------
"""
This file code has been vendored from pathspec repo.
Please do not edit it, unless really necessary
"""
import dataclasses
import os
import posixpath
import re
import warnings
from typing import Any, AnyStr, Iterable, Iterator
from typing import Match as MatchHint
from typing import Optional
from typing import Pattern as PatternHint
from typing import Tuple, Union
NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
# The encoding to use when parsing a byte string pattern.
# This provides the base definition for patterns.
_BYTES_ENCODING = "latin1"
class Pattern(object):
"""
The :class:`Pattern` class is the abstract definition of a pattern.
"""
# Make the class dict-less.
__slots__ = ("include",)
def __init__(self, include: Optional[bool]) -> None:
"""
Initializes the :class:`Pattern` instance.
*include* (:class:`bool` or :data:`None`) is whether the matched
files should be included (:data:`True`), excluded (:data:`False`),
or is a null-operation (:data:`None`).
"""
self.include = include
"""
*include* (:class:`bool` or :data:`None`) is whether the matched
files should be included (:data:`True`), excluded (:data:`False`),
or is a null-operation (:data:`None`).
"""
def match(self, files: Iterable[str]) -> Iterator[str]:
"""
DEPRECATED: This method is no longer used and has been replaced by
:meth:`.match_file`. Use the :meth:`.match_file` method with a loop
for similar results.
Matches this pattern against the specified files.
*files* (:class:`~collections.abc.Iterable` of :class:`str`)
contains each file relative to the root directory (e.g.,
:data:`"relative/path/to/file"`).
Returns an :class:`~collections.abc.Iterable` yielding each matched
file path (:class:`str`).
"""
warnings.warn(
(
"{0.__module__}.{0.__qualname__}.match() is deprecated. Use "
"{0.__module__}.{0.__qualname__}.match_file() with a loop for "
"similar results."
).format(self.__class__),
DeprecationWarning,
stacklevel=2,
)
for file in files:
if self.match_file(file) is not None:
yield file
def match_file(self, file: str) -> Optional[Any]:
"""
Matches this pattern against the specified file.
*file* (:class:`str`) is the normalized file path to match against.
Returns the match result if *file* matched; otherwise, :data:`None`.
"""
raise NotImplementedError(
("{0.__module__}.{0.__qualname__} must override match_file().").format(self.__class__)
)
class RegexPattern(Pattern):
"""
The :class:`RegexPattern` class is an implementation of a pattern
using regular expressions.
"""
# Keep the class dict-less.
__slots__ = ("regex",)
def __init__(
self,
pattern: Union[AnyStr, PatternHint],
include: Optional[bool] = None,
) -> None:
"""
Initializes the :class:`RegexPattern` instance.
*pattern* (:class:`str`, :class:`bytes`, :class:`re.Pattern`, or
:data:`None`) is the pattern to compile into a regular expression.
*include* (:class:`bool` or :data:`None`) must be :data:`None`
unless *pattern* is a precompiled regular expression (:class:`re.Pattern`)
in which case it is whether matched files should be included
(:data:`True`), excluded (:data:`False`), or is a null operation
(:data:`None`).
.. NOTE:: Subclasses do not need to support the *include*
parameter.
"""
if isinstance(pattern, (str, bytes)):
assert include is None, ("include:{!r} must be null when pattern:{!r} is a string.").format(
include, pattern
)
regex, include = self.pattern_to_regex(pattern)
# NOTE: Make sure to allow a null regular expression to be
# returned for a null-operation.
if include is not None:
regex = re.compile(regex)
elif pattern is not None and hasattr(pattern, "match"):
# Assume pattern is a precompiled regular expression.
# - NOTE: Used specified *include*.
regex = pattern
elif pattern is None:
# NOTE: Make sure to allow a null pattern to be passed for a
# null-operation.
assert include is None, ("include:{!r} must be null when pattern:{!r} is null.").format(include, pattern)
else:
raise TypeError("pattern:{!r} is not a string, re.Pattern, or None.".format(pattern))
super(RegexPattern, self).__init__(include)
self.regex: PatternHint = regex
"""
*regex* (:class:`re.Pattern`) is the regular expression for the
pattern.
"""
def __eq__(self, other: "RegexPattern") -> bool:
"""
Tests the equality of this regex pattern with *other* (:class:`RegexPattern`)
by comparing their :attr:`~Pattern.include` and :attr:`~RegexPattern.regex`
attributes.
"""
if isinstance(other, RegexPattern):
return self.include == other.include and self.regex == other.regex
return NotImplemented
def match_file(self, file: str) -> Optional["RegexMatchResult"]:
"""
Matches this pattern against the specified file.
*file* (:class:`str`)
contains each file relative to the root directory (e.g., "relative/path/to/file").
Returns the match result (:class:`RegexMatchResult`) if *file*
matched; otherwise, :data:`None`.
"""
if self.include is not None:
match = self.regex.match(file)
if match is not None:
return RegexMatchResult(match)
return None
@classmethod
def pattern_to_regex(cls, pattern: str) -> Tuple[str, bool]:
"""
Convert the pattern into an un-compiled regular expression.
*pattern* (:class:`str`) is the pattern to convert into a regular
expression.
Returns the un-compiled regular expression (:class:`str` or :data:`None`),
and whether matched files should be included (:data:`True`),
excluded (:data:`False`), or is a null-operation (:data:`None`).
.. NOTE:: The default implementation simply returns *pattern* and
:data:`True`.
"""
return pattern, True
@dataclasses.dataclass()
class RegexMatchResult(object):
"""
The :class:`RegexMatchResult` data class is used to return information
about the matched regular expression.
"""
# Keep the class dict-less.
__slots__ = ("match",)
match: MatchHint
"""
*match* (:class:`re.Match`) is the regex match result.
"""
class GitWildMatchPatternError(ValueError):
"""
The :class:`GitWildMatchPatternError` indicates an invalid git wild match
pattern.
"""
class GitWildMatchPattern(RegexPattern):
"""
The :class:`GitWildMatchPattern` class represents a compiled Git
wildmatch pattern.
"""
# Keep the dict-less class hierarchy.
__slots__ = ()
@classmethod
# pylint: disable=too-many-branches,too-many-statements
def pattern_to_regex(
cls,
pattern: AnyStr,
) -> Tuple[Optional[AnyStr], Optional[bool]]:
"""
Convert the pattern into a regular expression.
*pattern* (:class:`str` or :class:`bytes`) is the pattern to convert
into a regular expression.
Returns the un-compiled regular expression (:class:`str`, :class:`bytes`,
or :data:`None`); and whether matched files should be included
(:data:`True`), excluded (:data:`False`), or if it is a
null-operation (:data:`None`).
"""
if isinstance(pattern, str):
return_type = str
elif isinstance(pattern, bytes):
return_type = bytes
pattern = pattern.decode(_BYTES_ENCODING)
else:
raise TypeError(f"pattern:{pattern!r} is not a unicode or byte string.")
original_pattern = pattern
pattern = pattern.strip()
if pattern.startswith("#"):
# A pattern starting with a hash ('#') serves as a comment
# (neither includes nor excludes files). Escape the hash with a
# back-slash to match a literal hash (i.e., '\#').
regex = None
include = None
elif pattern == "/":
# EDGE CASE: According to `git check-ignore` (v2.4.1), a single
# '/' does not match any file.
regex = None
include = None
elif pattern:
if pattern.startswith("!"):
# A pattern starting with an exclamation mark ('!') negates the
# pattern (exclude instead of include). Escape the exclamation
# mark with a back-slash to match a literal exclamation mark
# (i.e., '\!').
include = False
# Remove leading exclamation mark.
pattern = pattern[1:]
else:
include = True
# Allow a regex override for edge cases that cannot be handled
# through normalization.
override_regex = None
# Split pattern into segments.
pattern_segments = pattern.split("/")
# Normalize pattern to make processing easier.
# EDGE CASE: Deal with duplicate double-asterisk sequences.
# Collapse each sequence down to one double-asterisk. Iterate over
# the segments in reverse and remove the duplicate double
# asterisks as we go.
for i in range(len(pattern_segments) - 1, 0, -1):
prev = pattern_segments[i - 1]
seg = pattern_segments[i]
if prev == "**" and seg == "**":
del pattern_segments[i]
if len(pattern_segments) == 2 and pattern_segments[0] == "**" and not pattern_segments[1]:
# EDGE CASE: The '**/' pattern should match everything except
# individual files in the root directory. This case cannot be
# adequately handled through normalization. Use the override.
override_regex = "^.+(?P<ps_d>/).*$"
if not pattern_segments[0]:
# A pattern beginning with a slash ('/') will only match paths
# directly on the root directory instead of any descendant
# paths. So, remove empty first segment to make pattern relative
# to root.
del pattern_segments[0]
elif len(pattern_segments) == 1 or (len(pattern_segments) == 2 and not pattern_segments[1]):
# A single pattern without a beginning slash ('/') will match
# any descendant path. This is equivalent to "**/{pattern}". So,
# prepend with double-asterisks to make pattern relative to
# root.
# EDGE CASE: This also holds for a single pattern with a
# trailing slash (e.g. dir/).
if pattern_segments[0] != "**":
pattern_segments.insert(0, "**")
else:
# EDGE CASE: A pattern without a beginning slash ('/') but
# contains at least one prepended directory (e.g.
# "dir/{pattern}") should not match "**/dir/{pattern}",
# according to `git check-ignore` (v2.4.1).
pass
if not pattern_segments:
# After resolving the edge cases, we end up with no pattern at
# all. This must be because the pattern is invalid.
raise GitWildMatchPatternError(f"Invalid git pattern: {original_pattern!r}")
if not pattern_segments[-1] and len(pattern_segments) > 1:
# A pattern ending with a slash ('/') will match all descendant
# paths if it is a directory but not if it is a regular file.
# This is equivalent to "{pattern}/**". So, set last segment to
# a double-asterisk to include all descendants.
pattern_segments[-1] = "**"
if override_regex is None:
# Build regular expression from pattern.
output = ["^"]
need_slash = False
end = len(pattern_segments) - 1
for i, seg in enumerate(pattern_segments):
if seg == "**":
if i == 0 and i == end:
# A pattern consisting solely of double-asterisks ('**')
# will match every path.
output.append(".+")
elif i == 0:
# A normalized pattern beginning with double-asterisks
# ('**') will match any leading path segments.
output.append("(?:.+/)?")
need_slash = False
elif i == end:
# A normalized pattern ending with double-asterisks ('**')
# will match any trailing path segments.
output.append("(?P<ps_d>/).*")
else:
# A pattern with inner double-asterisks ('**') will match
# multiple (or zero) inner path segments.
output.append("(?:/.+)?")
need_slash = True
elif seg == "*":
# Match single path segment.
if need_slash:
output.append("/")
output.append("[^/]+")
if i == end:
# A pattern ending without a slash ('/') will match a file
# or a directory (with paths underneath it). E.g., "foo"
# matches "foo", "foo/bar", "foo/bar/baz", etc.
output.append("(?:(?P<ps_d>/).*)?")
need_slash = True
else:
# Match segment glob pattern.
if need_slash:
output.append("/")
try:
output.append(cls._translate_segment_glob(seg))
except ValueError as e:
raise GitWildMatchPatternError(f"Invalid git pattern: {original_pattern!r}") from e
if i == end:
# A pattern ending without a slash ('/') will match a file
# or a directory (with paths underneath it). E.g., "foo"
# matches "foo", "foo/bar", "foo/bar/baz", etc.
output.append("(?:(?P<ps_d>/).*)?")
need_slash = True
output.append("$")
regex = "".join(output)
else:
# Use regex override.
regex = override_regex
else:
# A blank pattern is a null-operation (neither includes nor
# excludes files).
regex = None
include = None
if regex is not None and return_type is bytes:
regex = regex.encode(_BYTES_ENCODING)
return regex, include
@staticmethod
def _translate_segment_glob(pattern: str) -> str:
"""
Translates the glob pattern to a regular expression. This is used in
the constructor to translate a path segment glob pattern to its
corresponding regular expression.
*pattern* (:class:`str`) is the glob pattern.
Returns the regular expression (:class:`str`).
"""
# NOTE: This is derived from `fnmatch.translate()` and is similar to
# the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
escape = False
regex = ""
i, end = 0, len(pattern)
while i < end:
# Get next character.
char = pattern[i]
i += 1
if escape:
# Escape the character.
escape = False
regex += re.escape(char)
elif char == "\\":
# Escape character, escape next character.
escape = True
elif char == "*":
# Multi-character wildcard. Match any string (except slashes),
# including an empty string.
regex += "[^/]*"
elif char == "?":
# Single-character wildcard. Match any single character (except
# a slash).
regex += "[^/]"
elif char == "[":
# Bracket expression wildcard. Except for the beginning
# exclamation mark, the whole bracket expression can be used
# directly as regex but we have to find where the expression
# ends.
# - "[][!]" matches ']', '[' and '!'.
# - "[]-]" matches ']' and '-'.
# - "[!]a-]" matches any character except ']', 'a' and '-'.
j = i
                # Pass bracket expression negation.
if j < end and pattern[j] == "!":
j += 1
# Pass first closing bracket if it is at the beginning of the
# expression.
if j < end and pattern[j] == "]":
j += 1
# Find closing bracket. Stop once we reach the end or find it.
while j < end and pattern[j] != "]":
j += 1
if j < end:
# Found end of bracket expression. Increment j to be one past
# the closing bracket:
#
# [...]
# ^ ^
# i j
#
j += 1
expr = "["
if pattern[i] == "!":
# Bracket expression needs to be negated.
expr += "^"
i += 1
elif pattern[i] == "^":
# POSIX declares that the regex bracket expression negation
# "[^...]" is undefined in a glob pattern. Python's
# `fnmatch.translate()` escapes the caret ('^') as a
# literal. To maintain consistency with undefined behavior,
# I am escaping the '^' as well.
expr += "\\^"
i += 1
# Build regex bracket expression. Escape slashes so they are
# treated as literal slashes by regex as defined by POSIX.
expr += pattern[i:j].replace("\\", "\\\\")
# Add regex bracket expression to regex result.
regex += expr
# Set i to one past the closing bracket.
i = j
else:
# Failed to find closing bracket, treat opening bracket as a
# bracket literal instead of as an expression.
regex += "\\["
else:
# Regular character, escape it for regex.
regex += re.escape(char)
if escape:
raise ValueError(f"Escape character found with no next character to escape: {pattern!r}")
return regex
@staticmethod
def escape(s: AnyStr) -> AnyStr:
"""
Escape special characters in the given string.
*s* (:class:`str` or :class:`bytes`) a filename or a string that you
want to escape, usually before adding it to a ".gitignore".
Returns the escaped string (:class:`str` or :class:`bytes`).
"""
if isinstance(s, str):
return_type = str
string = s
elif isinstance(s, bytes):
return_type = bytes
string = s.decode(_BYTES_ENCODING)
else:
raise TypeError(f"s:{s!r} is not a unicode or byte string.")
# Reference: https://git-scm.com/docs/gitignore#_pattern_format
meta_characters = r"[]!*#?"
out_string = "".join("\\" + x if x in meta_characters else x for x in string)
if return_type is bytes:
return out_string.encode(_BYTES_ENCODING)
return out_string
def normalize_file(file, separators=None):
    # type: (Union[Text, PathLike], Optional[Collection[Text]]) -> Text
"""
Normalizes the file path to use the POSIX path separator (i.e.,
``'/'``), and make the paths relative (remove leading ``'/'``).
*file* (:class:`str` or :class:`pathlib.PurePath`) is the file path.
*separators* (:class:`~collections.abc.Collection` of :class:`str`; or
:data:`None`) optionally contains the path separators to normalize.
This does not need to include the POSIX path separator (``'/'``), but
including it will not affect the results. Default is :data:`None` for
:data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
container (e.g., an empty tuple ``()``).
Returns the normalized file path (:class:`str`).
"""
# Normalize path separators.
if separators is None:
separators = NORMALIZE_PATH_SEPS
# Convert path object to string.
norm_file = str(file)
for sep in separators:
norm_file = norm_file.replace(sep, posixpath.sep)
if norm_file.startswith("/"):
# Make path relative.
norm_file = norm_file[1:]
elif norm_file.startswith("./"):
# Remove current directory prefix.
norm_file = norm_file[2:]
return norm_file
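# Usage sketch (illustrative only):
#
#   pattern = GitWildMatchPattern("docs/**/*.md")
#   result = pattern.match_file(normalize_file("./docs/guide/intro.md"))
#   # `result` is a RegexMatchResult when the path matches, otherwise None.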
| promptflow/src/promptflow/promptflow/_sdk/_vendor/_pathspec.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/_vendor/_pathspec.py",
"repo_id": "promptflow",
"token_count": 10455
} | 37 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import abc
from typing import Dict, Optional
from promptflow._sdk._constants import BASE_PATH_CONTEXT_KEY, CommonYamlFields
from promptflow._sdk._utils import load_from_dict
from promptflow._utils.yaml_utils import dump_yaml
class YAMLTranslatableMixin(abc.ABC):
@classmethod
# pylint: disable=unused-argument
def _resolve_cls_and_type(cls, data, params_override: Optional[list]):
"""Resolve the class to use for deserializing the data. Return current class if no override is provided.
:param data: Data to deserialize.
:type data: dict
:param params_override: Parameters to override, defaults to None
:type params_override: typing.Optional[list]
:return: Class to use for deserializing the data & its "type". Type will be None if no override is provided.
:rtype: tuple[class, typing.Optional[str]]
"""
@classmethod
    def _get_schema_cls(cls):
pass
def _to_dict(self) -> Dict:
schema_cls = self._get_schema_cls()
return schema_cls(context={BASE_PATH_CONTEXT_KEY: "./"}).dump(self)
def _to_yaml(self) -> str:
return dump_yaml(self._to_dict())
def __str__(self):
try:
return self._to_yaml()
except BaseException: # pylint: disable=broad-except
return super(YAMLTranslatableMixin, self).__str__()
@classmethod
def _load_from_dict(cls, data: Dict, context: Dict, additional_message: str, **kwargs):
schema_cls = cls._get_schema_cls()
loaded_data = load_from_dict(schema_cls, data, context, additional_message, **kwargs)
# pop the type field since it already exists in class init
loaded_data.pop(CommonYamlFields.TYPE, None)
return cls(base_path=context[BASE_PATH_CONTEXT_KEY], **loaded_data)
| promptflow/src/promptflow/promptflow/_sdk/entities/_yaml_translatable.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/entities/_yaml_translatable.py",
"repo_id": "promptflow",
"token_count": 744
} | 38 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import os.path
from dotenv import dotenv_values
from marshmallow import fields, post_load, pre_load
from promptflow._sdk._utils import is_remote_uri
from promptflow._sdk.schemas._base import PatchedSchemaMeta, YamlFileSchema
from promptflow._sdk.schemas._fields import LocalPathField, NestedField, UnionField
from promptflow._utils.logger_utils import get_cli_sdk_logger
logger = get_cli_sdk_logger()
def _resolve_dot_env_file(data, **kwargs):
"""Resolve .env file to environment variables."""
env_var = data.get("environment_variables", None)
try:
if env_var and os.path.exists(env_var):
env_dict = dotenv_values(env_var)
data["environment_variables"] = env_dict
except TypeError:
pass
return data
class ResourcesSchema(metaclass=PatchedSchemaMeta):
"""Schema for resources."""
instance_type = fields.Str()
idle_time_before_shutdown_minutes = fields.Int()
class RemotePathStr(fields.Str):
default_error_messages = {
"invalid_path": "Invalid remote path. "
"Currently only azureml://xxx or public URL(e.g. https://xxx) are supported.",
}
def _validate(self, value):
# inherited validations like required, allow_none, etc.
super(RemotePathStr, self)._validate(value)
if value is None:
return
if not is_remote_uri(value):
raise self.make_error(
"invalid_path",
)
class RemoteFlowStr(fields.Str):
default_error_messages = {
"invalid_path": "Invalid remote flow path. Currently only azureml:<flow-name> is supported",
}
def _validate(self, value):
# inherited validations like required, allow_none, etc.
super(RemoteFlowStr, self)._validate(value)
if value is None:
return
if not isinstance(value, str) or not value.startswith("azureml:"):
raise self.make_error(
"invalid_path",
)
class RunSchema(YamlFileSchema):
"""Base schema for all run schemas."""
# TODO(2898455): support directly write path/flow + entry in run.yaml
# region: common fields
name = fields.Str()
display_name = fields.Str(required=False)
tags = fields.Dict(keys=fields.Str(), values=fields.Str(allow_none=True))
status = fields.Str(dump_only=True)
description = fields.Str(attribute="description")
properties = fields.Dict(keys=fields.Str(), values=fields.Str(allow_none=True))
# endregion: common fields
flow = UnionField([LocalPathField(required=True), RemoteFlowStr(required=True)])
# inputs field
data = UnionField([LocalPathField(), RemotePathStr()])
column_mapping = fields.Dict(keys=fields.Str)
# runtime field, only available for cloud run
runtime = fields.Str()
resources = NestedField(ResourcesSchema)
run = fields.Str()
# region: context
variant = fields.Str()
environment_variables = UnionField(
[
fields.Dict(keys=fields.Str(), values=fields.Str()),
# support load environment variables from .env file
LocalPathField(),
]
)
connections = fields.Dict(keys=fields.Str(), values=fields.Dict(keys=fields.Str()))
# endregion: context
# region: command node
command = fields.Str(dump_only=True)
    outputs = fields.Dict(keys=fields.Str(), dump_only=True)
# endregion: command node
@post_load
def resolve_dot_env_file(self, data, **kwargs):
return _resolve_dot_env_file(data, **kwargs)
@pre_load
def warning_unknown_fields(self, data, **kwargs):
# log warnings for unknown schema fields
unknown_fields = set(data) - set(self.fields)
if unknown_fields:
logger.warning("Run schema validation warnings. Unknown fields found: %s", unknown_fields)
return data
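# Illustrative run.yaml accepted by RunSchema (values are placeholders):
#
#   flow: ../flows/web_classification
#   data: ../data/input.jsonl
#   column_mapping:
#     url: ${data.url}
#   variant: ${summarize_text_content.variant_0}
#   environment_variables: .env
#   connections:
#     classify_with_llm:
#       connection: open_ai_connection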
| promptflow/src/promptflow/promptflow/_sdk/schemas/_run.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_sdk/schemas/_run.py",
"repo_id": "promptflow",
"token_count": 1523
} | 39 |
import tiktoken
from importlib.metadata import version
from promptflow.exceptions import UserErrorException
IS_LEGACY_OPENAI = version("openai").startswith("0.")
class OpenAIMetricsCalculator:
def __init__(self, logger=None) -> None:
self._logger = logger
def get_openai_metrics_from_api_call(self, api_call: dict):
total_metrics = {}
if self._need_collect_metrics(api_call):
try:
                metrics = self._get_openai_metrics_for_single_api(api_call)
self.merge_metrics_dict(total_metrics, metrics)
except Exception as ex:
self._log_warning(f"Failed to calculate metrics due to exception: {ex}.")
children = api_call.get("children")
if children is not None:
for child in children:
child_metrics = self.get_openai_metrics_from_api_call(child)
self.merge_metrics_dict(total_metrics, child_metrics)
api_call["system_metrics"] = total_metrics
return total_metrics
def _need_collect_metrics(self, api_call: dict):
if api_call.get("type") != "LLM":
return False
output = api_call.get("output")
if not isinstance(output, dict) and not isinstance(output, list):
return False
inputs = api_call.get("inputs")
if not isinstance(inputs, dict):
return False
return True
    def _get_openai_metrics_for_single_api(self, api_call: dict):
output = api_call.get("output")
if isinstance(output, dict):
usage = output.get("usage")
if isinstance(usage, dict):
return usage
self._log_warning(
"Cannot find openai metrics in output, "
"will calculate metrics from response data directly."
)
name = api_call.get("name")
# Support both legacy api and OpenAI v1 api
# Legacy api:
# https://github.com/openai/openai-python/blob/v0.28.1/openai/api_resources/chat_completion.py
# https://github.com/openai/openai-python/blob/v0.28.1/openai/api_resources/completion.py
# OpenAI v1 api:
# https://github.com/openai/openai-python/blob/main/src/openai/resources/chat/completions.py
# https://github.com/openai/openai-python/blob/main/src/openai/resources/completions.py
if (
name == "openai.api_resources.chat_completion.ChatCompletion.create"
or name == "openai.resources.chat.completions.Completions.create" # openai v1
):
return self._get_openai_metrics_for_chat_api(api_call)
elif (
name == "openai.api_resources.completion.Completion.create"
or name == "openai.resources.completions.Completions.create" # openai v1
):
return self._get_openai_metrics_for_completion_api(api_call)
else:
raise CalculatingMetricsError(f"Calculating metrics for api {name} is not supported.")
def _try_get_model(self, inputs, output):
if IS_LEGACY_OPENAI:
api_type = inputs.get("api_type")
if not api_type:
raise CalculatingMetricsError("Cannot calculate metrics for none or empty api_type.")
if api_type == "azure":
model = inputs.get("engine")
else:
model = inputs.get("model")
else:
if isinstance(output, dict):
model = output.get("model")
else:
model = output[0].model if len(output) > 0 and hasattr(output[0], "model") else None
if not model:
model = inputs.get("model")
if not model:
raise CalculatingMetricsError(
"Cannot get a valid model to calculate metrics. "
"Please specify a engine for AzureOpenAI API or a model for OpenAI API."
)
return model
def _get_openai_metrics_for_chat_api(self, api_call):
inputs = api_call.get("inputs")
output = api_call.get("output")
metrics = {}
enc, tokens_per_message, tokens_per_name = self._get_encoding_for_chat_api(self._try_get_model(inputs, output))
metrics["prompt_tokens"] = self._get_prompt_tokens_from_messages(
inputs["messages"],
enc,
tokens_per_message,
tokens_per_name
)
if isinstance(output, list):
if IS_LEGACY_OPENAI:
metrics["completion_tokens"] = len(output)
else:
metrics["completion_tokens"] = len(
[chunk for chunk in output if chunk.choices and chunk.choices[0].delta.content]
)
else:
metrics["completion_tokens"] = self._get_completion_tokens_for_chat_api(output, enc)
metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
return metrics
def _get_encoding_for_chat_api(self, model):
try:
enc = tiktoken.encoding_for_model(model)
except KeyError:
enc = tiktoken.get_encoding("cl100k_base")
if model == "gpt-35-turbo-0301":
tokens_per_message = 4
tokens_per_name = -1
elif "gpt-35-turbo" in model or "gpt-3.5-turbo" in model or "gpt-4" in model:
tokens_per_message = 3
tokens_per_name = 1
else:
raise CalculatingMetricsError(f"Calculating metrics for model {model} is not supported.")
return enc, tokens_per_message, tokens_per_name
def _get_prompt_tokens_from_messages(self, messages, enc, tokens_per_message, tokens_per_name):
prompt_tokens = 0
for message in messages:
prompt_tokens += tokens_per_message
for key, value in message.items():
prompt_tokens += len(enc.encode(value))
if key == "name":
prompt_tokens += tokens_per_name
prompt_tokens += 3
return prompt_tokens
def _get_completion_tokens_for_chat_api(self, output, enc):
completion_tokens = 0
choices = output.get("choices")
if isinstance(choices, list):
for ch in choices:
if isinstance(ch, dict):
message = ch.get("message")
if isinstance(message, dict):
content = message.get("content")
if isinstance(content, str):
completion_tokens += len(enc.encode(content))
return completion_tokens
def _get_openai_metrics_for_completion_api(self, api_call: dict):
metrics = {}
inputs = api_call.get("inputs")
output = api_call.get("output")
enc = self._get_encoding_for_completion_api(self._try_get_model(inputs, output))
metrics["prompt_tokens"] = 0
prompt = inputs.get("prompt")
if isinstance(prompt, str):
metrics["prompt_tokens"] = len(enc.encode(prompt))
elif isinstance(prompt, list):
for pro in prompt:
metrics["prompt_tokens"] += len(enc.encode(pro))
if isinstance(output, list):
if IS_LEGACY_OPENAI:
metrics["completion_tokens"] = len(output)
else:
metrics["completion_tokens"] = len(
[chunk for chunk in output if chunk.choices and chunk.choices[0].text]
)
else:
metrics["completion_tokens"] = self._get_completion_tokens_for_completion_api(output, enc)
metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
return metrics
def _get_encoding_for_completion_api(self, model):
try:
return tiktoken.encoding_for_model(model)
except KeyError:
return tiktoken.get_encoding("p50k_base")
def _get_completion_tokens_for_completion_api(self, output, enc):
completion_tokens = 0
choices = output.get("choices")
if isinstance(choices, list):
for ch in choices:
if isinstance(ch, dict):
text = ch.get("text")
if isinstance(text, str):
completion_tokens += len(enc.encode(text))
return completion_tokens
def merge_metrics_dict(self, metrics: dict, metrics_to_merge: dict):
for k, v in metrics_to_merge.items():
metrics[k] = metrics.get(k, 0) + v
def _log_warning(self, msg):
if self._logger:
self._logger.warning(msg)
class CalculatingMetricsError(UserErrorException):
"""The exception that is raised when calculating metrics failed."""
pass
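# Usage sketch (illustrative only; the api_call dict mirrors the trace entries this
# calculator consumes, and all values below are placeholders):
#
#   calculator = OpenAIMetricsCalculator()
#   api_call = {
#       "type": "LLM",
#       "name": "openai.resources.chat.completions.Completions.create",
#       "inputs": {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hi"}]},
#       "output": {"model": "gpt-3.5-turbo", "usage": {"prompt_tokens": 9, "completion_tokens": 1, "total_tokens": 10}},
#   }
#   metrics = calculator.get_openai_metrics_from_api_call(api_call)
#   # -> {"prompt_tokens": 9, "completion_tokens": 1, "total_tokens": 10}; the same dict
#   #    is also attached as api_call["system_metrics"].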
| promptflow/src/promptflow/promptflow/_utils/openai_metrics_calculator.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/_utils/openai_metrics_calculator.py",
"repo_id": "promptflow",
"token_count": 4184
} | 40 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
"""
This file stores functions and objects that will be used in prompt-flow sdk.
DO NOT change the module names in "all" list, add new modules if needed.
"""
class _DummyCallableClassForLazyImportError:
"""This class is used to put off ImportError until the imported class or function is called."""
@classmethod
def _get_message(cls):
return "azure-ai-ml is not installed. Please install azure-ai-ml to use this feature."
def __init__(self, *args, **kwargs):
raise ImportError(self._get_message())
def __call__(self, *args, **kwargs):
raise ImportError(self._get_message())
# TODO: avoid import azure.ai.ml if promptflow.azure.configure is not called
try:
from azure.ai.ml import MLClient, load_component
from azure.ai.ml.entities import Component
from azure.ai.ml.entities._assets import Code
from azure.ai.ml.entities._component._additional_includes import AdditionalIncludesMixin
from azure.ai.ml.entities._load_functions import load_common
except ImportError:
class load_component(_DummyCallableClassForLazyImportError):
pass
class Component(_DummyCallableClassForLazyImportError):
pass
class MLClient(_DummyCallableClassForLazyImportError):
pass
class load_common(_DummyCallableClassForLazyImportError):
pass
class Code(_DummyCallableClassForLazyImportError):
pass
class AdditionalIncludesMixin(_DummyCallableClassForLazyImportError):
pass
__all__ = [
"load_component",
"Component",
"MLClient",
"load_common",
"Code",
"AdditionalIncludesMixin",
]
| promptflow/src/promptflow/promptflow/azure/_ml/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_ml/__init__.py",
"repo_id": "promptflow",
"token_count": 593
} | 41 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._connections_operations import build_create_connection_request, build_delete_connection_request, build_get_connection_request, build_get_connection_with_secrets_request, build_list_azure_open_ai_deployments_request, build_list_connection_specs_request, build_list_connections_request, build_update_connection_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConnectionsOperations:
"""ConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def create_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
body: Optional["_models.CreateOrUpdateConnectionRequestDto"] = None,
**kwargs: Any
) -> "_models.ConnectionDto":
"""create_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param body:
:type body: ~flow.models.CreateOrUpdateConnectionRequestDto
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionDto, or the result of cls(response)
:rtype: ~flow.models.ConnectionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateOrUpdateConnectionRequestDto')
else:
_json = None
request = build_create_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
content_type=content_type,
json=_json,
template_url=self.create_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}'} # type: ignore
@distributed_trace_async
async def update_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
body: Optional["_models.CreateOrUpdateConnectionRequestDto"] = None,
**kwargs: Any
) -> "_models.ConnectionDto":
"""update_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param body:
:type body: ~flow.models.CreateOrUpdateConnectionRequestDto
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionDto, or the result of cls(response)
:rtype: ~flow.models.ConnectionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateOrUpdateConnectionRequestDto')
else:
_json = None
request = build_update_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
content_type=content_type,
json=_json,
template_url=self.update_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}'} # type: ignore
@distributed_trace_async
async def get_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ConnectionDto":
"""get_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionDto, or the result of cls(response)
:rtype: ~flow.models.ConnectionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.get_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}'} # type: ignore
@distributed_trace_async
async def delete_connection(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ConnectionDto":
"""delete_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionDto, or the result of cls(response)
:rtype: ~flow.models.ConnectionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.delete_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}'} # type: ignore
@distributed_trace_async
async def get_connection_with_secrets(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
**kwargs: Any
) -> "_models.ConnectionDto":
"""get_connection_with_secrets.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionDto, or the result of cls(response)
:rtype: ~flow.models.ConnectionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_connection_with_secrets_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.get_connection_with_secrets.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_connection_with_secrets.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}/listsecrets'} # type: ignore
@distributed_trace_async
async def list_connections(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> List["_models.ConnectionDto"]:
"""list_connections.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ConnectionDto, or the result of cls(response)
:rtype: list[~flow.models.ConnectionDto]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConnectionDto"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_connections_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_connections.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[ConnectionDto]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connections.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections'} # type: ignore
@distributed_trace_async
async def list_connection_specs(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> List["_models.WorkspaceConnectionSpec"]:
"""list_connection_specs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of WorkspaceConnectionSpec, or the result of cls(response)
:rtype: list[~flow.models.WorkspaceConnectionSpec]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.WorkspaceConnectionSpec"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_connection_specs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_connection_specs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[WorkspaceConnectionSpec]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connection_specs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/specs'} # type: ignore
@distributed_trace_async
async def list_azure_open_ai_deployments(
self,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
connection_name: str,
**kwargs: Any
) -> List["_models.AzureOpenAIDeploymentDto"]:
"""list_azure_open_ai_deployments.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of AzureOpenAIDeploymentDto, or the result of cls(response)
:rtype: list[~flow.models.AzureOpenAIDeploymentDto]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.AzureOpenAIDeploymentDto"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_azure_open_ai_deployments_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.list_azure_open_ai_deployments.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[AzureOpenAIDeploymentDto]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_azure_open_ai_deployments.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}/AzureOpenAIDeployments'} # type: ignore
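# --- Hedged usage sketch (not part of the generated client) --------------
# A minimal illustration of calling one of the async operations above. It
# assumes `client` is an already-authenticated service client whose
# `connections` attribute is this ConnectionsOperations instance (the client
# class itself lives in another module); all values are placeholders.
if __name__ == "__main__":
    async def _list_workspace_connections(client):
        return await client.connections.list_connections(
            subscription_id="<subscription-id>",
            resource_group_name="<resource-group>",
            workspace_name="<workspace-name>",
        )
    # Run with asyncio.run(_list_workspace_connections(client)) against a real client.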
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_connections_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/aio/operations/_connections_operations.py",
"repo_id": "promptflow",
"token_count": 9090
} | 42 |
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator (autorest: 3.8.0, generator: @autorest/[email protected])
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, List, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
# fmt: off
def build_create_connection_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"connectionName": _SERIALIZER.url("connection_name", connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="POST",
url=url,
headers=header_parameters,
**kwargs
)
def build_update_connection_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
content_type = kwargs.pop('content_type', None) # type: Optional[str]
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"connectionName": _SERIALIZER.url("connection_name", connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_connection_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"connectionName": _SERIALIZER.url("connection_name", connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_delete_connection_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"connectionName": _SERIALIZER.url("connection_name", connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
headers=header_parameters,
**kwargs
)
def build_get_connection_with_secrets_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}/listsecrets')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"connectionName": _SERIALIZER.url("connection_name", connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_list_connections_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_list_connection_specs_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/specs')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
def build_list_azure_open_ai_deployments_request(
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}/AzureOpenAIDeployments')
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"workspaceName": _SERIALIZER.url("workspace_name", workspace_name, 'str'),
"connectionName": _SERIALIZER.url("connection_name", connection_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
headers=header_parameters,
**kwargs
)
# fmt: on
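# --- Hedged usage sketch (not part of the generated client) --------------
# Illustrates what the request builders above return: an azure.core.rest
# HttpRequest whose path parameters are substituted into the template URL.
# The identifier values below are placeholders.
if __name__ == "__main__":
    _request = build_get_connection_request(
        subscription_id="00000000-0000-0000-0000-000000000000",
        resource_group_name="my-resource-group",
        workspace_name="my-workspace",
        connection_name="my-connection",
    )
    print(_request.method)  # GET
    print(_request.url)  # template URL with the placeholders filled in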
class ConnectionsOperations(object):
"""ConnectionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~flow.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def create_connection(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
body=None, # type: Optional["_models.CreateOrUpdateConnectionRequestDto"]
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionDto"
"""create_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param body:
:type body: ~flow.models.CreateOrUpdateConnectionRequestDto
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionDto, or the result of cls(response)
:rtype: ~flow.models.ConnectionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateOrUpdateConnectionRequestDto')
else:
_json = None
request = build_create_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
content_type=content_type,
json=_json,
template_url=self.create_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}'} # type: ignore
@distributed_trace
def update_connection(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
body=None, # type: Optional["_models.CreateOrUpdateConnectionRequestDto"]
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionDto"
"""update_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:param body:
:type body: ~flow.models.CreateOrUpdateConnectionRequestDto
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionDto, or the result of cls(response)
:rtype: ~flow.models.ConnectionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if body is not None:
_json = self._serialize.body(body, 'CreateOrUpdateConnectionRequestDto')
else:
_json = None
request = build_update_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
content_type=content_type,
json=_json,
template_url=self.update_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}'} # type: ignore
@distributed_trace
def get_connection(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionDto"
"""get_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionDto, or the result of cls(response)
:rtype: ~flow.models.ConnectionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.get_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}'} # type: ignore
@distributed_trace
def delete_connection(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionDto"
"""delete_connection.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionDto, or the result of cls(response)
:rtype: ~flow.models.ConnectionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_connection_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.delete_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_connection.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}'} # type: ignore
@distributed_trace
def get_connection_with_secrets(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ConnectionDto"
"""get_connection_with_secrets.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionDto, or the result of cls(response)
:rtype: ~flow.models.ConnectionDto
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionDto"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_connection_with_secrets_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.get_connection_with_secrets.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('ConnectionDto', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_connection_with_secrets.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}/listsecrets'} # type: ignore
@distributed_trace
def list_connections(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.ConnectionDto"]
"""list_connections.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of ConnectionDto, or the result of cls(response)
:rtype: list[~flow.models.ConnectionDto]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.ConnectionDto"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_connections_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_connections.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[ConnectionDto]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connections.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections'} # type: ignore
@distributed_trace
def list_connection_specs(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.WorkspaceConnectionSpec"]
"""list_connection_specs.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of WorkspaceConnectionSpec, or the result of cls(response)
:rtype: list[~flow.models.WorkspaceConnectionSpec]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.WorkspaceConnectionSpec"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_connection_specs_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
template_url=self.list_connection_specs.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[WorkspaceConnectionSpec]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_connection_specs.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/specs'} # type: ignore
@distributed_trace
def list_azure_open_ai_deployments(
self,
subscription_id, # type: str
resource_group_name, # type: str
workspace_name, # type: str
connection_name, # type: str
**kwargs # type: Any
):
# type: (...) -> List["_models.AzureOpenAIDeploymentDto"]
"""list_azure_open_ai_deployments.
:param subscription_id: The Azure Subscription ID.
:type subscription_id: str
:param resource_group_name: The Name of the resource group in which the workspace is located.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param connection_name:
:type connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of AzureOpenAIDeploymentDto, or the result of cls(response)
:rtype: list[~flow.models.AzureOpenAIDeploymentDto]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.AzureOpenAIDeploymentDto"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_azure_open_ai_deployments_request(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
connection_name=connection_name,
template_url=self.list_azure_open_ai_deployments.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('[AzureOpenAIDeploymentDto]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_azure_open_ai_deployments.metadata = {'url': '/flow/api/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.MachineLearningServices/workspaces/{workspaceName}/Connections/{connectionName}/AzureOpenAIDeployments'} # type: ignore
| promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_connections_operations.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_restclient/flow/operations/_connections_operations.py",
"repo_id": "promptflow",
"token_count": 13181
} | 43 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import re
class BulkRunURL:
"""Parser for a flow run URL."""
REGEX_PATTERN = ".*prompts/flow/([^/]+)/([^/]+)/bulktest/([^/]+).*"
RUN_URL_FORMAT = (
"https://ml.azure.com/prompts/flow/{}/{}/bulktest/{}/details?wsid="
"/subscriptions/{}/resourcegroups/{}/providers/Microsoft.MachineLearningServices/workspaces/{}"
)
def __init__(self, url: str):
if url:
match = re.match(self.REGEX_PATTERN, url)
if match:
self.experiment_id = match.group(1)
self.flow_id = match.group(2)
self.bulk_test_id = match.group(3)
else:
raise ValueError("Invalid flow run URL: {}".format(url))
@classmethod
def get_url(cls, experiment_id, flow_id, bulk_test_id, subscription_id, resource_group, workspace_name):
return cls.RUN_URL_FORMAT.format(
experiment_id, flow_id, bulk_test_id, subscription_id, resource_group, workspace_name
)
class BulkRunId:
"""Parser for a flow run ID."""
REGEX_PATTERN = "azureml://experiment/([^/]+)/flow/([^/]+)/bulktest/([^/]+)(/run/[^/]+)?"
RUN_ID_FORMAT = "azureml://experiment/{}/flow/{}/bulktest/{}"
def __init__(self, arm_id: str):
if arm_id:
match = re.match(self.REGEX_PATTERN, arm_id)
if match:
self.experiment_id = match.group(1)
self.flow_id = match.group(2)
self.bulk_test_id = match.group(3)
                # group(4) captures the optional "/run/<run_id>" suffix and is None when absent.
                if match.group(4):
                    self.run_id = match.group(4).split("/")[-1].strip()
                else:
                    self.run_id = None
else:
raise ValueError("Invalid flow run ID: {}".format(arm_id))
@classmethod
def get_url(cls, experiment_id, flow_id, bulk_test_id, *, run_id=None):
arm_id = cls.RUN_ID_FORMAT.format(experiment_id, flow_id, bulk_test_id)
if run_id:
arm_id += "/run/{}".format(run_id)
return arm_id
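# --- Hedged usage sketch (not part of the original module) ---------------
# Round-trip illustration of the ID helpers above; the IDs are made up.
if __name__ == "__main__":
    _arm_id = BulkRunId.get_url("exp-1", "flow-2", "bt-3", run_id="run-4")
    # -> "azureml://experiment/exp-1/flow/flow-2/bulktest/bt-3/run/run-4"
    _parsed = BulkRunId(_arm_id)
    assert (_parsed.experiment_id, _parsed.flow_id, _parsed.bulk_test_id, _parsed.run_id) == (
        "exp-1",
        "flow-2",
        "bt-3",
        "run-4",
    )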
| promptflow/src/promptflow/promptflow/azure/_utils/_url_utils.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/azure/_utils/_url_utils.py",
"repo_id": "promptflow",
"token_count": 1057
} | 44 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException, ValidationException
class InputMappingError(ValidationException):
def __init__(self, target: ErrorTarget = ErrorTarget.EXECUTOR, **kwargs):
super().__init__(target=target, **kwargs)
class EmptyInputsData(UserErrorException):
pass
class ExecutorServiceUnhealthy(SystemErrorException):
pass
| promptflow/src/promptflow/promptflow/batch/_errors.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/batch/_errors.py",
"repo_id": "promptflow",
"token_count": 143
} | 45 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# flake8: noqa
from .flow_executor import FlowExecutor
from .flow_validator import FlowValidator
| promptflow/src/promptflow/promptflow/executor/__init__.py/0 | {
"file_path": "promptflow/src/promptflow/promptflow/executor/__init__.py",
"repo_id": "promptflow",
"token_count": 54
} | 46 |
import os
import pytest
from promptflow.executor import FlowExecutor
from ..utils import get_flow_folder, get_yaml_file
@pytest.mark.e2etest
class TestAsync:
@pytest.mark.parametrize(
"folder_name, concurrency_levels, expected_concurrency",
[
("async_tools", [1, 2, 3], [1, 2, 2]),
("async_tools_with_sync_tools", [1, 2, 3], [1, 2, 2]),
],
)
def test_executor_node_concurrency(self, folder_name, concurrency_levels, expected_concurrency):
os.chdir(get_flow_folder(folder_name))
executor = FlowExecutor.create(get_yaml_file(folder_name), {})
def calculate_max_concurrency(flow_result):
timeline = []
api_calls = flow_result.run_info.api_calls[0]["children"]
for api_call in api_calls:
timeline.append(("start", api_call["start_time"]))
timeline.append(("end", api_call["end_time"]))
timeline.sort(key=lambda x: x[1])
current_concurrency = 0
max_concurrency = 0
for event, _ in timeline:
if event == "start":
current_concurrency += 1
max_concurrency = max(max_concurrency, current_concurrency)
elif event == "end":
current_concurrency -= 1
return max_concurrency
for i in range(len(concurrency_levels)):
concurrency = concurrency_levels[i]
flow_result = executor.exec_line({"input_str": "Hello"}, node_concurrency=concurrency)
max_concurrency = calculate_max_concurrency(flow_result)
assert max_concurrency == expected_concurrency[i]
assert max_concurrency <= concurrency
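# --- Hedged illustration (not part of the original test) -----------------
# The sweep-line idea used by calculate_max_concurrency on a made-up timeline:
# two overlapping calls, so the maximum observed concurrency is 2.
if __name__ == "__main__":
    _events = sorted([("start", 0), ("start", 1), ("end", 2), ("end", 3)], key=lambda x: x[1])
    _current = _peak = 0
    for _kind, _ in _events:
        _current += 1 if _kind == "start" else -1
        _peak = max(_peak, _current)
    assert _peak == 2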
| promptflow/src/promptflow/tests/executor/e2etests/test_async.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/e2etests/test_async.py",
"repo_id": "promptflow",
"token_count": 809
} | 47 |
from jinja2 import Template
from promptflow import ToolProvider, tool
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate
class TestCustomLLMTool(ToolProvider):
def __init__(self, connection: AzureOpenAIConnection):
super().__init__()
self.connection = connection
@tool
def call(self, connection_2: AzureOpenAIConnection, api: str, template: PromptTemplate, **kwargs):
prompt = Template(template, trim_blocks=True, keep_trailing_newline=True).render(**kwargs)
assert isinstance(self.connection, AzureOpenAIConnection)
assert isinstance(connection_2, AzureOpenAIConnection)
assert api in ["completion", "chat"]
return prompt
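# --- Hedged illustration (not part of the original tool) -----------------
# Shows what the Template call above does with a tiny made-up template.
if __name__ == "__main__":
    _rendered = Template("Hello {{ name }}!", trim_blocks=True, keep_trailing_newline=True).render(name="world")
    assert _rendered == "Hello world!"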
| promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/package_tools/custom_llm_tool.py",
"repo_id": "promptflow",
"token_count": 257
} | 48 |
import pytest
from promptflow._utils.execution_utils import apply_default_value_for_input
from promptflow.contracts.flow import FlowInputDefinition
from promptflow.contracts.tool import ValueType
@pytest.mark.unittest
class TestFlowExecutor:
@pytest.mark.parametrize(
"flow_inputs, inputs, expected_inputs",
[
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
None, # Could handle None input
{"input_from_default": "default_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{},
{"input_from_default": "default_value"},
),
(
{
"input_no_default": FlowInputDefinition(type=ValueType.STRING),
},
{},
{}, # No default value for input.
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.STRING, default="default_value"),
},
{"input_from_default": "input_value", "another_key": "input_value"},
{"input_from_default": "input_value", "another_key": "input_value"},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.BOOL, default=False),
},
{},
{"input_from_default": False},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.LIST, default=[]),
},
{},
{"input_from_default": []},
),
(
{
"input_from_default": FlowInputDefinition(type=ValueType.OBJECT, default={}),
},
{},
{"input_from_default": {}},
),
],
)
def test_apply_default_value_for_input(self, flow_inputs, inputs, expected_inputs):
result = apply_default_value_for_input(flow_inputs, inputs)
assert result == expected_inputs
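# --- Hedged illustration (not part of the original test) -----------------
# Direct call showing how defaults are applied, mirroring the cases above;
# the input name is made up.
if __name__ == "__main__":
    _flow_inputs = {"greeting": FlowInputDefinition(type=ValueType.STRING, default="hi")}
    assert apply_default_value_for_input(_flow_inputs, {}) == {"greeting": "hi"}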
| promptflow/src/promptflow/tests/executor/unittests/_utils/test_execution_utils.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/_utils/test_execution_utils.py",
"repo_id": "promptflow",
"token_count": 1270
} | 49 |
from pathlib import Path
import pytest
from promptflow._sdk.entities._connection import AzureContentSafetyConnection
from promptflow.contracts._errors import FailedToImportModule
from promptflow.contracts.flow import (
Flow,
FlowInputAssignment,
FlowInputDefinition,
FlowOutputDefinition,
InputAssignment,
InputValueType,
Node,
NodeVariant,
NodeVariants,
ToolSource,
ToolSourceType,
)
from promptflow.contracts.tool import Tool, ToolType, ValueType
from ...utils import EAGER_FLOWS_ROOT, FLOW_ROOT, get_flow_folder, get_flow_package_tool_definition, get_yaml_file
PACKAGE_TOOL_BASE = Path(__file__).parent.parent.parent / "package_tools"
@pytest.mark.e2etest
class TestFlowContract:
@pytest.mark.parametrize(
"flow_folder, expected_connection_names",
[
("web_classification", {"azure_open_ai_connection"}),
("basic-with-connection", {"azure_open_ai_connection"}),
("flow_with_dict_input_with_variant", {"mock_custom_connection"}),
],
)
def test_flow_get_connection_names(self, flow_folder, expected_connection_names):
flow_yaml = get_yaml_file(flow_folder)
flow = Flow.from_yaml(flow_yaml)
assert flow.get_connection_names() == expected_connection_names
def test_flow_get_connection_input_names_for_node_with_variants(self):
# Connection input exists only in python node
flow_folder = "flow_with_dict_input_with_variant"
flow_yaml = get_yaml_file(flow_folder)
flow = Flow.from_yaml(flow_yaml)
assert flow.get_connection_input_names_for_node("print_val") == ["conn"]
def test_flow_get_connection_names_with_package_tool(self, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool"
flow_file = flow_folder / "flow.dag.yaml"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tool_definition)
flow = Flow.from_yaml(flow_file)
connection_names = flow.get_connection_names()
assert connection_names == {"azure_open_ai_connection"}
def test_flow_get_connection_input_names_for_node(self, mocker):
flow_folder = PACKAGE_TOOL_BASE / "custom_llm_tool"
flow_file = flow_folder / "flow.dag.yaml"
package_tool_definition = get_flow_package_tool_definition(flow_folder)
mocker.patch("promptflow._core.tools_manager.collect_package_tools", return_value=package_tool_definition)
flow = Flow.from_yaml(flow_file)
connection_names = flow.get_connection_input_names_for_node(flow.nodes[0].name)
assert connection_names == ["connection", "connection_2"]
assert flow.get_connection_input_names_for_node("not_exist") == []
@pytest.mark.parametrize(
"flow_folder_name, environment_variables_overrides, except_environment_variables",
[
pytest.param(
"flow_with_environment_variables",
{"env2": "runtime_env2", "env10": "aaaaa"},
{
"env1": "2",
"env2": "runtime_env2",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
"env10": "aaaaa",
},
id="LoadEnvVariablesWithOverrides",
),
pytest.param(
"flow_with_environment_variables",
None,
{
"env1": "2",
"env2": "spawn",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
},
id="LoadEnvVariablesWithoutOverrides",
),
pytest.param(
"simple_hello_world",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesWithoutYamlLevelEnvVariables",
),
],
)
def test_flow_get_environment_variables_with_overrides(
        self, flow_folder_name, environment_variables_overrides, expected_environment_variables
):
flow_folder = get_flow_folder(flow_folder_name)
flow_file = "flow.dag.yaml"
flow = Flow.from_yaml(flow_file=flow_file, working_dir=flow_folder)
merged_environment_variables = flow.get_environment_variables_with_overrides(
environment_variables_overrides=environment_variables_overrides,
)
        assert merged_environment_variables == expected_environment_variables
@pytest.mark.parametrize(
"flow_folder_name, folder_root, flow_file, environment_variables_overrides, except_environment_variables",
[
pytest.param(
"flow_with_environment_variables",
FLOW_ROOT,
"flow.dag.yaml",
{"env2": "runtime_env2", "env10": "aaaaa"},
{
"env1": "2",
"env2": "runtime_env2",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
"env10": "aaaaa",
},
id="LoadEnvVariablesWithOverrides",
),
pytest.param(
"flow_with_environment_variables",
FLOW_ROOT,
"flow.dag.yaml",
None,
{
"env1": "2",
"env2": "spawn",
"env3": "[1, 2, 3, 4, 5]",
"env4": '{"a": 1, "b": "2"}',
},
id="LoadEnvVariablesWithoutOverrides",
),
pytest.param(
"simple_hello_world",
FLOW_ROOT,
"flow.dag.yaml",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesWithoutYamlLevelEnvVariables",
),
pytest.param(
"simple_with_yaml",
EAGER_FLOWS_ROOT,
"entry.py",
None,
{},
id="LoadEnvVariablesForEagerFlow",
),
pytest.param(
"simple_with_yaml",
EAGER_FLOWS_ROOT,
"entry.py",
{"env2": "runtime_env2", "env10": "aaaaa"},
{"env2": "runtime_env2", "env10": "aaaaa"},
id="LoadEnvVariablesForEagerFlowWithOverrides",
),
],
)
def test_load_env_variables(
        self, flow_folder_name, folder_root, flow_file, environment_variables_overrides, expected_environment_variables
):
flow_folder = get_flow_folder(flow_folder_name, folder_root)
merged_environment_variables = Flow.load_env_variables(
flow_file=flow_file,
working_dir=flow_folder,
environment_variables_overrides=environment_variables_overrides,
)
        assert merged_environment_variables == expected_environment_variables
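# Unit tests for the Flow contract: (de)serialization round-trips, module import checks, node variants
# and overrides, and the node/graph helper queries (aggregation, chat, reference lookups).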
@pytest.mark.unittest
class TestFlow:
@pytest.mark.parametrize(
"flow, expected_value",
[
(
Flow(id="flow_id", name="flow_name", nodes=[], inputs={}, outputs={}, tools=[]),
{
"id": "flow_id",
"name": "flow_name",
"nodes": [],
"inputs": {},
"outputs": {},
"tools": [],
"language": "python",
},
),
(
Flow(
id="flow_id",
name="flow_name",
nodes=[Node(name="node1", tool="tool1", inputs={})],
inputs={"input1": FlowInputDefinition(type=ValueType.STRING)},
outputs={"output1": FlowOutputDefinition(type=ValueType.STRING, reference=None)},
tools=[],
),
{
"id": "flow_id",
"name": "flow_name",
"nodes": [{"name": "node1", "tool": "tool1", "inputs": {}}],
"inputs": {"input1": {"type": ValueType.STRING.value}},
"outputs": {"output1": {"type": ValueType.STRING.value}},
"tools": [],
"language": "python",
},
),
],
)
def test_flow_serialize(self, flow, expected_value):
assert flow.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"id": "flow_id",
"name": "flow_name",
"nodes": [{"name": "node1", "tool": "tool1", "inputs": {}, "outputs": {}}],
"inputs": {"input1": {"type": ValueType.STRING.value}},
"outputs": {"output1": {"type": ValueType.STRING.value}},
"tools": [],
},
Flow(
id="flow_id",
name="flow_name",
nodes=[Node(name="node1", tool="tool1", inputs={})],
inputs={
"input1": FlowInputDefinition(
type=ValueType.STRING, description="", enum=[], is_chat_input=False, is_chat_history=None
)
},
outputs={
"output1": FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment(
value="", value_type=InputValueType.LITERAL, section="", property=""
),
description="",
evaluation_only=False,
is_chat_output=False,
)
},
tools=[],
node_variants={},
program_language="python",
environment_variables={},
),
),
],
)
def test_flow_deserialize(self, data, expected_value):
assert Flow.deserialize(data) == expected_value
def test_import_requisites(self):
tool1 = Tool(name="tool1", type=ToolType.PYTHON, inputs={}, module="yaml")
tool2 = Tool(name="tool2", type=ToolType.PYTHON, inputs={}, module="module")
node1 = Node(name="node1", tool="tool1", inputs={}, module="yaml")
node2 = Node(name="node2", tool="tool2", inputs={}, module="module")
with pytest.raises(FailedToImportModule) as e:
Flow._import_requisites([tool1], [node2])
assert str(e.value).startswith(
"Failed to import modules with error: Import node 'node2' provider module 'module' failed."
)
with pytest.raises(FailedToImportModule) as e:
Flow._import_requisites([tool2], [node1])
assert str(e.value).startswith(
"Failed to import modules with error: Import tool 'tool2' module 'module' failed."
)
def test_apply_default_node_variants(self):
node_variant = NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None}, use_variants=False),
description=None,
)
node_variants = {
"print_val": NodeVariants(
default_variant_id="variant1",
variants={"variant1": node_variant},
)
}
flow1 = Flow(
id="test_flow_id",
name="test_flow",
nodes=[Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=True)],
inputs={},
outputs={},
tools=[],
node_variants=node_variants,
)
# test when node.use_variants is True
flow1._apply_default_node_variants()
assert flow1.nodes[0].use_variants is False
assert flow1.nodes[0].inputs.keys() == {"input2"}
assert flow1.nodes[0].name == "print_val"
flow2 = Flow(
id="test_flow_id",
name="test_flow",
nodes=[Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=False)],
inputs={},
outputs={},
tools=[],
node_variants=node_variants,
)
# test when node.use_variants is False
tmp_nodes = flow2.nodes
flow2._apply_default_node_variants()
assert flow2.nodes == tmp_nodes
@pytest.mark.parametrize(
"node_variants",
[
(None),
(
{
"test": NodeVariants(
default_variant_id="variant1",
variants={
"variant1": NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None})
)
},
)
}
),
(
{
"print_val": NodeVariants(
default_variant_id="test",
variants={
"variant1": NodeVariant(
node=Node(name="print_val_variant", tool=None, inputs={"input2": None})
)
},
)
}
),
],
)
def test_apply_default_node_variant(self, node_variants):
node = Node(name="print_val", tool=None, inputs={"input1": None}, use_variants=True)
assert Flow._apply_default_node_variant(node, node_variants) == node
def test_apply_node_overrides(self):
llm_node = Node(name="llm_node", tool=None, inputs={}, connection="open_ai_connection")
test_node = Node(
name="test_node", tool=None, inputs={"test": InputAssignment("test_value1", InputValueType.LITERAL)}
)
flow = Flow(id="test_flow_id", name="test_flow", nodes=[llm_node, test_node], inputs={}, outputs={}, tools=[])
assert flow == flow._apply_node_overrides(None)
assert flow == flow._apply_node_overrides({})
node_overrides = {
"other_node.connection": "some_connection",
}
with pytest.raises(ValueError):
flow._apply_node_overrides(node_overrides)
node_overrides = {
"llm_node.connection": "custom_connection",
"test_node.test": "test_value2",
}
flow = flow._apply_node_overrides(node_overrides)
assert flow.nodes[0].connection == "custom_connection"
assert flow.nodes[1].inputs["test"].value == "test_value2"
def test_has_aggregation_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow1 = Flow(id="id", name="name", nodes=[llm_node], inputs={}, outputs={}, tools=[])
assert not flow1.has_aggregation_node()
flow2 = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow2.has_aggregation_node()
def test_get_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
flow = Flow(id="id", name="name", nodes=[llm_node], inputs={}, outputs={}, tools=[])
assert flow.get_node("llm_node") is llm_node
assert flow.get_node("other_node") is None
def test_get_tool(self):
tool = Tool(name="tool", type=ToolType.PYTHON, inputs={})
flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[tool])
assert flow.get_tool("tool") is tool
assert flow.get_tool("other_tool") is None
def test_is_reduce_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_reduce_node("llm_node")
assert flow.is_reduce_node("aggre_node")
def test_is_normal_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow.is_normal_node("llm_node")
assert not flow.is_normal_node("aggre_node")
def test_is_llm_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={}, type=ToolType.LLM)
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert flow.is_llm_node(llm_node)
assert not flow.is_llm_node(aggre_node)
def test_is_referenced_by_flow_output(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(name="aggre_node", tool=None, inputs={}, aggregation=True)
output = {
"output": FlowOutputDefinition(
type=ValueType.STRING, reference=InputAssignment("llm_node", InputValueType.NODE_REFERENCE, "output")
)
}
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs=output, tools=[])
assert flow.is_referenced_by_flow_output(llm_node)
assert not flow.is_referenced_by_flow_output(aggre_node)
def test_is_node_referenced_by(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(
name="aggre_node",
tool=None,
inputs={"input": InputAssignment(value="llm_node", value_type=InputValueType.NODE_REFERENCE)},
aggregation=True,
)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_node_referenced_by(aggre_node, llm_node)
assert flow.is_node_referenced_by(llm_node, aggre_node)
def test_is_referenced_by_other_node(self):
llm_node = Node(name="llm_node", tool=None, inputs={})
aggre_node = Node(
name="aggre_node",
tool=None,
inputs={"input": InputAssignment(value="llm_node", value_type=InputValueType.NODE_REFERENCE)},
aggregation=True,
)
flow = Flow(id="id", name="name", nodes=[llm_node, aggre_node], inputs={}, outputs={}, tools=[])
assert not flow.is_referenced_by_other_node(aggre_node)
assert flow.is_referenced_by_other_node(llm_node)
def test_is_chat_flow(self):
chat_input = {"question": FlowInputDefinition(type=ValueType.STRING, is_chat_input=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs=chat_input, outputs={}, tools=[])
assert not standard_flow.is_chat_flow()
assert chat_flow.is_chat_flow()
def test_get_chat_input_name(self):
chat_input = {"question": FlowInputDefinition(type=ValueType.STRING, is_chat_input=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs=chat_input, outputs={}, tools=[])
assert standard_flow.get_chat_input_name() is None
assert chat_flow.get_chat_input_name() == "question"
def test_get_chat_output_name(self):
chat_output = {"answer": FlowOutputDefinition(type=ValueType.STRING, reference=None, is_chat_output=True)}
standard_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs={}, tools=[])
chat_flow = Flow(id="id", name="name", nodes=[], inputs={}, outputs=chat_output, tools=[])
assert standard_flow.get_chat_output_name() is None
assert chat_flow.get_chat_output_name() == "answer"
def test_replace_with_variant(self):
node0 = Node(name="node0", tool=None, inputs={"input0": None}, use_variants=True)
node1 = Node(name="node1", tool="tool1", inputs={"input1": None}, use_variants=False)
node2 = Node(name="node2", tool="tool2", inputs={"input2": None}, use_variants=False)
node_variant = Node(name="node0", tool="tool3", inputs={"input3": None}, use_variants=False)
node_variants = {
"print_val": NodeVariants(
default_variant_id="variant1",
variants={"variant1": NodeVariant(node_variant, None)},
)
}
flow = Flow("test_flow_id", "test_flow", [node0, node1, node2], {}, {}, [], node_variants)
# flow = Flow.from_yaml(get_yaml_file("web_classification"))
tool_cnt = len(flow.tools)
flow._replace_with_variant(node_variant, [flow.nodes[1].tool, flow.nodes[2].tool])
assert "input3" in flow.nodes[0].inputs
assert flow.nodes[0].tool == "tool3"
assert len(flow.tools) == tool_cnt + 2
@pytest.mark.unittest
class TestInputAssignment:
@pytest.mark.parametrize(
"value, expected_value",
[
(InputAssignment("value", InputValueType.LITERAL), "value"),
(InputAssignment("value", InputValueType.FLOW_INPUT), "${flow.value}"),
(InputAssignment("value", InputValueType.NODE_REFERENCE, "section"), "${value.section}"),
(
InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property"),
"${value.section.property}",
),
(InputAssignment(AzureContentSafetyConnection, InputValueType.LITERAL, "section", "property"), "ABCMeta"),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"serialized_value, expected_value",
[
(
"${value.section.property}",
InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property"),
),
(
"${flow.section.property}",
FlowInputAssignment("section.property", prefix="flow.", value_type=InputValueType.FLOW_INPUT),
),
("${value}", InputAssignment("value", InputValueType.NODE_REFERENCE, "output")),
("$value", InputAssignment("$value", InputValueType.LITERAL)),
("value", InputAssignment("value", InputValueType.LITERAL)),
],
)
def test_deserialize(self, serialized_value, expected_value):
input_assignment = InputAssignment.deserialize(serialized_value)
assert input_assignment == expected_value
@pytest.mark.parametrize(
"serialized_reference, expected_value",
[
("input", InputAssignment("input", InputValueType.NODE_REFERENCE, "output")),
("flow.section", FlowInputAssignment("section", value_type=InputValueType.FLOW_INPUT, prefix="flow.")),
(
"flow.section.property",
FlowInputAssignment("section.property", value_type=InputValueType.FLOW_INPUT, prefix="flow."),
),
],
)
def test_deserialize_reference(self, serialized_reference, expected_value):
assert InputAssignment.deserialize_reference(serialized_reference) == expected_value
@pytest.mark.parametrize(
"serialized_node_reference, expected_value",
[
("value", InputAssignment("value", InputValueType.NODE_REFERENCE, "output")),
("value.section", InputAssignment("value", InputValueType.NODE_REFERENCE, "section")),
("value.section.property", InputAssignment("value", InputValueType.NODE_REFERENCE, "section", "property")),
],
)
def test_deserialize_node_reference(self, serialized_node_reference, expected_value):
assert InputAssignment.deserialize_node_reference(serialized_node_reference) == expected_value
@pytest.mark.unittest
class TestFlowInputAssignment:
@pytest.mark.parametrize(
"input_value, expected_value",
[
("flow.section.property", True),
("inputs.section.property", True),
("section.property", False),
("", False),
],
)
def test_is_flow_input(self, input_value, expected_value):
assert FlowInputAssignment.is_flow_input(input_value) == expected_value
def test_deserialize(self):
expected_input = FlowInputAssignment("section.property", prefix="inputs.", value_type=InputValueType.FLOW_INPUT)
assert FlowInputAssignment.deserialize("inputs.section.property") == expected_input
expected_flow = FlowInputAssignment("section.property", prefix="flow.", value_type=InputValueType.FLOW_INPUT)
assert FlowInputAssignment.deserialize("flow.section.property") == expected_flow
with pytest.raises(ValueError):
FlowInputAssignment.deserialize("value")
@pytest.mark.unittest
class TestToolSource:
@pytest.mark.parametrize(
"tool_source, expected_value",
[
({}, ToolSource(type=ToolSourceType.Code)),
({"type": ToolSourceType.Code.value}, ToolSource(type=ToolSourceType.Code)),
(
{"type": ToolSourceType.Package.value, "tool": "tool", "path": "path"},
ToolSource(type=ToolSourceType.Package, tool="tool", path="path"),
),
],
)
def test_deserialize(self, tool_source, expected_value):
assert ToolSource.deserialize(tool_source) == expected_value
@pytest.mark.unittest
class TestNode:
@pytest.mark.parametrize(
"node, expected_value",
[
(
Node(name="test_node", tool="test_tool", inputs={}),
{"name": "test_node", "tool": "test_tool", "inputs": {}},
),
(
Node(name="test_node", tool="test_tool", inputs={}, aggregation=True),
{"name": "test_node", "tool": "test_tool", "inputs": {}, "aggregation": True, "reduce": True},
),
],
)
def test_serialize(self, node, expected_value):
assert node.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{"name": "test_node", "tool": "test_tool", "inputs": {}},
Node(name="test_node", tool="test_tool", inputs={}),
),
(
{"name": "test_node", "tool": "test_tool", "inputs": {}, "aggregation": True},
Node(name="test_node", tool="test_tool", inputs={}, aggregation=True),
),
],
)
def test_deserialize(self, data, expected_value):
assert Node.deserialize(data) == expected_value
@pytest.mark.unittest
class TestFlowInputDefinition:
@pytest.mark.parametrize(
"value, expected_value",
[
(FlowInputDefinition(type=ValueType.BOOL), {"type": ValueType.BOOL.value}),
(
FlowInputDefinition(
type=ValueType.STRING,
default="default",
description="description",
enum=["enum1", "enum2"],
is_chat_input=True,
is_chat_history=True,
),
{
"type": ValueType.STRING.value,
"default": "default",
"description": "description",
"enum": ["enum1", "enum2"],
"is_chat_input": True,
"is_chat_history": True,
},
),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"type": ValueType.STRING,
"default": "default",
"description": "description",
"enum": ["enum1", "enum2"],
"is_chat_input": True,
"is_chat_history": True,
},
FlowInputDefinition(
type=ValueType.STRING,
default="default",
description="description",
enum=["enum1", "enum2"],
is_chat_input=True,
is_chat_history=True,
),
),
(
{
"type": ValueType.STRING,
},
FlowInputDefinition(
type=ValueType.STRING, description="", enum=[], is_chat_input=False, is_chat_history=None
),
),
],
)
def test_deserialize(self, data, expected_value):
assert FlowInputDefinition.deserialize(data) == expected_value
@pytest.mark.unittest
class TestFlowOutputDefinition:
@pytest.mark.parametrize(
"value, expected_value",
[
(FlowOutputDefinition(type=ValueType.BOOL, reference=None), {"type": ValueType.BOOL.value}),
(
FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment("value", InputValueType.NODE_REFERENCE),
description="description",
evaluation_only=True,
is_chat_output=True,
),
{
"type": ValueType.STRING.value,
"reference": "${value.}",
"description": "description",
"evaluation_only": True,
"is_chat_output": True,
},
),
],
)
def test_serialize(self, value, expected_value):
assert value.serialize() == expected_value
@pytest.mark.parametrize(
"data, expected_value",
[
(
{
"type": ValueType.STRING,
},
FlowOutputDefinition(
type=ValueType.STRING,
reference=InputAssignment("", InputValueType.LITERAL),
),
),
],
)
def test_deserialize(self, data, expected_value):
assert FlowOutputDefinition.deserialize(data) == expected_value
| promptflow/src/promptflow/tests/executor/unittests/contracts/test_flow.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/contracts/test_flow.py",
"repo_id": "promptflow",
"token_count": 15418
} | 50 |
import re
import sys
from pathlib import Path
from typing import List
from unittest.mock import mock_open
import pytest
from jinja2 import TemplateSyntaxError
from promptflow._core._errors import InvalidSource
from promptflow._core.tools_manager import ToolLoader
from promptflow._internal import tool
from promptflow._sdk.entities import CustomConnection, CustomStrongTypeConnection
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.flow import InputAssignment, InputValueType, Node, ToolSource, ToolSourceType
from promptflow.contracts.tool import AssistantDefinition, InputDefinition, Secret, Tool, ToolType, ValueType
from promptflow.contracts.types import PromptTemplate
from promptflow.exceptions import UserErrorException
from promptflow.executor._errors import (
ConnectionNotFound,
InvalidConnectionType,
NodeInputValidationError,
ResolveToolError,
ValueTypeUnresolved,
)
from promptflow.executor._tool_resolver import ResolvedTool, ToolResolver
from ...utils import DATA_ROOT, FLOW_ROOT
TEST_ROOT = Path(__file__).parent.parent.parent
REQUESTS_PATH = TEST_ROOT / "test_configs/executor_api_requests"
WRONG_REQUESTS_PATH = TEST_ROOT / "test_configs/executor_wrong_requests"
class MyFirstCSTConnection(CustomStrongTypeConnection):
api_key: Secret
api_base: str
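# Minimal custom strong type connection used to exercise _convert_to_custom_strong_type_connection_value.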
@tool(streaming_option_parameter="stream_enabled")
def mock_package_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
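# Stand-in for a package tool: replaces {{placeholder}} occurrences in the prompt with kwarg values;
# the streaming_option_parameter set on the decorator is asserted in test_integrate_prompt_in_package_node.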
@pytest.mark.unittest
class TestToolResolver:
@pytest.fixture
def resolver(self):
return ToolResolver(working_dir=None, connections={})
def test_resolve_tool_by_node_with_diff_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
mocker.patch.object(
resolver,
"_resolve_package_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_script_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_prompt_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_resolve_llm_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
mocker.patch.object(
resolver,
"_integrate_prompt_in_package_node",
return_value=mocker.Mock(node=node, definition=None, callable=None, init_args=None),
)
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Package)
resolver.resolve_tool_by_node(node)
resolver._resolve_package_node.assert_called_once()
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Code)
resolver.resolve_tool_by_node(node)
resolver._resolve_script_node.assert_called_once()
node.type = ToolType.PROMPT
resolver.resolve_tool_by_node(node)
resolver._resolve_prompt_node.assert_called_once()
node.type = ToolType.LLM
resolver.resolve_tool_by_node(node)
resolver._resolve_llm_node.assert_called_once()
resolver._resolve_package_node.reset_mock()
node.type = ToolType.CUSTOM_LLM
node.source = mocker.Mock(type=ToolSourceType.PackageWithPrompt)
resolver.resolve_tool_by_node(node)
resolver._resolve_package_node.assert_called_once()
resolver._integrate_prompt_in_package_node.assert_called_once()
def test_resolve_tool_by_node_with_invalid_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool type" in exec_info.value.message
def test_resolve_tool_by_node_with_invalid_source_type(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool source type" in exec_info.value.message
node.type = ToolType.CUSTOM_LLM
node.source = mocker.Mock(type=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NotImplementedError)
assert "Tool source type" in exec_info.value.message
def test_resolve_tool_by_node_with_no_source(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.source = None
with pytest.raises(ResolveToolError) as ex:
resolver.resolve_tool_by_node(node)
assert isinstance(ex.value.inner_exception, UserErrorException)
def test_resolve_tool_by_node_with_no_source_path(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PROMPT
node.source = mocker.Mock(type=ToolSourceType.Package, path=None)
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, InvalidSource)
assert "Node source path" in exec_info.value.message
def test_resolve_tool_by_node_with_duplicated_inputs(self, resolver, mocker):
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PROMPT
mocker.patch.object(resolver, "_load_source_content", return_value="{{template}}")
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, NodeInputValidationError)
assert "These inputs are duplicated" in exec_info.value.message
def test_resolve_tool_by_node_with_invalid_template(self, resolver, mocker):
node = mocker.Mock(tool=None, inputs={})
node.name = "node"
node.type = ToolType.PROMPT
mocker.patch.object(resolver, "_load_source_content", return_value="{{current context}}")
with pytest.raises(ResolveToolError) as exec_info:
resolver.resolve_tool_by_node(node)
assert isinstance(exec_info.value.inner_exception, TemplateSyntaxError)
expected_message = (
"Tool load failed in 'node': Jinja parsing failed at line 1: "
"(TemplateSyntaxError) expected token 'end of print statement', got 'context'"
)
assert expected_message in exec_info.value.message
def test_convert_node_literal_input_types_with_invalid_case(self):
# Case 1: conn_name not in connections, should raise conn_name not found error
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
)
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
# Case 2: conn_name in connections, but type not matched
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._convert_node_literal_input_types(node, tool)
message = "'AzureOpenAIConnection' is not supported, valid types ['CustomConnection']"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 3: Literal value, type mismatch
tool = Tool(name="mock", type="python", inputs={"int_input": InputDefinition(type=[ValueType.INT])})
node = Node(
name="mock",
tool=tool,
inputs={"int_input": InputAssignment(value="invalid", value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
message = "value 'invalid' is not type int"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 4: Unresolved value, like newly added type not in old version ValueType enum
tool = Tool(name="mock", type="python", inputs={"int_input": InputDefinition(type=["A_good_type"])})
node = Node(
name="mock",
tool=tool,
inputs={"int_input": InputAssignment(value="invalid", value_type=InputValueType.LITERAL)},
)
with pytest.raises(ValueTypeUnresolved):
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
# Case 5: Literal value, invalid image in list
tool = Tool(name="mock", type="python", inputs={"list_input": InputDefinition(type=[ValueType.LIST])})
invalid_image = {"data:image/jpg;base64": "invalid_image"}
node = Node(
name="mock",
tool=tool,
inputs={"list_input": InputAssignment(value=[invalid_image], value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
message = "Invalid base64 image"
assert message in exe_info.value.message, "Expected: {}, Actual: {}".format(message, exe_info.value.message)
# Case 6: Literal value, invalid assistant definition path
tool = Tool(
name="mock",
type="python",
inputs={"assistant_definition": InputDefinition(type=[ValueType.ASSISTANT_DEFINITION])},
)
node = Node(
name="mock",
tool=tool,
inputs={"assistant_definition": InputAssignment(value="invalid_path", value_type=InputValueType.LITERAL)},
)
with pytest.raises(NodeInputValidationError) as exe_info:
tool_resolver = ToolResolver(working_dir=Path(__file__).parent, connections={})
tool_resolver._convert_node_literal_input_types(node, tool)
assert (
"Failed to load assistant definition" in exe_info.value.message
and "is not a valid path" in exe_info.value.message
), "Expected: {}, Actual: {}".format(message, exe_info.value.message)
def test_resolve_llm_connection_to_inputs(self):
# Case 1: node.connection is not specified
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
# Case 2: node.connection is not found from connection manager
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name1",
)
connections = {}
with pytest.raises(ConnectionNotFound):
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
# Case 3: Tool definition with bad input type list
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["int"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(InvalidConnectionType) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert "Connection type can not be resolved for tool" in exe_info.value.message
# Case 4: Tool type not match the connection manager return
tool = Tool(name="mock", type="python", inputs={"conn": InputDefinition(type=["OpenAIConnection"])})
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
with pytest.raises(InvalidConnectionType) as exe_info:
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert "Invalid connection" in exe_info.value.message
# Case 5: Normal case
tool = Tool(
name="mock",
type="python",
inputs={"conn": InputDefinition(type=["OpenAIConnection", "AzureOpenAIConnection"])},
)
node = Node(
name="mock",
tool=tool,
inputs={"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)},
connection="conn_name",
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
key, conn = tool_resolver._resolve_llm_connection_to_inputs(node, tool)
assert key == "conn"
assert isinstance(conn, AzureOpenAIConnection)
def test_resolve_llm_node(self, mocker):
def mock_llm_api_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.LLM, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_llm_node", return_value=tool)
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_package_tool",
return_value=(mock_llm_api_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
mocker.patch.object(tool_resolver, "_load_source_content", return_value="{{text}}")
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
"image": InputAssignment(value=str(DATA_ROOT / "logo.jpg"), value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_llm_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
pattern = re.compile(r"^Hello World!!\[image\]\(Image\([a-z0-9]{8}\)\)$")
prompt = resolved_tool.callable(**kwargs)
assert re.match(pattern, prompt)
def test_resolve_script_node(self, mocker):
def mock_python_func(prompt: PromptTemplate, **kwargs):
for k, v in kwargs.items():
prompt = prompt.replace(f"{{{{{k}}}}}", str(v))
return prompt
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.PYTHON, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_script_node", return_value=(None, tool))
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_tool_from_module",
return_value=(mock_python_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"prompt": InputAssignment(value="{{text}}", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_script_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
def test_resolve_script_node_with_assistant_definition(self, mocker):
def mock_python_func(input: AssistantDefinition):
if input.model == "model" and input.instructions == "instructions" and input.tools == []:
return True
return False
tool_loader = ToolLoader(working_dir=None)
tool = Tool(
name="mock", type=ToolType.PYTHON, inputs={"input": InputDefinition(type=[ValueType.ASSISTANT_DEFINITION])}
)
mocker.patch.object(tool_loader, "load_tool_for_script_node", return_value=(None, tool))
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_tool_from_module",
return_value=(mock_python_func, {}),
)
tool_resolver = ToolResolver(working_dir=Path(__file__).parent, connections={})
tool_resolver._tool_loader = tool_loader
mocker.patch("builtins.open", mock_open())
mocker.patch(
"ruamel.yaml.YAML.load", return_value={"model": "model", "instructions": "instructions", "tools": []}
)
node = Node(
name="mock",
tool=None,
inputs={"input": InputAssignment(value="test_tool_resolver.py", value_type=InputValueType.LITERAL)},
)
resolved_tool = tool_resolver._resolve_script_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 1
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs)
def test_resolve_package_node(self, mocker):
tool_loader = ToolLoader(working_dir=None)
tool = Tool(name="mock", type=ToolType.PYTHON, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
mocker.patch.object(tool_loader, "load_tool_for_package_node", return_value=tool)
mocker.patch(
"promptflow._core.tools_manager.BuiltinsManager._load_package_tool",
return_value=(mock_package_func, {"conn": AzureOpenAIConnection}),
)
connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
tool_resolver._tool_loader = tool_loader
node = Node(
name="mock",
tool=None,
inputs={
"conn": InputAssignment(value="conn_name", value_type=InputValueType.LITERAL),
"prompt": InputAssignment(value="{{text}}", value_type=InputValueType.LITERAL),
"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL),
},
connection="conn_name",
provider="mock",
)
resolved_tool = tool_resolver._resolve_package_node(node, convert_input_types=True)
assert len(resolved_tool.node.inputs) == 2
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
def test_integrate_prompt_in_package_node(self, mocker):
tool_resolver = ToolResolver(working_dir=None, connections={})
mocker.patch.object(
tool_resolver,
"_load_source_content",
return_value="{{text}}",
)
tool = Tool(name="mock", type=ToolType.CUSTOM_LLM, inputs={"prompt": InputDefinition(type=["PromptTemplate"])})
node = Node(
name="mock",
tool=None,
inputs={"text": InputAssignment(value="Hello World!", value_type=InputValueType.LITERAL)},
connection="conn_name",
provider="mock",
)
resolved_tool = ResolvedTool(node=node, callable=mock_package_func, definition=tool, init_args=None)
assert resolved_tool.callable._streaming_option_parameter == "stream_enabled"
resolved_tool = tool_resolver._integrate_prompt_in_package_node(resolved_tool)
assert resolved_tool.callable._streaming_option_parameter == "stream_enabled"
kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()}
assert resolved_tool.callable(**kwargs) == "Hello World!"
@pytest.mark.parametrize(
"conn_types, expected_type",
[
(["MyFirstCSTConnection"], MyFirstCSTConnection),
(["CustomConnection", "MyFirstCSTConnection"], CustomConnection),
(["CustomConnection", "MyFirstCSTConnection", "MySecondCSTConnection"], CustomConnection),
(["MyFirstCSTConnection", "MySecondCSTConnection"], MyFirstCSTConnection),
],
)
def test_convert_to_custom_strong_type_connection_value(self, conn_types: List[str], expected_type, mocker):
connections = {"conn_name": {"type": "CustomConnection", "value": {"api_key": "mock", "api_base": "mock"}}}
tool_resolver = ToolResolver(working_dir=None, connections=connections)
node = mocker.Mock(name="node", tool=None, inputs={})
node.type = ToolType.PYTHON
node.source = mocker.Mock(type=ToolSourceType.Code)
tool = Tool(name="tool", type="python", inputs={"conn": InputDefinition(type=["CustomConnection"])})
m = sys.modules[__name__]
v = InputAssignment(value="conn_name", value_type=InputValueType.LITERAL)
actual = tool_resolver._convert_to_custom_strong_type_connection_value(
"conn_name", v, node, tool, conn_types, m
)
assert isinstance(actual, expected_type)
assert actual.api_base == "mock"
def test_load_source(self):
# Create a mock Node object with a valid source path
node = Node(name="mock", tool=None, inputs={}, source=ToolSource())
node.source.path = "./script_with_special_character/script_with_special_character.py"
resolver = ToolResolver(FLOW_ROOT)
result = resolver._load_source_content(node)
assert "https://www.bing.com/\ue000\ue001/" in result
@pytest.mark.parametrize(
"source",
[
None,
ToolSource(path=None), # Then will try to read one directory.
ToolSource(path=""), # Then will try to read one directory.
ToolSource(path="NotExistPath.py"),
],
)
def test_load_source_error(self, source):
# Create a mock Node object with a valid source path
node = Node(name="mock", tool=None, inputs={}, source=source)
resolver = ToolResolver(FLOW_ROOT)
with pytest.raises(InvalidSource) as _:
resolver._load_source_content(node)
| promptflow/src/promptflow/tests/executor/unittests/executor/test_tool_resolver.py/0 | {
"file_path": "promptflow/src/promptflow/tests/executor/unittests/executor/test_tool_resolver.py",
"repo_id": "promptflow",
"token_count": 10851
} | 51 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import contextlib
import os
import sys
import uuid
from typing import Callable
import pytest
from mock.mock import patch
from promptflow._constants import PF_USER_AGENT
from promptflow._core.operation_context import OperationContext
from promptflow._sdk._utils import ClientUserAgentUtil
from promptflow._sdk.entities import Run
from promptflow._utils.utils import environment_variable_overwrite, parse_ua_to_dict
from promptflow.azure import PFClient
from .._azure_utils import DEFAULT_TEST_TIMEOUT, PYTEST_TIMEOUT_METHOD
from ..recording_utilities import is_live
FLOWS_DIR = "./tests/test_configs/flows"
DATAS_DIR = "./tests/test_configs/datas"
RUNS_DIR = "./tests/test_configs/runs"
# TODO: move this to a shared utility module
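# Runs the pfazure CLI in-process, appending workspace scope args from the given PFClient and restoring
# sys.argv and the working directory afterwards, e.g. run_pf_command("run", "show", "--name", "<run-name>", pf=pf).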
def run_pf_command(*args, pf, runtime=None, cwd=None):
from promptflow._cli._pf_azure.entry import main
origin_argv, origin_cwd = sys.argv, os.path.abspath(os.curdir)
try:
sys.argv = (
["pfazure"]
+ list(args)
+ [
"--subscription",
pf._ml_client.subscription_id,
"--resource-group",
pf._ml_client.resource_group_name,
"--workspace-name",
pf._ml_client.workspace_name,
]
)
if runtime:
sys.argv += ["--runtime", runtime]
if cwd:
os.chdir(cwd)
main()
finally:
sys.argv = origin_argv
os.chdir(origin_cwd)
@pytest.mark.timeout(timeout=DEFAULT_TEST_TIMEOUT, method=PYTEST_TIMEOUT_METHOD)
@pytest.mark.e2etest
@pytest.mark.usefixtures(
"mock_get_azure_pf_client",
"mock_set_headers_with_user_aml_token",
"single_worker_thread_pool",
"vcr_recording",
)
class TestCliWithAzure:
def test_basic_flow_run_bulk_without_env(self, pf, runtime: str, randstr: Callable[[str], str]) -> None:
name = randstr("name")
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/web_classification",
"--data",
f"{DATAS_DIR}/webClassification3.jsonl",
"--name",
name,
pf=pf,
runtime=runtime,
)
run = pf.runs.get(run=name)
assert isinstance(run, Run)
@pytest.mark.skip("Custom tool pkg and promptprompt pkg with CustomStrongTypeConnection not installed on runtime.")
def test_basic_flow_with_package_tool_with_custom_strong_type_connection(self, pf, runtime) -> None:
name = str(uuid.uuid4())
run_pf_command(
"run",
"create",
"--flow",
f"{FLOWS_DIR}/flow_with_package_tool_with_custom_strong_type_connection",
"--data",
f"{FLOWS_DIR}/flow_with_package_tool_with_custom_strong_type_connection/data.jsonl",
"--name",
name,
pf=pf,
runtime=runtime,
)
run = pf.runs.get(run=name)
assert isinstance(run, Run)
def test_run_with_remote_data(
self, pf, runtime: str, remote_web_classification_data, randstr: Callable[[str], str]
) -> None:
# run with arm id
name = randstr("name1")
run_pf_command(
"run",
"create",
"--flow",
"web_classification",
"--data",
f"azureml:{remote_web_classification_data.id}",
"--name",
name,
pf=pf,
runtime=runtime,
cwd=f"{FLOWS_DIR}",
)
run = pf.runs.get(run=name)
assert isinstance(run, Run)
# run with name version
name = randstr("name2")
run_pf_command(
"run",
"create",
"--flow",
"web_classification",
"--data",
f"azureml:{remote_web_classification_data.name}:{remote_web_classification_data.version}",
"--name",
name,
pf=pf,
runtime=runtime,
cwd=f"{FLOWS_DIR}",
)
run = pf.runs.get(run=name)
assert isinstance(run, Run)
def test_run_file_with_set(self, pf, runtime: str, randstr: Callable[[str], str]) -> None:
name = randstr("name")
run_pf_command(
"run",
"create",
"--file",
f"{RUNS_DIR}/run_with_env.yaml",
"--set",
f"runtime={runtime}",
"--name",
name,
pf=pf,
)
run = pf.runs.get(run=name)
assert isinstance(run, Run)
assert run.properties["azureml.promptflow.runtime_name"] == runtime
@pytest.mark.skipif(condition=not is_live(), reason="This test requires an actual PFClient")
def test_azure_cli_ua(self, pf: PFClient):
# clear user agent before test
context = OperationContext().get_instance()
context.user_agent = ""
with environment_variable_overwrite(PF_USER_AGENT, ""):
with pytest.raises(SystemExit):
run_pf_command(
"run",
"show",
"--name",
"not_exist",
pf=pf,
)
user_agent = ClientUserAgentUtil.get_user_agent()
ua_dict = parse_ua_to_dict(user_agent)
assert ua_dict.keys() == {"promptflow-sdk", "promptflow-cli"}
def test_cli_telemetry(self, pf, runtime: str, randstr: Callable[[str], str]) -> None:
name = randstr("name")
@contextlib.contextmanager
def check_workspace_info(*args, **kwargs):
if "custom_dimensions" in kwargs:
assert kwargs["custom_dimensions"]["workspace_name"] == pf._ml_client.workspace_name
assert kwargs["custom_dimensions"]["resource_group_name"] == pf._ml_client.resource_group_name
assert kwargs["custom_dimensions"]["subscription_id"] == pf._ml_client.subscription_id
yield None
with patch("promptflow._sdk._telemetry.activity.log_activity") as mock_log_activity:
mock_log_activity.side_effect = check_workspace_info
run_pf_command(
"run",
"create",
"--file",
f"{RUNS_DIR}/run_with_env.yaml",
"--set",
f"runtime={runtime}",
"--name",
name,
pf=pf,
)
| promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli_with_azure.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli_with_azure.py",
"repo_id": "promptflow",
"token_count": 3368
} | 52 |
import contextlib
import os
import sys
from pathlib import Path
from typing import List
from unittest.mock import MagicMock, patch
import pandas as pd
import pytest
from pytest_mock import MockFixture
from promptflow._sdk._constants import VIS_PORTAL_URL_TMPL
tests_root_dir = Path(__file__).parent.parent.parent
flow_test_dir = tests_root_dir / "test_configs/flows"
data_dir = tests_root_dir / "test_configs/datas"
def run_pf_command(*args, cwd=None):
from promptflow._cli._pf_azure.entry import main
origin_argv, origin_cwd = sys.argv, os.path.abspath(os.curdir)
try:
sys.argv = ["pfazure"] + list(args)
if cwd:
os.chdir(cwd)
main()
finally:
sys.argv = origin_argv
os.chdir(origin_cwd)
@pytest.fixture
def operation_scope_args(subscription_id: str, resource_group_name: str, workspace_name: str):
return [
"--subscription",
subscription_id,
"--resource-group",
resource_group_name,
"--workspace-name",
workspace_name,
]
@pytest.mark.usefixtures("mock_get_azure_pf_client")
@pytest.mark.unittest
class TestAzureCli:
def test_pf_azure_version(self, capfd):
run_pf_command("--version")
out, err = capfd.readouterr()
assert "0.0.1\n" in out
def test_run_show(self, mocker: MockFixture, operation_scope_args):
from promptflow.azure.operations._run_operations import RunOperations
mocked = mocker.patch.object(RunOperations, "get")
# show_run will print the run object, so we need to mock the return value
mocked.return_value._to_dict.return_value = {"name": "test_run"}
run_pf_command(
"run",
"show",
"--name",
"test_run",
*operation_scope_args,
)
mocked.assert_called_once()
def test_run_show_details(self, mocker: MockFixture, operation_scope_args):
from promptflow.azure.operations._run_operations import RunOperations
mocked = mocker.patch.object(RunOperations, "get_details")
# show_run_details will print details, so we need to mock the return value
mocked.return_value = pd.DataFrame([{"input": "input_value", "output": "output_value"}])
run_pf_command(
"run",
"show-details",
"--name",
"test_run",
"--max-results",
"10",
"--all-results",
*operation_scope_args,
)
mocked.assert_called_once()
def test_run_show_metrics(self, mocker: MockFixture, operation_scope_args):
from promptflow.azure.operations._run_operations import RunOperations
mocked = mocker.patch.object(RunOperations, "get_metrics")
# show_metrics will print the metrics, so we need to mock the return value
mocked.return_value = {"accuracy": 0.9}
run_pf_command(
"run",
"show-metrics",
"--name",
"test_run",
*operation_scope_args,
)
mocked.assert_called_once()
def test_run_list_runs(
self,
mocker: MockFixture,
operation_scope_args,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
):
from promptflow.azure.operations._run_operations import RunOperations
mocked_run = MagicMock()
mocked_run._to_dict.return_value = {"name": "test_run"}
mocked = mocker.patch.object(RunOperations, "list")
# list_runs will print the run list, so we need to mock the return value
mocked.return_value = [mocked_run]
run_pf_command(
"run",
"list",
"--max-results",
"10",
"--include-archived",
*operation_scope_args,
)
run_pf_command(
"run",
"list",
"--max-results",
"10",
"--include-archived",
"--output",
"table",
*operation_scope_args,
)
mocker.patch.dict(
os.environ,
{
"AZUREML_ARM_WORKSPACE_NAME": workspace_name,
"AZUREML_ARM_SUBSCRIPTION": subscription_id,
"AZUREML_ARM_RESOURCEGROUP": resource_group_name,
},
)
run_pf_command(
"run",
"list",
"--max-results",
"10",
"--include-archived",
)
assert mocked.call_count == 3
def test_run_visualize(
self,
operation_scope_args: List[str],
capfd: pytest.CaptureFixture,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
) -> None:
# cloud version visualize is actually a string concatenation
names = "name1,name2,name3"
run_pf_command(
"run",
"visualize",
"--names",
names,
*operation_scope_args,
)
captured = capfd.readouterr()
expected_portal_url = VIS_PORTAL_URL_TMPL.format(
subscription_id=subscription_id,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
names=names,
)
assert expected_portal_url in captured.out
def test_run_archive(
self,
mocker: MockFixture,
operation_scope_args,
):
from promptflow.azure.operations._run_operations import RunOperations
mocked = mocker.patch.object(RunOperations, "archive")
mocked.return_value._to_dict.return_value = {"name": "test_run"}
run_pf_command(
"run",
"archive",
"--name",
"test_run",
*operation_scope_args,
)
mocked.assert_called_once()
def test_run_restore(
self,
mocker: MockFixture,
operation_scope_args,
):
from promptflow.azure.operations._run_operations import RunOperations
mocked = mocker.patch.object(RunOperations, "restore")
mocked.return_value._to_dict.return_value = {"name": "test_run"}
run_pf_command(
"run",
"restore",
"--name",
"test_run",
*operation_scope_args,
)
mocked.assert_called_once()
def test_run_update(
self,
mocker: MockFixture,
operation_scope_args,
):
from promptflow.azure.operations._run_operations import RunOperations
mocked = mocker.patch.object(RunOperations, "update")
mocked.return_value._to_dict.return_value = {"name": "test_run"}
run_pf_command(
"run",
"update",
"--name",
"test_run",
"--set",
"display_name=test_run",
"description='test_description'",
"tags.key1=value1",
*operation_scope_args,
)
mocked.assert_called_once()
def test_flow_create(
self,
mocker: MockFixture,
operation_scope_args,
):
from promptflow.azure.operations._flow_operations import FlowOperations
mocked = mocker.patch.object(FlowOperations, "create_or_update")
mocked.return_value._to_dict.return_value = {"name": "test_run"}
flow_dir = Path(flow_test_dir, "web_classification").resolve().as_posix()
run_pf_command(
"flow",
"create",
"--flow",
flow_dir,
"--set",
"display_name=test_flow",
"type=standard",
"description='test_description'",
"tags.key1=value1",
*operation_scope_args,
)
mocked.assert_called_with(
flow=flow_dir,
display_name="test_flow",
type="standard",
description="test_description",
tags={"key1": "value1"},
)
def test_flow_create_with_unknown_field(self, mocker: MockFixture, operation_scope_args):
from promptflow.azure.operations._flow_operations import FlowOperations
mocked = mocker.patch.object(FlowOperations, "create_or_update")
mocked.return_value._to_dict.return_value = {"name": "test_run"}
flow_dir = Path(flow_test_dir, "web_classification").resolve().as_posix()
run_pf_command(
"flow",
"create",
"--flow",
flow_dir,
"--set",
"random_key=random_value",
*operation_scope_args,
)
mocked.assert_called_with(flow=flow_dir, random_key="random_value")
def test_flow_list(
self,
mocker: MockFixture,
operation_scope_args,
):
from promptflow.azure.operations._flow_operations import FlowOperations
mocked_flow = MagicMock()
mocked_flow._to_dict.return_value = {"name": "test_flow"}
mocked = mocker.patch.object(FlowOperations, "list")
mocked.return_value = [mocked_flow]
run_pf_command(
"flow",
"list",
"--max-results",
"10",
"--include-archived",
"--type",
"standard",
"--include-others",
"--output",
"table",
*operation_scope_args,
)
mocked.assert_called_once()
def test_run_telemetry(
self,
mocker: MockFixture,
operation_scope_args,
subscription_id: str,
resource_group_name: str,
workspace_name: str,
):
from promptflow.azure.operations._run_operations import RunOperations
mocked_run = MagicMock()
mocked_run._to_dict.return_value = {"name": "test_run"}
mocked = mocker.patch.object(RunOperations, "list")
# list_runs will print the run list, so we need to mock the return value
mocked.return_value = [mocked_run]
mocker.patch.dict(
os.environ,
{
"AZUREML_ARM_WORKSPACE_NAME": workspace_name,
"AZUREML_ARM_SUBSCRIPTION": subscription_id,
"AZUREML_ARM_RESOURCEGROUP": resource_group_name,
},
)
@contextlib.contextmanager
def check_workspace_info(*args, **kwargs):
if "custom_dimensions" in kwargs:
assert kwargs["custom_dimensions"]["workspace_name"] == workspace_name
assert kwargs["custom_dimensions"]["resource_group_name"] == resource_group_name
assert kwargs["custom_dimensions"]["subscription_id"] == subscription_id
yield None
with patch("promptflow._sdk._telemetry.activity.log_activity") as mock_log_activity:
mock_log_activity.side_effect = check_workspace_info
run_pf_command(
"run",
"list",
"--max-results",
"10",
"--include-archived",
*operation_scope_args,
)
def test_run_download(self, mocker: MockFixture, operation_scope_args):
from promptflow.azure.operations._run_operations import RunOperations
mocked = mocker.patch.object(RunOperations, "download")
mocked.return_value = "fake_output_run_dir"
run_pf_command(
"run",
"download",
"--name",
"test_run",
"--output",
"fake_output_dir",
"--overwrite",
*operation_scope_args,
)
mocked.assert_called_once()
def test_run_cancel(self, mocker: MockFixture, operation_scope_args):
from promptflow.azure.operations._run_operations import RunOperations
mocked = mocker.patch.object(RunOperations, "cancel")
run_pf_command(
"run",
"cancel",
"--name",
"test_run",
*operation_scope_args,
)
mocked.assert_called_once()
| promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_cli.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_azure_test/unittests/test_cli.py",
"repo_id": "promptflow",
"token_count": 5953
} | 53 |
ENVIRON_TEST_MODE = "PROMPT_FLOW_TEST_MODE"
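# Record-and-replay modes for tests: "live" calls real services, "record" calls real
# services while capturing responses, and "replay" serves the captured responses.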
class RecordMode:
LIVE = "live"
RECORD = "record"
REPLAY = "replay"
| promptflow/src/promptflow/tests/sdk_cli_test/recording_utilities/constants.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/recording_utilities/constants.py",
"repo_id": "promptflow",
"token_count": 60
} | 54 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import importlib.util
from pathlib import Path
import pytest
TOOL_DIR = Path("./tests/test_configs/tools")
@pytest.mark.unittest
class TestTool:
def get_tool_meta_by_path(self, client, tool_path, module_name):
# Load the module from the file path
spec = importlib.util.spec_from_file_location(module_name, tool_path)
tool_module = importlib.util.module_from_spec(spec)
# Load the module's code
spec.loader.exec_module(tool_module)
        # Generate the metadata of the tools defined in the module
tool_meta = client.tools._generate_tool_meta(tool_module)
return tool_meta
def test_python_tool_meta(self, pf):
tool_path = TOOL_DIR / "python_tool.py"
tools_meta, _ = self.get_tool_meta_by_path(pf, tool_path, "python_tool")
# Get python script tool meta
expect_tools_meta = {
"python_tool.my_python_tool": {
"name": "python_tool",
"type": "python",
"inputs": {"input1": {"type": ["string"]}},
"module": "python_tool",
"function": "my_python_tool",
},
"python_tool.my_python_tool_without_name": {
"name": "my_python_tool_without_name",
"type": "python",
"inputs": {"input1": {"type": ["string"]}},
"module": "python_tool",
"function": "my_python_tool_without_name",
},
"python_tool.PythonTool.python_tool": {
"name": "PythonTool.python_tool",
"type": "python",
"inputs": {"connection": {"type": ["AzureOpenAIConnection"]}, "input1": {"type": ["string"]}},
"module": "python_tool",
"class_name": "PythonTool",
"function": "python_tool",
},
}
assert tools_meta == expect_tools_meta
def test_custom_tool_meta(self, pf):
tool_path = TOOL_DIR / "custom_llm_tool.py"
tools_meta, _ = self.get_tool_meta_by_path(pf, tool_path, "custom_llm_tool")
expect_meta = {
"custom_llm_tool.TestCustomLLMTool.tool_func": {
"class_name": "TestCustomLLMTool",
"description": "This is a tool to demonstrate the custom_llm tool type",
"enable_kwargs": True,
"function": "tool_func",
"inputs": {"api": {"type": ["string"]}, "connection": {"type": ["AzureOpenAIConnection"]}},
"module": "custom_llm_tool",
"name": "My Custom LLM Tool",
"type": "custom_llm",
},
"custom_llm_tool.my_tool": {
"description": "This is a tool to demonstrate the custom_llm tool type",
"enable_kwargs": True,
"function": "my_tool",
"inputs": {"connection": {"type": ["CustomConnection"]}},
"module": "custom_llm_tool",
"name": "My Custom LLM Tool",
"type": "custom_llm",
},
}
assert tools_meta == expect_meta
| promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_tool.py/0 | {
"file_path": "promptflow/src/promptflow/tests/sdk_cli_test/unittests/test_tool.py",
"repo_id": "promptflow",
"token_count": 1585
} | 55 |
$schema: https://azuremlschemas.azureedge.net/promptflow/latest/AzureContentSafetyConnection.schema.json
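# Sample connection spec; assuming the standard pf CLI, it can be registered with
# `pf connection create -f azure_content_safety_connection.yaml`.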
name: my_azure_content_safety_connection
type: azure_content_safety # snake case
api_key: "<to-be-replaced>"
endpoint: "endpoint"
api_version: "2023-04-30-preview"
api_type: Content Safety
| promptflow/src/promptflow/tests/test_configs/connections/azure_content_safety_connection.yaml/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/connections/azure_content_safety_connection.yaml",
"repo_id": "promptflow",
"token_count": 106
} | 56 |
{"key": {"key": "value in data"}}
| promptflow/src/promptflow/tests/test_configs/datas/dictInput1.jsonl/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/datas/dictInput1.jsonl",
"repo_id": "promptflow",
"token_count": 13
} | 57 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
def my_flow():
"""Simple flow without yaml."""
print("Hello world!")
| promptflow/src/promptflow/tests/test_configs/eager_flows/flow_with_environment/entry.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/flow_with_environment/entry.py",
"repo_id": "promptflow",
"token_count": 52
} | 58 |
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
def my_flow(input_val: str = "gpt") -> str:
"""Simple flow without yaml."""
return f"Hello world! {input_val}"
| promptflow/src/promptflow/tests/test_configs/eager_flows/simple_with_yaml/entry.py/0 | {
"file_path": "promptflow/src/promptflow/tests/test_configs/eager_flows/simple_with_yaml/entry.py",
"repo_id": "promptflow",
"token_count": 70
} | 59 |