promptflow_repo/promptflow/scripts/docs/_static/custom.css
.title {
  font-weight: 700;
}

.sd-card-header {
  font-weight: 700;
  font-size: 16px;
}

.bd-page-width {
  max-width: 100rem;
}

.bd-sidebar-primary {
  flex: 0 0 20%;
}

.bd-main .bd-content .bd-article-container {
  max-width: 70em;
}

html[data-theme="light"] {
  --header-announcement-color: #fff070;
}

html[data-theme="dark"] {
  --header-announcement-color: #4d4d00;
}

.bd-header-announcement {
  background: var(--header-announcement-color);
}

/* (A) LIGHTBOX BACKGROUND */
#lightbox {
  /* (A1) COVERS FULLSCREEN */
  position: fixed;
  z-index: 1060;
  top: 0;
  left: 0;
  width: 100%;
  height: 100%;

  /* (A2) BACKGROUND */
  background: rgba(0, 0, 0, 0.5);

  /* (A3) CENTER IMAGE ON SCREEN */
  display: flex;
  align-items: center;
  justify-content: center;

  /* (A4) HIDDEN BY DEFAULT */
  visibility: hidden;
  opacity: 0;

  /* (A5) SHOW/HIDE ANIMATION */
  transition: opacity ease 0.4s;
}

/* (A6) TOGGLE VISIBILITY */
#lightbox.show {
  visibility: visible;
  opacity: 1;
}

/* (B) LIGHTBOX IMAGE */
#lightbox img {
  /* (B1) DIMENSIONS */
  width: 100%;
  height: 100%;

  /* (B2) IMAGE FIT */
  /* contain | cover | fill | scale-down */
  object-fit: contain;
}
promptflow_repo/promptflow/scripts/docs/_static/logo.svg
<svg width="512" height="512" viewBox="0 0 512 512" fill="none" xmlns="http://www.w3.org/2000/svg"> <g clip-path="url(#clip0_699_15212)"> <path fill-rule="evenodd" clip-rule="evenodd" d="M237 39.0408V461.693C237 469.397 228.655 474.208 221.988 470.346L151.918 429.764C130.306 417.247 117 394.164 117 369.19V148.892C117 123.917 130.306 100.834 151.918 88.3177L237 39.0408Z" fill="url(#paint0_linear_699_15212)"/> <path d="M395.075 127.51L237 39V167.541L283.451 192.041L395.075 127.51Z" fill="url(#paint1_linear_699_15212)"/> <path d="M395.075 127.51L237 39V167.541L283.451 192.041L395.075 127.51Z" fill="url(#paint2_linear_699_15212)"/> <path fill-rule="evenodd" clip-rule="evenodd" d="M255.5 231.426C255.5 217.184 263.073 204.017 275.382 196.854L395 127.248V216.101C395 241.03 381.742 264.078 360.193 276.611L270.528 328.76C263.861 332.637 255.5 327.828 255.5 320.116L255.5 231.426Z" fill="url(#paint3_linear_699_15212)"/> </g> <defs> <linearGradient id="paint0_linear_699_15212" x1="196.286" y1="183.041" x2="270.786" y2="92.5087" gradientUnits="userSpaceOnUse"> <stop stop-color="#3272ED"/> <stop offset="1" stop-color="#AF7BD6"/> </linearGradient> <linearGradient id="paint1_linear_699_15212" x1="457.98" y1="131.313" x2="260.351" y2="133.014" gradientUnits="userSpaceOnUse"> <stop stop-color="#DA7ED0"/> <stop offset="0.05" stop-color="#B77BD4"/> <stop offset="0.11" stop-color="#9079DA"/> <stop offset="0.18" stop-color="#6E77DF"/> <stop offset="0.25" stop-color="#5175E3"/> <stop offset="0.33" stop-color="#3973E7"/> <stop offset="0.42" stop-color="#2772E9"/> <stop offset="0.54" stop-color="#1A71EB"/> <stop offset="0.813361" stop-color="#1371EC"/> <stop offset="1" stop-color="#064495"/> </linearGradient> <linearGradient id="paint2_linear_699_15212" x1="210.18" y1="4.19164" x2="307.181" y2="175.949" gradientUnits="userSpaceOnUse"> <stop stop-color="#712575"/> <stop offset="0.09" stop-color="#9A2884"/> <stop offset="0.18" stop-color="#BF2C92"/> <stop offset="0.27" stop-color="#DA2E9C"/> <stop offset="0.34" stop-color="#EB30A2"/> <stop offset="0.4" stop-color="#F131A5"/> <stop offset="0.5" stop-color="#EC30A3"/> <stop offset="0.61" stop-color="#DF2F9E"/> <stop offset="0.72" stop-color="#C92D96"/> <stop offset="0.83" stop-color="#AA2A8A"/> <stop offset="0.95" stop-color="#83267C"/> <stop offset="1" stop-color="#712575"/> </linearGradient> <linearGradient id="paint3_linear_699_15212" x1="308" y1="260.041" x2="307.043" y2="133.204" gradientUnits="userSpaceOnUse"> <stop stop-color="#1D5CD6"/> <stop offset="1" stop-color="#787BE5"/> </linearGradient> <clipPath id="clip0_699_15212"> <rect width="512" height="512" fill="white"/> </clipPath> </defs> </svg>
promptflow_repo/promptflow/scripts/docs/gallery_directive/__init__.py
"""A directive to generate a gallery of images from structured data. Generating a gallery of images that are all the same size is a common pattern in documentation, and this can be cumbersome if the gallery is generated programmatically. This directive wraps this particular use-case in a helper-directive to generate it with a single YAML configuration file. It currently exists for maintainers of the pydata-sphinx-theme, but might be abstracted into a standalone package if it proves useful. """ from yaml import safe_load from typing import List from pathlib import Path from docutils import nodes from docutils.parsers.rst import directives from sphinx.util.docutils import SphinxDirective from sphinx.util import logging logger = logging.getLogger(__name__) TEMPLATE_GRID = """ `````{{grid}} {grid_columns} {container_options} {content} ````` """ GRID_CARD = """ ````{{grid-item-card}} {title} {card_options} {content} ```` """ class GalleryDirective(SphinxDirective): """A directive to show a gallery of images and links in a grid.""" name = "gallery-grid" has_content = True required_arguments = 0 optional_arguments = 1 final_argument_whitespace = True option_spec = { # A class to be added to the resulting container "grid-columns": directives.unchanged, "class-container": directives.unchanged, "class-card": directives.unchanged, } def run(self) -> List[nodes.Node]: # noqa: C901 if self.arguments: # If an argument is given, assume it's a path to a YAML file # Parse it and load it into the directive content path_data_rel = Path(self.arguments[0]) path_doc, _ = self.get_source_info() path_doc = Path(path_doc).parent path_data = (path_doc / path_data_rel).resolve() if not path_data.exists(): logger.warn(f"Could not find grid data at {path_data}.") nodes.text("No grid data found at {path_data}.") return yaml_string = path_data.read_text() else: yaml_string = "\n".join(self.content) # Read in YAML so we can generate the gallery grid_data = safe_load(yaml_string) grid_items = [] for item in grid_data: # Grid card parameters options = {} if "website" in item: options["link"] = item["website"] if "class-card" in self.options: options["class-card"] = self.options["class-card"] if "img-background" in item: options["img-background"] = item["img-background"] if "img-top" in item: options["img-top"] = item["img-top"] if "img-bottom" in item: options["img-bottom"] = item["img-bottom"] options_str = "\n".join(f":{k}: {v}" for k, v in options.items()) + "\n\n" # Grid card content content_str = "" if "header" in item: content_str += f"{item['header']}\n\n^^^\n\n" if "image" in item: content_str += f"![Gallery image]({item['image']})\n\n" if "content" in item: content_str += f"{item['content']}\n\n" if "footer" in item: content_str += f"+++\n\n{item['footer']}\n\n" title = item.get("title", "") content_str += "\n" grid_items.append( GRID_CARD.format( card_options=options_str, content=content_str, title=title ) ) # Parse the template with Sphinx Design to create an output container = nodes.container() # Prep the options for the template grid container_options = {"gutter": 2, "class-container": "gallery-directive"} if "class-container" in self.options: container_options[ "class-container" ] += f' {self.options["class-container"]}' container_options_str = "\n".join( f":{k}: {v}" for k, v in container_options.items() ) # Create the directive string for the grid grid_directive = TEMPLATE_GRID.format( grid_columns=self.options.get("grid-columns", "1 2 3 4"), container_options=container_options_str, 
content="\n".join(grid_items), ) # Parse content as a directive so Sphinx Design processes it self.state.nested_parse([grid_directive], 0, container) # Sphinx Design outputs a container too, so just use that container = container.children[0] # Add extra classes if self.options.get("container-class", []): container.attributes["classes"] += self.options.get("class", []) return [container]
promptflow_repo/promptflow/scripts/json_schema/EagerFlow.schema.json
{ "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { "EagerFlowSchema": { "properties": { "additional_includes": { "title": "additional_includes", "type": "array", "items": { "title": "additional_includes", "type": "string" } }, "description": { "title": "description", "type": "string" }, "display_name": { "title": "display_name", "type": "string" }, "entry": { "title": "entry", "type": "string" }, "environment": { "title": "environment", "type": "object", "additionalProperties": {} }, "language": { "title": "language", "type": "string" }, "path": { "title": "path", "type": "string" }, "$schema": { "title": "$schema", "type": "string", "readOnly": true }, "tags": { "title": "tags", "type": "object", "additionalProperties": { "title": "tags", "type": "string" } }, "type": { "title": "type", "type": "string", "enum": [ "standard", "evaluation", "chat" ], "enumNames": [] } }, "type": "object", "required": [ "entry", "path" ], "additionalProperties": false } }, "$ref": "#/definitions/EagerFlowSchema" }
promptflow_repo/promptflow/scripts/json_schema/gen_json_schema.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

# flake8: noqa

# This file is part of scripts\generate_json_schema.py in sdk-cli-v2, which is used to generate json schema.
# To use this script, run `python <this_file>` in a promptflow env,
# and the json schema will be generated in the same folder.

import json
from inspect import isclass

from azure.ai.ml._schema import ExperimentalField
from marshmallow import Schema, fields, missing
from marshmallow.class_registry import get_class
from marshmallow_jsonschema import JSONSchema

from promptflow._sdk.schemas._base import YamlFileSchema
from promptflow._sdk.schemas._fields import UnionField


class PatchedJSONSchema(JSONSchema):
    required = fields.Method("get_required")
    properties = fields.Method("get_properties")

    def __init__(self, *args, **kwargs):
        """Setup internal cache of nested fields, to prevent recursion.

        :param bool props_ordered: if `True`, the order of properties is preserved as declared
            in the class; otherwise sorting is used. Default is `False`.
            Note: for the marshmallow schema, ordering of fields also needs to be enabled
            (via `class Meta`, attribute `ordered`).
        """
        self._nested_schema_classes = {}
        self.nested = kwargs.pop("nested", False)
        self.props_ordered = kwargs.pop("props_ordered", False)
        setattr(self.opts, "ordered", self.props_ordered)
        super().__init__(*args, **kwargs)

    # cspell: ignore pytype
    def _from_python_type(self, obj, field, pytype):
        metadata = field.metadata.get("metadata", {})
        metadata.update(field.metadata)
        # This is in the upcoming release of marshmallow-jsonschema, but not available yet
        if isinstance(field, fields.Dict):
            values = metadata.get("values", None) or field.value_field
            json_schema = {"title": field.attribute or field.data_key or field.name}
            json_schema["type"] = "object"
            if values:
                values.parent = field
            json_schema["additionalProperties"] = self._get_schema_for_field(obj, values) if values else {}
            return json_schema
        if isinstance(field, fields.Raw):
            json_schema = {"title": field.attribute or field.data_key or field.name}
            return json_schema

        return super()._from_python_type(obj, field, pytype)

    def _get_schema_for_field(self, obj, field):
        """Get schema and validators for field."""
        if hasattr(field, "_jsonschema_type_mapping"):
            schema = field._jsonschema_type_mapping()  # pylint: disable=protected-access
        elif "_jsonschema_type_mapping" in field.metadata:
            schema = field.metadata["_jsonschema_type_mapping"]
        else:
            if isinstance(field, UnionField):
                schema = self._get_schema_for_union_field(obj, field)
            elif isinstance(field, ExperimentalField):
                schema = self._get_schema_for_field(obj, field.experimental_field)
            elif isinstance(field, fields.Constant):
                schema = {"const": field.constant}
            else:
                schema = super()._get_schema_for_field(obj, field)
        if field.data_key:
            schema["title"] = field.data_key
        return schema

    def _get_schema_for_union_field(self, obj, field):
        has_yaml_option = False
        schemas = []
        for field_item in field._union_fields:  # pylint: disable=protected-access
            if isinstance(field_item, fields.Nested) and isinstance(field_item.schema, YamlFileSchema):
                has_yaml_option = True
            schemas.append(self._get_schema_for_field(obj, field_item))
        if has_yaml_option:
            schemas.append({"type": "string", "pattern": "^file:.*"})
        if field.allow_none:
            schemas.append({"type": "null"})
        if field.is_strict:
            schema = {"oneOf": schemas}
        else:
            schema = {"anyOf": schemas}
        # This happens in the super() call to get_schema; doing it here allows for adding
        # descriptions and other schema attributes from marshmallow metadata
        metadata = field.metadata.get("metadata", {})
        for md_key, md_val in metadata.items():
            if md_key in ("metadata", "name"):
                continue
            schema[md_key] = md_val
        return schema

    def _from_nested_schema(self, obj, field):
        """patch in context for nested field"""
        if isinstance(field.nested, (str, bytes)):
            nested = get_class(field.nested)
        else:
            nested = field.nested

        if isclass(nested) and issubclass(nested, Schema):
            only = field.only
            exclude = field.exclude
            context = getattr(field.parent, "context", {})
            field.nested = nested(only=only, exclude=exclude, context=context)

        return super()._from_nested_schema(obj, field)

    def get_properties(self, obj):
        """Fill out properties field."""
        properties = self.dict_class()

        if self.props_ordered:
            fields_items_sequence = obj.fields.items()
        else:
            fields_items_sequence = sorted(obj.fields.items())

        for _, field in fields_items_sequence:
            schema = self._get_schema_for_field(obj, field)
            properties[field.metadata.get("name") or field.data_key or field.name] = schema
        return properties

    def get_required(self, obj):
        """Fill out required field."""
        required = []

        for _, field in sorted(obj.fields.items()):
            if field.required:
                required.append(field.metadata.get("name") or field.data_key or field.name)

        return required or missing


from promptflow._sdk.schemas._connection import (
    AzureOpenAIConnectionSchema,
    OpenAIConnectionSchema,
    QdrantConnectionSchema,
    CognitiveSearchConnectionSchema,
    SerpConnectionSchema,
    AzureContentSafetyConnectionSchema,
    FormRecognizerConnectionSchema,
    CustomConnectionSchema,
    WeaviateConnectionSchema,
)
from promptflow._sdk.schemas._run import RunSchema
from promptflow._sdk.schemas._flow import FlowSchema, EagerFlowSchema

if __name__ == "__main__":
    cls_list = [FlowSchema, EagerFlowSchema]
    schema_list = []
    for cls in cls_list:
        target_schema = PatchedJSONSchema().dump(cls(context={"base_path": "./"}))
        # print(target_schema)
        file_name = cls.__name__
        file_name = file_name.replace("Schema", "")
        schema_list.append(target_schema["definitions"][cls.__name__])
        print(target_schema)
    schema = {
        "type": "object",
        "oneOf": schema_list,
    }
    with open("Flow.schema.json", "w") as f:
        f.write(json.dumps(schema, indent=4))
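A quick way to sanity-check the generated schema is to validate a flow definition against it. The sketch below assumes the `jsonschema` and `pyyaml` packages are installed; `flow.dag.yaml` is a placeholder path for a flow file, not something this script produces.

```python
# Validate a flow YAML against the generated Flow.schema.json (a sketch).
import json

import yaml
from jsonschema import ValidationError, validate

with open("Flow.schema.json") as f:
    schema = json.load(f)

with open("flow.dag.yaml") as f:  # hypothetical flow definition
    flow = yaml.safe_load(f)

try:
    validate(instance=flow, schema=schema)
    print("flow definition matches the schema")
except ValidationError as e:
    print(f"schema violation: {e.message}")
```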
promptflow_repo/promptflow/scripts/json_schema/Flow.schema.json
{ "type": "object", "oneOf": [ { "properties": { "additional_includes": { "title": "additional_includes", "type": "array", "items": { "title": "additional_includes", "type": "string" } }, "description": { "title": "description", "type": "string" }, "display_name": { "title": "display_name", "type": "string" }, "environment": { "title": "environment", "type": "object", "additionalProperties": {} }, "inputs": { "title": "inputs", "type": "object", "additionalProperties": { "type": "object", "$ref": "#/definitions/FlowInputSchema" } }, "language": { "title": "language", "type": "string" }, "node_variants": { "title": "node_variants", "type": "object", "additionalProperties": { "title": "node_variants", "type": "object", "additionalProperties": {} } }, "nodes": { "title": "nodes", "type": "array", "items": { "title": "nodes", "type": "object", "additionalProperties": {} } }, "outputs": { "title": "outputs", "type": "object", "additionalProperties": { "type": "object", "$ref": "#/definitions/FlowOutputSchema" } }, "$schema": { "title": "$schema", "type": "string", "readOnly": true }, "tags": { "title": "tags", "type": "object", "additionalProperties": { "title": "tags", "type": "string" } }, "type": { "title": "type", "type": "string", "enum": [ "standard", "evaluation", "chat" ], "enumNames": [] } }, "type": "object", "additionalProperties": false }, { "properties": { "additional_includes": { "title": "additional_includes", "type": "array", "items": { "title": "additional_includes", "type": "string" } }, "description": { "title": "description", "type": "string" }, "display_name": { "title": "display_name", "type": "string" }, "entry": { "title": "entry", "type": "string" }, "environment": { "title": "environment", "type": "object", "additionalProperties": {} }, "language": { "title": "language", "type": "string" }, "path": { "title": "path", "type": "string" }, "$schema": { "title": "$schema", "type": "string", "readOnly": true }, "tags": { "title": "tags", "type": "object", "additionalProperties": { "title": "tags", "type": "string" } }, "type": { "title": "type", "type": "string", "enum": [ "standard", "evaluation", "chat" ], "enumNames": [] } }, "type": "object", "required": [ "entry", "path" ], "additionalProperties": false } ] }
promptflow_repo/promptflow/scripts/building/check_enforcer.py
# Enforce the check of pipelines.
# This script gets the diff of the current branch against main and calculates which pipelines
# should be triggered. It then checks whether the triggered pipelines succeeded. The script
# loops for at most 30 * loop_times seconds.

# How many checks are triggered:
# 1. sdk checks: sdk_cli_tests, sdk_cli_azure_test, sdk_cli_global_config_tests are triggered.
# 2. examples checks: this script calculates the path filters and decides what should be triggered.

# Trigger checks and return the status of the checks:
# 1. If examples are not correctly generated, fail.
# 2. If required pipelines are not triggered within 6 rounds of loops, fail.
# 2.1 (The special_care global variable can help pipelines that need to bypass the check.)

# Check whether pipelines succeed:
# 1. These pipelines should return a status within loop_times rounds.
# 2. If any triggered pipeline failed, fail.

# Import necessary libraries
import argparse
import fnmatch
import json
import os
import subprocess
import sys
import time

# Define variables
github_repository = "microsoft/promptflow"
snippet_debug = 1  # Write debug info to console.
merge_commit = ""
loop_times = 30
github_workspace = os.path.expanduser("~/promptflow/")

# Special cases for pipelines that need to be triggered more or less often than the default value 1.
# If 0, the pipeline will be ignored in the check enforcer.
# Please note that the key should be the Job Name in the pipeline.
special_care = {
    "sdk_cli_tests": 4,
    "sdk_cli_azure_test": 4,
    # "samples_connections_connection": 0,
}

# Copied from the original yaml pipelines
checks = {
    "sdk_cli_tests": [
        "src/promptflow/**",
        "scripts/building/**",
        ".github/workflows/promptflow-sdk-cli-test.yml",
    ],
    "sdk_cli_global_config_tests": [
        "src/promptflow/**",
        "scripts/building/**",
        ".github/workflows/promptflow-global-config-test.yml",
    ],
    "sdk_cli_azure_test": [
        "src/promptflow/**",
        "scripts/building/**",
        ".github/workflows/promptflow-sdk-cli-azure-test.yml",
    ],
}

reverse_checks = {}
pipelines = {}
pipelines_count = {}
failed_reason = ""


# Define functions
def trigger_checks(valid_status_array):
    global failed_reason
    global github_repository
    global merge_commit
    global snippet_debug
    global pipelines
    global pipelines_count

    output = subprocess.check_output(
        f"gh api /repos/{github_repository}/commits/{merge_commit}/check-suites?per_page=100",
        shell=True,
    )
    check_suites = json.loads(output)["check_suites"]
    for suite in check_suites:
        if snippet_debug == 1:
            print(f"check-suites id {suite['id']}")
        suite_id = suite["id"]
        output = subprocess.check_output(
            f"gh api /repos/{github_repository}/check-suites/{suite_id}/check-runs?per_page=100",
            shell=True,
        )
        check_runs = json.loads(output)["check_runs"]
        for run in check_runs:
            if snippet_debug == 1:
                print(f"check runs name {run['name']}")
            for key in pipelines.keys():
                value = pipelines[key]
                if value == 0:
                    continue
                if key in run["name"]:
                    pipelines_count[key] += 1
                    valid_status_array.append(run)

    for key in pipelines.keys():
        if pipelines_count[key] < pipelines[key]:
            failed_reason = "Not all pipelines are triggered."


def status_checks(valid_status_array):
    global failed_reason
    global pipelines
    global pipelines_count

    # Basic fact of sdk cli checked pipelines.
    failed_reason = ""

    # Loop through each valid status array.
    for status in valid_status_array:
        # Check if the pipeline was successful.
        if status["conclusion"] and status["conclusion"].lower() == "success":
            # Add 1 to the count of successful pipelines.
            pass
        # Check if the pipeline failed.
        elif status["conclusion"] and status["conclusion"].lower() == "failure":
            failed_reason = "Required pipelines are not successful."
        # Check if the pipeline is still running.
        else:
            if failed_reason == "":
                failed_reason = "Required pipelines are not finished."
            # Print the status of the pipeline to the console.
            print(status["name"] + " is checking.")


def trigger_prepare(input_paths):
    global github_workspace
    global checks
    global reverse_checks
    global pipelines
    global pipelines_count
    global failed_reason
    global special_care

    for input_path in input_paths:
        if "samples_connections_connection" in checks:
            continue
        # Check if the input path contains "examples" or "samples".
        if "examples" in input_path or "samples" in input_path:
            sys.path.append(os.path.expanduser(github_workspace + "/scripts/readme"))
            from readme import main as readme_main

            os.chdir(os.path.expanduser(github_workspace))

            # Get the list of pipelines from the readme file.
            pipelines_samples = readme_main(check=True)
            git_diff_files = [
                item
                for item in subprocess.check_output(["git", "diff", "--name-only", "HEAD"])
                .decode("utf-8")
                .split("\n")
                if item != ""
            ]
            for _ in git_diff_files:
                failed_reason = "Run readme generation before check in"
                return

            # Merge the pipelines from the readme file with the original list of pipelines.
            for key in pipelines_samples.keys():
                value = pipelines_samples[key]
                checks[key] = value

    # Reverse checks.
    for key in checks.keys():
        value = checks[key]
        for path in value:
            if path in reverse_checks:
                reverse_checks[path].append(key)
            else:
                reverse_checks[path] = [key]

    # Render pipelines and pipelines_count using input_paths.
    for input_path in input_paths:
        # Input pattern /**: input_path should match in the middle.
        # Input pattern /*: input_path should match last but one.
        # Other input pattern: input_path should match last.
        keys = [key for key in reverse_checks.keys() if fnmatch.fnmatch(input_path, key)]
        # Loop through each key in the list of keys.
        for key_item in keys:
            # Loop through each pipeline in the list of pipelines.
            for key in reverse_checks[key_item]:
                # Check if the pipeline is in the list of special-care pipelines.
                if key in special_care:
                    pipelines[key] = special_care[key]
                else:
                    pipelines[key] = 1
                # Set the pipeline count to 0.
                pipelines_count[key] = 0


def run_checks():
    global github_repository
    global snippet_debug
    global merge_commit
    global loop_times
    global github_workspace
    global failed_reason

    if merge_commit == "":
        merge_commit = (
            subprocess.check_output(["git", "log", "-1"]).decode("utf-8").split("\n")
        )
        for line in merge_commit:
            if "Merge" in line:
                merge_commit = line.split(" ")[-3]
                break
    if snippet_debug == 1:
        print("MergeCommit " + merge_commit)

    not_started_counter = 5
    os.chdir(github_workspace)

    # Get diff of current branch and main branch.
    diff = (
        subprocess.check_output(["git", "diff", "--name-only", "HEAD", "origin/main"])
        .decode("utf-8")
        .split("\n")
    )

    # Prepare how many pipelines should be triggered.
    trigger_prepare(diff)

    if failed_reason != "":
        raise Exception(failed_reason)

    # Loop for 15 minutes at most.
    for i in range(loop_times):
        # Wait for 30 seconds.
        time.sleep(30)

        # Reset the failed reason.
        failed_reason = ""
        # Reset the valid status array.
        valid_status_array = []

        # Get all triggered pipelines.
        # If not all pipelines are triggered, continue.
        trigger_checks(valid_status_array)
        if failed_reason != "":
            if not_started_counter == 0:
                raise Exception(failed_reason + " for 6 times.")
            print(failed_reason)
            not_started_counter -= 1
            continue

        # Get pipeline conclusion priority:
        # 1. Not successful, Fail.
        # 2. Not finished, Continue.
        # 3. Successful, Break.
        status_checks(valid_status_array)

        # Check if the failed reason contains "not successful".
        if "not successful" in failed_reason.lower():
            raise Exception(failed_reason)
        # Check if the failed reason contains "not finished".
        elif "not finished" in failed_reason.lower():
            print(failed_reason)
            continue
        # Otherwise, print that all required pipelines are successful.
        else:
            print("All required pipelines are successful.")
            break

    # Check if the failed reason is not empty.
    if failed_reason != "":
        raise Exception(failed_reason)


if __name__ == "__main__":
    # Run the checks.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--merge-commit",
        help="merge commit sha",
    )
    parser.add_argument(
        "-n",
        "--loop-times",
        type=int,
        help="Loop times",
    )
    parser.add_argument(
        "-t",
        "--github-workspace",
        help="base path of github workspace",
    )
    args = parser.parse_args()
    if args.merge_commit:
        merge_commit = args.merge_commit
    if args.loop_times:
        loop_times = args.loop_times
    if args.github_workspace:
        github_workspace = args.github_workspace
    run_checks()
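The path-filter logic in `trigger_prepare` boils down to `fnmatch` matching of changed paths against the glob patterns in `checks`. A standalone illustration with made-up paths, not part of the enforcer itself:

```python
# Sketch of the fnmatch-based path filtering used above (hypothetical paths).
import fnmatch

patterns = ["src/promptflow/**", "scripts/building/**"]
changed = ["src/promptflow/_sdk/_utils.py", "docs/README.md"]

for path in changed:
    hits = [p for p in patterns if fnmatch.fnmatch(path, p)]
    # fnmatch's `*` crosses path separators, so `**` behaves like `*` here.
    print(path, "->", hits or "no pipeline triggered")
```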
promptflow_repo/promptflow/scripts/building/utils.py
import logging
import os
import subprocess
import sys
import time
import traceback

module_logger = logging.getLogger(__name__)


class Color:
    PURPLE = "\033[95m"
    CYAN = "\033[96m"
    DARKCYAN = "\033[36m"
    BLUE = "\033[94m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RED = "\033[91m"
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
    END = "\033[0m"


def print_red(message):
    print(Color.RED + message + Color.END)


def print_blue(message):
    print(Color.BLUE + message + Color.END)


def get_test_files(testpath):
    if os.path.isfile(testpath):
        return [testpath]
    else:
        res = []
        for root, dirs, files in os.walk(testpath):
            module_logger.debug("Searching %s for files ending in 'tests.py'", root)
            res.extend([os.path.join(root, file) for file in files if file.endswith("tests.py")])
        return res


def retry(fn, num_attempts=3):
    if num_attempts <= 0:
        raise Exception("Illegal num_attempts: {}".format(num_attempts))
    count = 0
    for _ in range(0, num_attempts):
        try:
            return fn()
        except Exception:
            count += 1
            print("Execution failed on attempt {} out of {}".format(count, num_attempts))
            print("Exception trace:")
            traceback.print_exc()
            if count == num_attempts:
                print("Execution failed after {} attempts".format(count))
                raise


def _run_command(
    commands,
    cwd=None,
    stderr=subprocess.STDOUT,
    shell=False,
    env=None,
    stream_stdout=True,
    throw_on_retcode=True,
    logger=None,
):
    if logger is None:
        logger = module_logger
    if cwd is None:
        cwd = os.getcwd()
    t0 = time.perf_counter()
    try:
        logger.debug("[RunCommand]Executing {0} in {1}".format(commands, cwd))
        out = ""
        p = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=stderr, cwd=cwd, shell=shell, env=env)
        for line in p.stdout:
            line = line.decode("utf-8").rstrip()
            if line and line.strip():
                logger.debug(line)
                if stream_stdout:
                    sys.stdout.write(line)
                    sys.stdout.write("\n")
                out += line
                out += "\n"
        p.communicate()
        retcode = p.poll()
        if throw_on_retcode:
            if retcode:
                raise subprocess.CalledProcessError(retcode, p.args, output=out, stderr=p.stderr)
        return retcode, out
    finally:
        t1 = time.perf_counter()
        logger.debug("[RunCommand] Execution took {0}s for {1} in {2}".format(t1 - t0, commands, cwd))


def run_command(
    commands, cwd=None, stderr=subprocess.STDOUT, shell=False, stream_stdout=True, throw_on_retcode=True, logger=None
):
    return _run_command(
        commands,
        cwd=cwd,
        stderr=stderr,
        shell=shell,
        stream_stdout=stream_stdout,
        throw_on_retcode=throw_on_retcode,
        logger=logger,
    )
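A small usage sketch for the helpers above; the wrapped command is arbitrary and assumes a Python interpreter on PATH:

```python
# Example usage of utils.run_command and utils.retry (a sketch).
from utils import print_blue, retry, run_command


def check_python():
    # run_command returns (retcode, captured_stdout); it raises on non-zero
    # return codes unless throw_on_retcode=False is passed.
    retcode, out = run_command(["python", "--version"])
    return out


# Retry the callable up to 3 times before re-raising the last exception.
output = retry(check_python, num_attempts=3)
print_blue("interpreter: " + output.strip())
```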
promptflow_repo/promptflow/scripts/building/generate_connection_config.py
import argparse
import json
from pathlib import Path

from azure.identity import ClientSecretCredential, DefaultAzureCredential
from azure.keyvault.secrets import SecretClient

CONNECTION_FILE_NAME = "connections.json"
CONNECTION_TPL_FILE_PATH = Path(".") / "src/promptflow" / "dev-connections.json.example"


def get_secret_client(tenant_id: str, client_id: str, client_secret: str) -> SecretClient:
    try:
        if (tenant_id is None) or (client_id is None) or (client_secret is None):
            credential = DefaultAzureCredential()
            client = SecretClient(
                vault_url="https://promptflowprod.vault.azure.net/",
                credential=credential,
            )
        else:
            credential = ClientSecretCredential(tenant_id, client_id, client_secret)
            client = SecretClient(
                vault_url="https://github-promptflow.vault.azure.net/",
                credential=credential,
            )
    except Exception as e:
        print(e)
        raise
    return client


def get_secret(secret_name: str, client: SecretClient):
    secret = client.get_secret(secret_name)
    return secret.value


def list_secret_names(client: SecretClient) -> list:
    secret_properties = client.list_properties_of_secrets()
    return [secret.name for secret in secret_properties]


def fill_key_to_dict(template_dict, keys_dict):
    if not isinstance(template_dict, dict):
        return
    for key, val in template_dict.items():
        if isinstance(val, str) and val in keys_dict:
            template_dict[key] = keys_dict[val]
            continue
        fill_key_to_dict(val, keys_dict)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tenant_id", type=str, help="The tenant id of the service principal")
    parser.add_argument("--client_id", type=str, help="The client id of the service principal")
    parser.add_argument("--client_secret", type=str, help="The client secret of the service principal")
    parser.add_argument("--target_folder", type=str, help="The target folder to save the generated file")
    args = parser.parse_args()

    template_dict = json.loads(open(CONNECTION_TPL_FILE_PATH.resolve().absolute(), "r").read())
    file_path = (
        (Path(".") / args.target_folder / CONNECTION_FILE_NAME)
        .resolve()
        .absolute()
        .as_posix()
    )
    print(f"file_path: {file_path}")

    client = get_secret_client(
        tenant_id=args.tenant_id, client_id=args.client_id, client_secret=args.client_secret
    )
    all_secret_names = list_secret_names(client)
    data = {secret_name: get_secret(secret_name, client) for secret_name in all_secret_names}
    fill_key_to_dict(template_dict, data)

    with open(file_path, "w") as f:
        json.dump(template_dict, f)
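To make the substitution behavior of `fill_key_to_dict` concrete, a small sketch with made-up secret names (the real template lives in dev-connections.json.example, and the function above is assumed to be in scope):

```python
# fill_key_to_dict replaces string values that match a Key Vault secret name
# with the secret's value, recursing into nested dicts (hypothetical data).
template = {
    "azure_open_ai_connection": {
        "value": {
            "api_key": "azure-open-ai-api-key",
            "api_base": "azure-open-ai-api-base",
        }
    }
}
secrets = {
    "azure-open-ai-api-key": "<redacted>",
    "azure-open-ai-api-base": "https://example.openai.azure.com/",
}

fill_key_to_dict(template, secrets)
print(template["azure_open_ai_connection"]["value"]["api_base"])
# -> https://example.openai.azure.com/
```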
promptflow_repo/promptflow/scripts/building/dev_setup.py
import argparse
from pathlib import Path
from platform import system

from utils import print_blue, run_command


def setup_promptflow(extra_deps: list, command_args: dict) -> None:
    print_blue("- Setting up the promptflow SDK ")
    print_blue("- Installing promptflow Python SDK from local directory")
    package_location = f"{Path('./src/promptflow/').absolute()}"
    if extra_deps:
        print_blue(f"- Installing with extra dependencies: {extra_deps}")
        extra_deps = ",".join(extra_deps)
        package_location = f"{package_location}[{extra_deps}]"
    cmds = ["pip", "install", "-e", package_location]
    print_blue(f"Running {cmds}")
    run_command(commands=cmds, **command_args)
    run_command(
        commands=["pip", "install", "-r", str(Path("./src/promptflow/dev_requirements.txt").absolute())],
        **command_args,
    )


if __name__ == "__main__":
    epilog = """
    Sample Usages:
        python scripts/building/dev_setup.py
        python scripts/building/dev_setup.py --promptflow-extra-deps azure
    """
    parser = argparse.ArgumentParser(
        description="Welcome to promptflow dev setup!",
        epilog=epilog,
    )
    parser.add_argument(
        "--promptflow-extra-deps", required=False, nargs="+", type=str, help="extra dependencies for promptflow"
    )
    parser.add_argument("-v", "--verbose", action="store_true", required=False, help="turn on verbose output")
    args = parser.parse_args()
    command_args = {"shell": system() == "Windows", "stream_stdout": args.verbose}
    setup_promptflow(extra_deps=args.promptflow_extra_deps, command_args=command_args)
    run_command(commands=["pre-commit", "install"], **command_args)
promptflow_repo/promptflow/scripts/building/run_coverage_tests.py
import argparse
import os
import sys
from pathlib import Path

from utils import Color, print_red, run_command

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=Color.RED + "Test Coverage for Promptflow!" + Color.END + "\n")
    parser.add_argument("-p", required=True, nargs="+", help="The paths to calculate code coverage")
    parser.add_argument("-t", required=True, nargs="+", help="The path to the tests")
    parser.add_argument("-l", required=True, help="Location to run tests in")
    parser.add_argument(
        "-m",
        required=True,
        help="Pytest marker to identify the tests to run",
        default="all",
    )
    parser.add_argument(
        "-o",
        required=False,
        help="Pytest output file name",
        default="test-results.xml",
    )
    parser.add_argument("-n", help="Pytest number of process to run the tests", default="auto")
    parser.add_argument(
        "--model-name",
        help="The model file name to run the tests",
        type=str,
        default="",
    )
    parser.add_argument("--timeout", help="Timeout for individual tests (seconds)", type=str, default="")
    parser.add_argument(
        "--coverage-config",
        help="The path of code coverage config file",
        type=str,
        default="",
    )
    parser.add_argument(
        "--disable-cov-branch",
        action="store_true",
        help="Whether to disable branch coverage calculation",
    )
    parser.add_argument(
        "--ignore-glob",
        help="The path of ignored test file",
        type=str,
        default="",
    )
    args = parser.parse_args()

    print("Working directory: " + str(os.getcwd()))
    print("Args.p: " + str(args.p))
    print("Args.t: " + str(args.t))
    print("Args.l: " + str(args.l))
    print("Args.m: " + str(args.m))
    print("Args.n: " + str(args.n))
    print("Args.o: " + str(args.o))
    print("Args.model-name: " + str(args.model_name))
    print("Args.timeout: " + str(args.timeout))
    print("Args.coverage-config: " + str(args.coverage_config))
    print("Args.ignore-glob: " + str(args.ignore_glob))
    print("Args.disable-cov-branch: " + str(args.disable_cov_branch))

    test_paths_list = [str(Path(path).absolute()) for path in args.t]

    # display a list of all Python packages installed in the current Python environment
    run_command(["pip", "list"])
    run_command(["pip", "show", "promptflow", "promptflow-sdk"])

    pytest_command = ["pytest", f"--junitxml={args.o}"]
    pytest_command += test_paths_list

    if args.coverage_config:
        if args.p:
            cov_path_list = [f"--cov={path}" for path in args.p]
            pytest_command += cov_path_list
        if not args.disable_cov_branch:
            pytest_command += ["--cov-branch"]
        pytest_command += [  # noqa: W503
            "--cov-report=term",
            "--cov-report=html",
            "--cov-report=xml",
        ]
        pytest_command = pytest_command + [f"--cov-config={args.coverage_config}"]
    if args.ignore_glob:
        pytest_command = pytest_command + [f"--ignore-glob={args.ignore_glob}"]
    pytest_command += [
        "-n",
        args.n,
        "--dist",
        "loadfile",
        "--log-level=info",
        "--log-format=%(asctime)s %(levelname)s %(message)s",
        "--log-date-format=[%Y-%m-%d %H:%M:%S]",
        "--durations=5",
        "-ra",
        "-vv",
    ]

    if args.timeout:
        pytest_command = pytest_command + [
            "--timeout",
            args.timeout,
            "--timeout_method",
            "thread",
        ]

    if args.m != "all":
        pytest_command = pytest_command + ["-m", args.m]

    if args.model_name:
        pytest_command = pytest_command + ["--model-name", args.model_name]

    # pytest --junit-xml=test-results.xml --cov=azure.ai.ml --cov-report=html --cov-report=xml -ra ./tests/*/unittests/
    error_code, _ = run_command(pytest_command, throw_on_retcode=False)
    # https://docs.pytest.org/en/7.1.x/reference/exit-codes.html
    if error_code == 1:
        print_red("Tests were collected and run but some of the tests failed.")
    elif error_code == 2:
        print_red("Test execution was interrupted by the user.")
    elif error_code == 3:
        print_red("Internal error happened while executing tests.")
    elif error_code == 4:
        print_red("pytest command line usage error.")
    elif error_code == 5:
        print_red("No tests were collected.")
    sys.exit(error_code)
promptflow_repo/promptflow/scripts/building/release-env.yml
name: release-env
channels:
  - defaults
  - conda-forge
dependencies:
  - pip
  - pip:
      - setuptools
      - twine==4.0.0
      - portalocker~=1.2
      - setuptools_rust
      - pytest
      - pytest-xdist
      - pytest-sugar
      - pytest-timeout
      - azure-keyvault
      - azure-identity
promptflow_repo/promptflow/scripts/installer/windows/README.md
# Building the Windows MSI Installer

This document provides instructions on creating the MSI installer.

## Option 1: Building with GitHub Actions

Trigger the [workflow](https://github.com/microsoft/promptflow/actions/workflows/build_msi_installer.yml) manually.

## Option 2: Local Building

### Prerequisites

1. Turn on the '.NET Framework 3.5' Windows Feature (required for the WIX Toolset).
2. Install 'Microsoft Build Tools 2015' from https://www.microsoft.com/download/details.aspx?id=48159.
3. Make sure curl.exe, unzip.exe and msbuild.exe are available under PATH.
4. Install the 'WIX Toolset build tools' following the instructions below.
   - Enter the directory where this README is located (`cd scripts/installer/windows`), then `mkdir wix` and `cd wix`.
   - `curl --output wix-archive.zip https://azurecliprod.blob.core.windows.net/msi/wix310-binaries-mirror.zip`
   - `unzip wix-archive.zip` and `del wix-archive.zip`
5. We recommend creating a clean virtual Python environment and installing all dependencies using src/promptflow/setup.py.
   - `python -m venv venv`
   - `venv\Scripts\activate`
   - `pip install promptflow[azure,executable,pfs] promptflow-tools`

### Building

1. Update the version number `$(env.CLI_VERSION)` and `$(env.FILE_VERSION)` in `product.wxs`, `promptflow.wixproj` and `version_info.txt`.
2. `cd scripts/installer/windows/scripts` and run `pyinstaller promptflow.spec`.
3. `cd scripts/installer/windows` and run `msbuild /t:rebuild /p:Configuration=Release /p:Platform=x64 promptflow.wixproj`.
4. The unsigned MSI will be in the `scripts/installer/windows/out` folder.

## Notes

- If you encounter an "Access is denied" error when running promptflow, follow the [instructions](https://learn.microsoft.com/en-us/microsoft-365/security/defender-endpoint/attack-surface-reduction-rules-deployment-implement?view=o365-worldwide#customize-attack-surface-reduction-rules) to add the executable to the Windows Defender Attack Surface Reduction (ASR) rule.
promptflow_repo/promptflow/scripts/installer/windows/product.wxs
<?xml version="1.0" encoding="UTF-8"?> <Wix xmlns="http://schemas.microsoft.com/wix/2006/wi"> <?define ProductVersion="$(env.CLI_VERSION)" ?> <?define ProductName = "promptflow" ?> <?define ProductDescription = "Command-line tools for prompt flow." ?> <?define ProductAuthor = "Microsoft Corporation" ?> <?define ProductResources = ".\resources\" ?> <?define UpgradeCode32 = "8b748161-e07a-48f2-8cdf-401480df4694" ?> <?if $(var.Platform) = "x64" ?> <?define PromptflowCliRegistryGuid = "0efd984f-9eec-425b-b230-a3994b69649a" ?> <?define PromptflowServiceGuid = "d4e99207-77be-4bdf-a430-b08632c5aa2b" ?> <?define PromptflowSystemPathGuid = "4c321045-d4e0-4446-bda4-8c19eaa42af1" ?> <?define ProgramFilesFolder = "ProgramFiles64Folder" ?> <?define RemovePromptflowFolderGuid = "ee843aa5-2b72-4958-be84-53dbac17efc7" ?> <?define UpgradeCode = "772aa21f-f8d4-4771-b910-1dbce3f1920c" ?> <?define Architecture = "64-bit" ?> <?elseif $(var.Platform) = "x86" ?> <?define PromptflowCliRegistryGuid = "7c2c792d-c395-44a1-8222-8e4ea006abb9" ?> <?define PromptflowServiceGuid = "f706b208-a15d-4ae7-9185-cfcc43656570" ?> <?define PromptflowSystemPathGuid = "9661fe6a-ff48-4e7c-a60d-fc34c2d06ef3" ?> <?define ProgramFilesFolder = "ProgramFilesFolder" ?> <?define RemovePromptflowFolderGuid = "588ca5e1-38c6-4659-8b38-762df7ed5b28" ?> <?define UpgradeCode = $(var.UpgradeCode32) ?> <?define Architecture = "32-bit" ?> <?else ?> <?error Unsupported platform "$(var.Platform)" ?> <?endif ?> <Product Id="*" Name="$(var.ProductName) ($(var.Architecture))" Language="1033" Version="$(var.ProductVersion)" Manufacturer="$(var.ProductAuthor)" UpgradeCode="$(var.UpgradeCode)"> <Package InstallerVersion="200" Compressed="yes" InstallScope="perUser" /> <Upgrade Id="$(var.UpgradeCode)"> <UpgradeVersion Property="WIX_UPGRADE_DETECTED" Maximum="$(var.ProductVersion)" IncludeMaximum="no" MigrateFeatures="yes" /> <UpgradeVersion Property="WIX_DOWNGRADE_DETECTED" Minimum="$(var.ProductVersion)" IncludeMinimum="no" OnlyDetect="yes" /> </Upgrade> <InstallExecuteSequence> <RemoveExistingProducts After="InstallExecute" /> </InstallExecuteSequence> <!-- New product architectures should upgrade the original x86 product - even of the same version. 
--> <?if $(var.UpgradeCode) != $(var.UpgradeCode32) ?> <Upgrade Id="$(var.UpgradeCode32)"> <UpgradeVersion Property="WIX_X86_UPGRADE_DETECTED" Maximum="$(var.ProductVersion)" IncludeMaximum="yes" MigrateFeatures="yes" /> <UpgradeVersion Property="WIX_X86_DOWNGRADE_DETECTED" Minimum="$(var.ProductVersion)" IncludeMinimum="no" OnlyDetect="yes" /> </Upgrade> <Condition Message="A newer version of $(var.ProductName) is already installed.">NOT (WIX_DOWNGRADE_DETECTED OR WIX_X86_DOWNGRADE_DETECTED)</Condition> <?else ?> <Condition Message="A newer version of $(var.ProductName) is already installed.">NOT WIX_DOWNGRADE_DETECTED</Condition> <?endif ?> <Media Id="1" Cabinet="promptflow.cab" EmbedCab="yes" CompressionLevel="high" /> <Icon Id="PromptflowIcon" SourceFile="$(var.ProductResources)logo32.ico" /> <Property Id="ARPPRODUCTICON" Value="PromptflowIcon" /> <Property Id="ARPHELPLINK" Value="https://microsoft.github.io/promptflow/how-to-guides/quick-start.html" /> <Property Id="ARPURLINFOABOUT" Value="https://microsoft.github.io/promptflow/how-to-guides/quick-start.html" /> <Property Id="ARPURLUPDATEINFO" Value="https://microsoft.github.io/promptflow/how-to-guides/quick-start.html" /> <Property Id="MSIFASTINSTALL" Value="7" /> <Property Id="ApplicationFolderName" Value="promptflow" /> <Property Id="WixAppFolder" Value="WixPerUserFolder" /> <Feature Id="ProductFeature" Title="promptflow" Level="1" AllowAdvertise="no"> <ComponentGroupRef Id="ProductComponents" /> </Feature> <!--Custom action to propagate path env variable change--> <CustomActionRef Id="WixBroadcastEnvironmentChange" /> <!-- User Interface --> <WixVariable Id="WixUILicenseRtf" Value="$(var.ProductResources)CLI_LICENSE.rtf"/> <UIRef Id="WixUI_ErrorProgressText"/> <!-- Show message to restart any terminals only if the PATH is changed --> <CustomAction Id="Set_WIXUI_EXITDIALOGOPTIONALTEXT" Property="WIXUI_EXITDIALOGOPTIONALTEXT" Value="Please close and reopen any active terminal window to use prompt flow." 
/> <InstallUISequence> <Custom Action="Set_WIXUI_EXITDIALOGOPTIONALTEXT" After="CostFinalize">NOT Installed AND NOT WIX_UPGRADE_DETECTED</Custom> </InstallUISequence> <CustomAction Id="StartPromptFlowService" Directory="APPLICATIONFOLDER" Execute="deferred" ExeCommand="wscript.exe promptflow_service.vbs" Return="asyncNoWait" /> <InstallExecuteSequence> <Custom Action="StartPromptFlowService" Before="InstallFinalize">NOT Installed OR WIX_UPGRADE_DETECTED</Custom> </InstallExecuteSequence> </Product> <Fragment> <Directory Id="TARGETDIR" Name="SourceDir"> <Directory Id="$(var.ProgramFilesFolder)"> <Directory Id="APPLICATIONFOLDER" Name="promptflow" /> </Directory> <Directory Id="StartupFolder" /> </Directory> <UIRef Id="WixUI_Advanced" /> </Fragment> <Fragment> <ComponentGroup Id="PromptflowCliSettingsGroup"> <Component Id="RemovePromptflowFolder" Directory="APPLICATIONFOLDER" Guid="$(var.RemovePromptflowFolderGuid)"> <RemoveFolder Id="APPLICATIONFOLDER" On="uninstall" /> </Component> <Component Id="PromptflowSystemPath" Directory="APPLICATIONFOLDER" Guid="$(var.PromptflowSystemPathGuid)"> <Environment Id="PromptflowAddedToPATH" Name="PATH" Value="[APPLICATIONFOLDER]" Permanent="no" Part="first" Action="set" System="no" /> <CreateFolder /> </Component> <Component Id="promptflow_service.vbs" Directory="APPLICATIONFOLDER" Guid="$(var.PromptflowServiceGuid)"> <File Id="promptflow_service.vbs" Source="scripts\promptflow_service.vbs" KeyPath="yes" Checksum="yes"/> </Component> <Component Id="ApplicationShortcut" Directory="StartupFolder" Guid="$(var.PromptflowCliRegistryGuid)"> <Shortcut Id="ApplicationStartMenuShortcut" Name="Prompt flow service" Description="Prompt Flow Service" Target="[#promptflow_service.vbs]" WorkingDirectory="APPLICATIONFOLDER" Advertise="no"> <Icon Id="PromptflowServiceIcon" SourceFile="$(var.ProductResources)logo32.ico" /> </Shortcut> <RemoveFile Id="CleanUpShortCut" Directory="StartupFolder" Name="Prompt flow service" On="uninstall"/> <RegistryKey Root="HKCU" Key="Software\Microsoft\$(var.ProductName)" Action="createAndRemoveOnUninstall"> <RegistryValue Name="installed" Type="integer" Value="1" /> <RegistryValue Name="version" Type="string" Value="$(var.ProductVersion)" KeyPath="yes"/> </RegistryKey> </Component> </ComponentGroup> <ComponentGroup Id="ProductComponents"> <ComponentGroupRef Id="PromptflowCliComponentGroup"/> <ComponentGroupRef Id="PromptflowCliSettingsGroup"/> </ComponentGroup> </Fragment> </Wix>
promptflow_repo/promptflow/scripts/installer/windows/promptflow.wixproj
<?xml version="1.0" encoding="utf-8"?> <Project ToolsVersion="4.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <!-- Project --> <PropertyGroup> <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration> <Platform Condition=" '$(Platform)' == '' ">x86</Platform> <ProductVersion>3.10</ProductVersion> <ProjectGuid>04ff6707-750d-4474-89b3-7922c84721be</ProjectGuid> <SchemaVersion>2.0</SchemaVersion> <OutputName>promptflow-$(env.CLI_VERSION)</OutputName> <OutputType>Package</OutputType> <WixTargetsPath Condition=" '$(WixTargetsPath)' == '' AND '$(MSBuildExtensionsPath32)' != '' ">$(MSBuildExtensionsPath32)\Microsoft\WiX\v3.x\Wix.targets</WixTargetsPath> <WixTargetsPath Condition=" '$(WixTargetsPath)' == '' ">$(MSBuildExtensionsPath)\Microsoft\WiX\v3.x\Wix.targets</WixTargetsPath> </PropertyGroup> <!-- Local WiX --> <PropertyGroup> <LocalWixRoot>wix</LocalWixRoot> <WixToolPath>$(MSBuildThisFileDirectory)$(LocalWixRoot)</WixToolPath> <WixTargetsPath Condition="Exists('$(WixToolPath)\Wix.targets')">$(WixToolPath)\Wix.targets</WixTargetsPath> <WixTasksPath Condition="Exists('$(WixToolPath)\wixtasks.dll')">$(WixToolPath)\wixtasks.dll</WixTasksPath> <PromptflowSource>scripts\dist\promptflow</PromptflowSource> <LinkerAdditionalOptions>-fv</LinkerAdditionalOptions> </PropertyGroup> <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x86' "> <OutputPath>out\$(Configuration)\</OutputPath> <IntermediateOutputPath>out\obj\$(Configuration)\</IntermediateOutputPath> <DefineConstants>Debug;PromptflowSource=$(PromptflowSource)</DefineConstants> </PropertyGroup> <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x86' "> <OutputPath>out\</OutputPath> <IntermediateOutputPath>out\obj\$(Configuration)\</IntermediateOutputPath> <DefineConstants>PromptflowSource=$(PromptflowSource)</DefineConstants> </PropertyGroup> <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|x64' "> <OutputPath>out\$(Configuration)\</OutputPath> <IntermediateOutputPath>out\obj\$(Configuration)\</IntermediateOutputPath> <DefineConstants>Debug;PromptflowSource=$(PromptflowSource)</DefineConstants> </PropertyGroup> <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|x64' "> <OutputPath>out\</OutputPath> <IntermediateOutputPath>out\obj\$(Configuration)\</IntermediateOutputPath> <DefineConstants>PromptflowSource=$(PromptflowSource)</DefineConstants> </PropertyGroup> <ItemGroup> <Compile Include="out\promptflow.wxs"> <Link>promptflow.wxs</Link> </Compile> <Compile Include="product.wxs" /> </ItemGroup> <ItemGroup> <None Include=".\resources\logo_pf.png" /> </ItemGroup> <!-- UI --> <ItemGroup> <WixExtension Include="WixUIExtension"> <HintPath>$(WixExtDir)\WixUIExtension.dll</HintPath> <Name>WixUIExtension</Name> </WixExtension> <WixExtension Include="WixUtilExtension"> <HintPath>$(WixExtDir)\WixUtilExtension.dll</HintPath> <Name>WixUtilExtension</Name> </WixExtension> </ItemGroup> <Import Project="$(WixTargetsPath)" Condition=" '$(WixTargetsPath)' != '' " /> <Import Project="$(MSBuildExtensionsPath32)\Microsoft\WiX\v3.x\wix.targets" Condition=" '$(WixTargetsPath)' == '' AND Exists('$(MSBuildExtensionsPath32)\Microsoft\WiX\v3.x\wix.targets') " /> <Target Name="EnsureWixToolsetInstalled" Condition=" '$(WixTargetsImported)' != 'true' "> <Error Text="The WiX Toolset v3.10 build tools must be installed to build this project. 
To download the WiX Toolset, see https://wixtoolset.org/releases/v3.10/stable" /> </Target> <Target Name="BeforeBuild"> <HeatDirectory Directory="$(PromptflowSource)" ToolPath="$(WixToolPath)" AutogenerateGuids="true" ComponentGroupName="PromptflowCliComponentGroup" SuppressRootDirectory="true" DirectoryRefId="APPLICATIONFOLDER" OutputFile="out\promptflow.wxs" PreprocessorVariable="var.PromptflowSource" /> </Target> </Project>
promptflow_repo/promptflow/scripts/installer/windows/install_from_msi.md
# Install prompt flow MSI installer on Windows

Prompt flow is a suite of development tools designed to streamline the end-to-end development cycle of LLM-based AI applications that can be installed locally on Windows computers. On Windows, prompt flow is installed via an MSI, which gives you access to the CLI through the Windows Command Prompt (CMD) or PowerShell.

## Install or update

The MSI distributable is used for installing or updating prompt flow on Windows. You don't need to uninstall current versions before using the MSI installer because the MSI updates any existing version.

::::{tab-set}
:::{tab-item} Microsoft Installer (MSI)
:sync: Microsoft Installer (MSI)

### Latest version

Download and install the latest release of prompt flow. When the installer asks if it can make changes to your computer, select "Yes".

> [Latest release of the promptflow (64-bit)](https://aka.ms/installpromptflowwindowsx64)

### Specific version

If you prefer, you can download a specific version of promptflow by using a URL. To download the MSI installer for a specific version, change the version segment in the URL https://promptflowartifact.blob.core.windows.net/msi-installer/promptflow-<version>.msi

:::

:::{tab-item} Microsoft Installer (MSI) with PowerShell
:sync: Microsoft Installer (MSI) with PowerShell

### PowerShell

To install prompt flow using PowerShell, start PowerShell and run the following command:

```PowerShell
$ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://aka.ms/installpromptflowwindowsx64 -OutFile .\promptflow.msi; Start-Process msiexec.exe -Wait -ArgumentList '/I promptflow.msi /quiet'; Remove-Item .\promptflow.msi
```

This will download and install the latest 64-bit installer of prompt flow for Windows. To install a specific version, replace the `-Uri` argument with a URL like the one below. Here is an example of using the 64-bit installer of promptflow version 1.0.0 in PowerShell:

```PowerShell
$ProgressPreference = 'SilentlyContinue'; Invoke-WebRequest -Uri https://promptflowartifact.blob.core.windows.net/msi-installer/promptflow-1.0.0.msi -OutFile .\promptflow.msi; Start-Process msiexec.exe -Wait -ArgumentList '/I promptflow.msi /quiet'; Remove-Item .\promptflow.msi
```

:::

::::

## Run prompt flow

You can now run prompt flow with the `pf` or `pfazure` command from either Windows Command Prompt or PowerShell.

## Upgrade prompt flow

Beginning with version 1.4.0, prompt flow provides an in-tool command to upgrade to the latest version.

```commandline
pf upgrade
```

For prompt flow versions prior to 1.4.0, upgrade by reinstalling as described in "Install or update" above.

## Uninstall

You uninstall prompt flow from the Windows "Apps and Features" list. To uninstall:

| Platform | Instructions |
|---|---|
| Windows 11 | Start > Settings > Apps > Installed apps |
| Windows 10 | Start > Settings > System > Apps & Features |
| Windows 8 and Windows 7 | Start > Control Panel > Programs > Uninstall a program |

Once on this screen, type __promptflow__ into the program search bar. The program to uninstall is listed as __promptflow (64-bit)__. Select this application, then select the `Uninstall` button.

## FAQ

### Where is prompt flow installed?

In Windows, the 64-bit prompt flow installs in `C:\Users\**\AppData\Local\Apps\promptflow` by default.

### What version of prompt flow is installed?

Type `pf --version` in a terminal window to see what version of prompt flow is installed. Your output looks like this:

```output
promptflow x.x.x

Executable '***\python.exe'
Python (Windows) 3.*.* | packaged by conda-forge | *
```
promptflow_repo/promptflow/scripts/installer/windows/resources/CLI_LICENSE.rtf
{\rtf1\ansi\ansicpg1252\cocoartf1504\cocoasubrtf820 {\fonttbl\f0\fnil\fcharset0 Tahoma;\f1\froman\fcharset0 TimesNewRomanPSMT;\f2\ftech\fcharset77 Symbol; } {\colortbl;\red255\green255\blue255;\red0\green0\blue255;} {\*\expandedcolortbl;;\csgenericrgb\c0\c0\c100000;} {\*\listtable{\list\listtemplateid1\listhybrid{\listlevel\levelnfc0\levelnfcn0\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{decimal\}.}{\leveltext\leveltemplateid1\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid1} {\list\listtemplateid2\listhybrid{\listlevel\levelnfc0\levelnfcn0\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{decimal\}.}{\leveltext\leveltemplateid101\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid2} {\list\listtemplateid3\listhybrid{\listlevel\levelnfc4\levelnfcn4\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{lower-alpha\}.}{\leveltext\leveltemplateid201\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid3} {\list\listtemplateid4\listhybrid{\listlevel\levelnfc0\levelnfcn0\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{decimal\}.}{\leveltext\leveltemplateid301\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid4} {\list\listtemplateid5\listhybrid{\listlevel\levelnfc23\levelnfcn23\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{disc\}}{\leveltext\leveltemplateid401\'01\uc0\u8226 ;}{\levelnumbers;}\fi-360\li720\lin720 }{\listname ;}\listid5} {\list\listtemplateid6\listhybrid{\listlevel\levelnfc0\levelnfcn0\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{decimal\}.}{\leveltext\leveltemplateid501\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid6} {\list\listtemplateid7\listhybrid{\listlevel\levelnfc4\levelnfcn4\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{lower-alpha\}.}{\leveltext\leveltemplateid601\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid7} {\list\listtemplateid8\listhybrid{\listlevel\levelnfc0\levelnfcn0\leveljc0\leveljcn0\levelfollow0\levelstartat1\levelspace360\levelindent0{\*\levelmarker \{decimal\}.}{\leveltext\leveltemplateid701\'02\'00.;}{\levelnumbers\'01;}\fi-360\li720\lin720 }{\listname ;}\listid8}} {\*\listoverridetable{\listoverride\listid1\listoverridecount0\ls1}{\listoverride\listid2\listoverridecount0\ls2}{\listoverride\listid3\listoverridecount0\ls3}{\listoverride\listid4\listoverridecount0\ls4}{\listoverride\listid5\listoverridecount0\ls5}{\listoverride\listid6\listoverridecount0\ls6}{\listoverride\listid7\listoverridecount0\ls7}{\listoverride\listid8\listoverridecount0\ls8}} \margl1440\margr1440\vieww10800\viewh8400\viewkind0 \deftab720 \pard\pardeftab720\ri0\sb120\sa120\partightenfactor0 \f0\b\fs20 \cf0 MICROSOFT SOFTWARE LICENSE TERMS\ Microsoft prompt flow \f1 \ \pard\pardeftab720\ri0\sb120\sa120\partightenfactor0 \f0\b0 \cf0 These license terms are an agreement between Microsoft Corporation (or based on where you live, one of its affiliates) and you. They apply to the software named above. The terms also apply to any Microsoft services or updates for the software, except to the extent those have different terms. \f1 \ \pard\pardeftab720\ri0\sb120\sa120\partightenfactor0 \f0\b \cf0 IF YOU COMPLY WITH THESE LICENSE TERMS, YOU HAVE THE RIGHTS BELOW. 
\f1 \ \pard\tx360\pardeftab720\li357\fi-357\ri0\sb120\sa120\partightenfactor0 \ls1\ilvl0 \f0 \cf0 1. INSTALLATION AND USE RIGHTS. \f1\b0 \ \pard\pardeftab720\li357\ri0\sb120\sa120\partightenfactor0 \f0 \cf0 You may install and use any number of copies of the software.\ \pard\tx450\pardeftab720\li447\fi-357\ri0\sb120\sa120\partightenfactor0 \ls2\ilvl0 \b \cf0 2. TERMS FOR SPECIFIC COMPONENTS \f1 .\ \pard\tx4950\pardeftab720\li720\fi-270\ri0\sb120\sa120\partightenfactor0 \ls3\ilvl0 \f0 \cf0 a. Third Party Components \f1 . \f0\b0 The software may include third party components with separate legal notices or governed by other agreements, as may be described in the ThirdPartyNotices file(s) accompanying the software. Even if such components are governed by other agreements, the disclaimers and the limitations on and exclusions of damages below also apply. \f1 \ \pard\tx450\pardeftab720\li450\fi-357\ri0\sb120\sa120\partightenfactor0 \ls4\ilvl0 \f0\b \cf0 3. DATA. \b0 The software may collect information about you and your use of the software, and send that to Microsoft. Microsoft may use this information to provide services and improve our products and services. You may opt-out of many of these scenarios, but not all, as described in the product documentation. There are also some features in the software that may enable you and Microsoft to collect data from users of your applications. If you use these features, you must comply with applicable law, including providing appropriate notices to users of your applications and you should provide a copy of Microsoft\'92s privacy statement to your users. The Microsoft privacy statement is located here {\field{\*\fldinst{HYPERLINK "https://go.microsoft.com/fwlink/?LinkID=824704"}}{\fldrslt \cf2 \ul \ulc2 https://go.microsoft.com/fwlink/?LinkID=824704}}. You can learn more about data collection and use in the help documentation and our privacy statement. Your use of the software operates as your consent to these practices.\ \pard\tx360\pardeftab720\li357\fi-357\ri0\sb120\sa120\partightenfactor0 \ls4\ilvl0 \b \cf0 4. SCOPE OF LICENSE. \b0 The software is licensed, not sold. This agreement only gives you some rights to use the software. Microsoft reserves all other rights. Unless applicable law gives you more rights despite this limitation, you may use the software only as expressly permitted in this agreement. In doing so, you must comply with any technical limitations in the software that only allow you to use it in certain ways. You may not\ \pard\tx720\pardeftab720\li720\fi-363\ri0\sb120\sa120\partightenfactor0 \ls5\ilvl0 \f2 \cf0 \'a5 \f0 work around any technical limitations in the software;\ \ls5\ilvl0 \f2 \'a5 \f0 reverse engineer, decompile or disassemble the software, or otherwise attempt to derive the source code for the software except, and only to the extent required by third party licensing terms governing the use of certain open source components that may be included in the software;\ \ls5\ilvl0 \f2 \'a5 \f0 remove, minimize, block or modify any notices of Microsoft or its suppliers in the software; \ \ls5\ilvl0 \f2 \'a5 \f0 use the software in any way that is against the law; or\ \ls5\ilvl0 \f2 \'a5 \f0 share, publish, rent or lease the software, or provide the software as a stand-alone hosted as solution for others to use, or transfer the software or this agreement to any third party.\ \pard\tx360\pardeftab720\li357\fi-267\ri0\sb120\sa120\partightenfactor0 \ls6\ilvl0 \b \cf0 5. EXPORT RESTRICTIONS. 
\b0 You must comply with all domestic and international export laws and regulations that apply to the software, which include restrictions on destinations, end users, and end use. For further information on export restrictions, visit {\field{\*\fldinst{HYPERLINK "http://www.microsoft.com/exporting"}}{\fldrslt \cf2 \ul \ulc2 www.microsoft.com/exporting}}. \f1 \cf2 \ul \ulc2 \ \pard\tx450\pardeftab720\li447\fi-357\ri0\sb120\sa120\partightenfactor0 \ls6\ilvl0 \f0\b \cf0 \ulnone 6. SUPPORT SERVICES. \b0 Because this software is \'93as is,\'94 we may not provide support services for it.\ \ls6\ilvl0 \b 7. ENTIRE AGREEMENT. \b0 This agreement, and the terms for supplements, updates, Internet-based services and support services that you use, are the entire agreement for the software and support services.\ \ls6\ilvl0 \b 8. APPLICABLE LAW. \b0 If you acquired the software in the United States, Washington law applies to interpretation of and claims for breach of this agreement, and the laws of the state where you live apply to all other claims. If you acquired the software in any other country, its laws apply. \f1\b \ \ls6\ilvl0 \f0 9. CONSUMER RIGHTS; REGIONAL VARIATIONS. \b0 This agreement describes certain legal rights. You may have other rights, including consumer rights, under the laws of your state or country. Separate and apart from your relationship with Microsoft, you may also have rights with respect to the party from which you acquired the software. This agreement does not change those other rights if the laws of your state or country do not permit it to do so. For example, if you acquired the software in one of the below regions, or mandatory country law applies, then the following provisions apply to you:\ \pard\pardeftab720\li720\fi-270\ri0\sb120\sa120\partightenfactor0 \ls7\ilvl0 \b \cf0 b. Australia. \b0 You have statutory guarantees under the Australian Consumer Law and nothing in this agreement is intended to affect those rights.\ \pard\pardeftab720\li717\fi-267\ri0\sb120\sa120\partightenfactor0 \ls7\ilvl0 \b \cf0 c. Canada. \b0 If you acquired this software in Canada, you may stop receiving updates by turning off the automatic update feature, disconnecting your device from the Internet (if and when you re-connect to the Internet, however, the software will resume checking for and installing updates), or uninstalling the software. The product documentation, if any, may also specify how to turn off updates for your specific device or software.\ \ls7\ilvl0 \b d. Germany and Austria \f1\b0 .\ \pard\pardeftab720\li717\ri0\sb120\sa120\partightenfactor0 \f0\b \cf0 (i) \f1\b0 \f0\b Warranty \b0 . The properly licensed software will perform substantially as described in any Microsoft materials that accompany the software. However, Microsoft gives no contractual guarantee in relation to the licensed software.\ \b (ii) \f1\b0 \f0\b Limitation of Liability \b0 . In case of intentional conduct, gross negligence, claims based on the Product Liability Act, as well as, in case of death or personal or physical injury, Microsoft is liable according to the statutory law.\ Subject to the foregoing clause (ii), Microsoft will only be liable for slight negligence if Microsoft is in breach of such material contractual obligations, the fulfillment of which facilitate the due performance of this agreement, the breach of which would endanger the purpose of this agreement and the compliance with which a party may constantly trust in (so-called "cardinal obligations"). 
In other cases of slight negligence, Microsoft will not be liable for slight negligence.\ \pard\tx450\pardeftab720\li447\fi-357\ri0\sb120\sa120\partightenfactor0 \ls8\ilvl0 \b \cf0 10. DISCLAIMER OF WARRANTY. THE SOFTWARE IS LICENSED \'93AS-IS.\'94 YOU BEAR THE RISK OF USING IT. MICROSOFT GIVES NO EXPRESS WARRANTIES, GUARANTEES OR CONDITIONS. TO THE EXTENT PERMITTED UNDER YOUR LOCAL LAWS, MICROSOFT EXCLUDES THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.\ 11. LIMITATION ON AND EXCLUSION OF DAMAGES. YOU CAN RECOVER FROM MICROSOFT AND ITS SUPPLIERS ONLY DIRECT DAMAGES UP TO U.S. $5.00. YOU CANNOT RECOVER ANY OTHER DAMAGES, INCLUDING CONSEQUENTIAL, LOST PROFITS, SPECIAL, INDIRECT OR INCIDENTAL DAMAGES.\ \pard\pardeftab720\li450\ri0\sb120\sa120\partightenfactor0 \b0 \cf0 This limitation applies to (a) anything related to the software, services, content (including code) on third party Internet sites, or third party applications; and (b) claims for breach of contract, breach of warranty, guarantee or condition, strict liability, negligence, or other tort to the extent permitted by applicable law.\ It also applies even if Microsoft knew or should have known about the possibility of the damages. The above limitation or exclusion may not apply to you because your country may not allow the exclusion or limitation of incidental, consequential or other damages.}
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/pfazure.bat
@echo off
setlocal

set MAIN_EXE=%~dp0.\pfcli.exe
"%MAIN_EXE%" pfazure %*
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/pfs.bat
@echo off
setlocal

set MAIN_EXE=%~dp0.\pfcli.exe
"%MAIN_EXE%" pfs %*
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/pfsvc.bat
@echo off
setlocal

set MAIN_EXE=%~dp0.\pfcli.exe
"%MAIN_EXE%" pfsvc %*
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/promptflow_service.vbs
DIM objshell
set objshell = wscript.createobject("wscript.shell")
iReturn = objshell.run("pfs.bat start --force", 0, true)
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/pf.bat
@echo off
setlocal

SET PF_INSTALLER=MSI
set MAIN_EXE=%~dp0.\pfcli.exe
"%MAIN_EXE%" pf %*
0
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/version_info.txt
# UTF-8
#
# For more details about fixed file info 'ffi' see:
# http://msdn.microsoft.com/en-us/library/ms646997.aspx
VSVersionInfo(
  ffi=FixedFileInfo(
    # filevers and prodvers should always be a tuple with four items: (1, 2, 3, 4)
    # Set not needed items to zero 0.
    filevers=($(env.FILE_VERSION)),
    prodvers=(1, 0, 0, 0),
    # Contains a bitmask that specifies the valid bits of 'flags'.
    mask=0x3f,
    # Contains a bitmask that specifies the Boolean attributes of the file.
    flags=0x0,
    # The operating system for which this file was designed.
    # 0x4 - NT and there is no need to change it.
    OS=0x4,
    # The general type of file.
    # 0x1 - the file is an application.
    fileType=0x1,
    # The function of the file.
    # 0x0 - the function is not defined for this fileType
    subtype=0x0,
    # Creation date and time stamp.
    date=(0, 0)
  ),
  kids=[
    StringFileInfo(
      [
        StringTable(
          '040904E4',
          [StringStruct('CompanyName', 'Microsoft Corporation'),
           StringStruct('FileDescription', 'Microsoft prompt flow'),
           StringStruct('FileVersion', '1.0.0.0'),
           StringStruct('InternalName', 'setup'),
           StringStruct('LegalCopyright', 'Copyright (c) Microsoft Corporation. All rights reserved.'),
           StringStruct('ProductName', 'Microsoft prompt flow'),
           StringStruct('ProductVersion', '$(env.CLI_VERSION)')])
      ]),
    VarFileInfo([VarStruct('Translation', [1033, 1252])])
  ]
)
0
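The `$(env.FILE_VERSION)` and `$(env.CLI_VERSION)` placeholders above are presumably substituted by the packaging pipeline before PyInstaller consumes this file. A minimal sketch of such a substitution step, assuming the placeholder names in the file and environment variables `FILE_VERSION`/`CLI_VERSION` set by the build; the helper itself is hypothetical, not part of the repository:

```python
import os

def render_version_info(template_path: str, output_path: str) -> None:
    """Hypothetical build step: expand $(env.NAME) placeholders from the environment."""
    with open(template_path, "r", encoding="utf-8") as f:
        content = f.read()
    # FILE_VERSION would be a 4-tuple string such as "1, 0, 0, 0"; CLI_VERSION such as "1.0.0".
    content = content.replace("$(env.FILE_VERSION)", os.environ["FILE_VERSION"])
    content = content.replace("$(env.CLI_VERSION)", os.environ["CLI_VERSION"])
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(content)

# render_version_info("version_info.txt", "version_info.rendered.txt")
```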
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/pfcli.py
import sys
import multiprocessing


# use this file as the only entry point for the CLI to avoid packaging the same environment repeatedly
if __name__ == "__main__":
    multiprocessing.freeze_support()
    command = sys.argv[1] if len(sys.argv) > 1 else None
    sys.argv = sys.argv[1:]
    if command == 'pf':
        from promptflow._cli._pf.entry import main as pf_main
        pf_main()
    elif command == 'pfazure':
        from promptflow._cli._pf_azure.entry import main as pfazure_main
        pfazure_main()
    elif command == 'pfs':
        from promptflow._sdk._service.entry import main as pfs_main
        pfs_main()
    elif command == 'pfsvc':
        from promptflow._sdk._service.pfsvc import init as pfsvc_init
        pfsvc_init()
    else:
        print(f"Invalid command {sys.argv}. Please use 'pf', 'pfazure', 'pfs' or 'pfsvc'.")
0
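Since the dispatcher in `pfcli.py` shifts `sys.argv` left by one before delegating, each sub-CLI receives exactly the argument vector it would see as a standalone program. A minimal sketch of that shift; the command strings below are illustrative, not taken from the repository:

```python
import sys

# Simulate the invocation "pfcli.exe pf flow test --flow ./my-flow"
sys.argv = ["pfcli.exe", "pf", "flow", "test", "--flow", "./my-flow"]

command = sys.argv[1] if len(sys.argv) > 1 else None
sys.argv = sys.argv[1:]

print(command)   # 'pf' -> selects promptflow._cli._pf.entry
print(sys.argv)  # ['pf', 'flow', 'test', '--flow', './my-flow'] is what pf_main() parses
```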
promptflow_repo/promptflow/scripts/installer/windows
promptflow_repo/promptflow/scripts/installer/windows/scripts/promptflow.spec
# -*- mode: python ; coding: utf-8 -*-
from PyInstaller.utils.hooks import collect_data_files
from PyInstaller.utils.hooks import copy_metadata

datas = [('../resources/CLI_LICENSE.rtf', '.'),
         ('../../../../src/promptflow/NOTICE.txt', '.'),
         ('../../../../src/promptflow/promptflow/_sdk/data/executable/', './promptflow/_sdk/data/executable/'),
         ('../../../../src/promptflow-tools/promptflow/tools/', './promptflow/tools/'),
         ('./pf.bat', '.'),
         ('./pfs.bat', '.'),
         ('./pfazure.bat', '.'),
         ('./pfsvc.bat', '.')]

datas += collect_data_files('streamlit')
datas += copy_metadata('streamlit')
datas += collect_data_files('streamlit_quill')
datas += collect_data_files('promptflow')

hidden_imports = ['streamlit.runtime.scriptrunner.magic_funcs', 'win32timezone', 'promptflow']

block_cipher = None

pfcli_a = Analysis(
    ['pfcli.py'],
    pathex=[],
    binaries=[],
    datas=datas,
    hiddenimports=hidden_imports,
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)
pfcli_pyz = PYZ(pfcli_a.pure, pfcli_a.zipped_data, cipher=block_cipher)
pfcli_exe = EXE(
    pfcli_pyz,
    pfcli_a.scripts,
    [],
    exclude_binaries=True,
    name='pfcli',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
    contents_directory='.',
    icon='../resources/logo32.ico',
    version="./version_info.txt",
)
coll = COLLECT(
    pfcli_exe,
    pfcli_a.binaries,
    pfcli_a.zipfiles,
    pfcli_a.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='promptflow',
)
0
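A spec file like this is normally consumed by the `pyinstaller` command line. A minimal sketch of driving the same build from Python instead, assuming the working directory is the `scripts` folder that contains `promptflow.spec`:

```python
import PyInstaller.__main__

# Building from the spec produces dist/promptflow/ with pfcli.exe, the .bat
# wrappers, and the bundled data files listed in `datas` above.
PyInstaller.__main__.run(["promptflow.spec", "--noconfirm"])
```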
promptflow_repo/promptflow/scripts/installer
promptflow_repo/promptflow/scripts/installer/curl_install_pypi/README.md
# Curl Install Script Information

The scripts in this directory are used for installing through curl, and they point to the packages on PyPI.

## Install or update promptflow

```bash
curl https://promptflowartifact.blob.core.windows.net/linux-install-scripts/install | bash
```

The script can also be downloaded and run locally. You may have to restart your shell for the changes to take effect.

## Uninstall promptflow

Uninstall promptflow by directly deleting the files from the location chosen at the time of installation.

1. Remove the installed CLI files.

   ```bash
   # The default install/executable location is the user's home directory ($HOME).
   rm -r $HOME/lib/promptflow
   rm $HOME/bin/pf
   rm $HOME/bin/pfs
   rm $HOME/bin/pfazure
   ```

2. Modify your `$HOME/.bash_profile` or `$HOME/.bashrc` file to remove the following line:

   ```text
   export PATH=$PATH:$HOME/bin
   ```

3. If using `bash` or `zsh`, reload your shell's command cache.

   ```bash
   hash -r
   ```
0
promptflow_repo/promptflow/scripts/installer
promptflow_repo/promptflow/scripts/installer/curl_install_pypi/install
#!/usr/bin/env bash
#---------------------------------------------------------------------------------------------
#  Copyright (c) Microsoft Corporation. All rights reserved.
#  Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
#
# Bash script to install the prompt flow
#
INSTALL_SCRIPT_URL="https://promptflowartifact.blob.core.windows.net/linux-install-scripts/install.py"
_TTY=/dev/tty

install_script=$(mktemp -t promptflow_install_tmp_XXXXXX) || exit
echo "Downloading prompt flow install script from $INSTALL_SCRIPT_URL to $install_script."
curl -# $INSTALL_SCRIPT_URL > $install_script || exit

python_cmd=python3
if ! command -v python3 >/dev/null 2>&1
then
    echo "ERROR: python3 not found."
    echo "If python3 is available on the system, add it to PATH."
    exit 1
fi

chmod 775 $install_script
echo "Running install script."
$python_cmd $install_script < $_TTY
0
promptflow_repo/promptflow/scripts/installer
promptflow_repo/promptflow/scripts/installer/curl_install_pypi/install.py
#!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
#
# This script will install the promptflow into a directory and create an executable
# at a specified file path that is the entry point into the promptflow.
#
# The latest versions of all promptflow command packages will be installed.
#

import os
import sys
import platform
import stat
import tempfile
import shutil
import subprocess
import hashlib

PF_DISPATCH_TEMPLATE = """#!/usr/bin/env bash
export PF_INSTALLER=Script
{install_dir}/bin/python -m promptflow._cli._pf.entry "$@"
"""

PFAZURE_DISPATCH_TEMPLATE = """#!/usr/bin/env bash
{install_dir}/bin/python -m promptflow._cli._pf_azure.entry "$@"
"""

PFS_DISPATCH_TEMPLATE = """#!/usr/bin/env bash
{install_dir}/bin/python -m promptflow._sdk._service.entry "$@"
"""

DEFAULT_INSTALL_DIR = os.path.expanduser(os.path.join('~', 'lib', 'promptflow'))
DEFAULT_EXEC_DIR = os.path.expanduser(os.path.join('~', 'bin'))
PF_EXECUTABLE_NAME = 'pf'
PFAZURE_EXECUTABLE_NAME = 'pfazure'
PFS_EXECUTABLE_NAME = 'pfs'

USER_BASH_RC = os.path.expanduser(os.path.join('~', '.bashrc'))
USER_BASH_PROFILE = os.path.expanduser(os.path.join('~', '.bash_profile'))


class CLIInstallError(Exception):
    pass


def print_status(msg=''):
    print('-- ' + msg)


def prompt_input(msg):
    return input('\n===> ' + msg)


def prompt_input_with_default(msg, default):
    if default:
        return prompt_input("{} (leave blank to use '{}'): ".format(msg, default)) or default
    return prompt_input('{}: '.format(msg))


def prompt_y_n(msg, default=None):
    if default not in [None, 'y', 'n']:
        raise ValueError("Valid values for default are 'y', 'n' or None")
    y = 'Y' if default == 'y' else 'y'
    n = 'N' if default == 'n' else 'n'
    while True:
        ans = prompt_input('{} ({}/{}): '.format(msg, y, n))
        if ans.lower() == n.lower():
            return False
        if ans.lower() == y.lower():
            return True
        if default and not ans:
            return default == y.lower()


def exec_command(command_list, cwd=None, env=None):
    print_status('Executing: ' + str(command_list))
    subprocess.check_call(command_list, cwd=cwd, env=env)


def create_tmp_dir():
    return tempfile.mkdtemp()


def create_dir(dir):
    if not os.path.isdir(dir):
        print_status("Creating directory '{}'.".format(dir))
        os.makedirs(dir)


def is_valid_sha256sum(a_file, expected_sum):
    sha256 = hashlib.sha256()
    with open(a_file, 'rb') as f:
        sha256.update(f.read())
    computed_hash = sha256.hexdigest()
    return expected_sum == computed_hash


def create_virtualenv(install_dir):
    cmd = [sys.executable, '-m', 'venv', install_dir]
    exec_command(cmd)


def install_cli(install_dir, tmp_dir):
    path_to_pip = os.path.join(install_dir, 'bin', 'pip')
    cmd = [path_to_pip, 'install', '--cache-dir', tmp_dir,
           'promptflow[azure,executable,pfs,azureml-serving]', '--upgrade']
    exec_command(cmd)
    cmd = [path_to_pip, 'install', '--cache-dir', tmp_dir, 'promptflow-tools', '--upgrade']
    exec_command(cmd)
    cmd = [path_to_pip, 'install', '--cache-dir', tmp_dir, 'keyrings.alt', '--upgrade']
    exec_command(cmd)


def create_executable(exec_dir, install_dir):
    create_dir(exec_dir)
    exec_filepaths = []
    for filename, template in [(PF_EXECUTABLE_NAME, PF_DISPATCH_TEMPLATE),
                               (PFAZURE_EXECUTABLE_NAME, PFAZURE_DISPATCH_TEMPLATE),
                               (PFS_EXECUTABLE_NAME, PFS_DISPATCH_TEMPLATE)]:
        exec_filepath = os.path.join(exec_dir, filename)
        with open(exec_filepath, 'w') as exec_file:
            exec_file.write(template.format(install_dir=install_dir))
        cur_stat = os.stat(exec_filepath)
        os.chmod(exec_filepath, cur_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
        print_status("The executable is available at '{}'.".format(exec_filepath))
        exec_filepaths.append(exec_filepath)
    return exec_filepaths


def get_install_dir():
    install_dir = None
    while not install_dir:
        prompt_message = 'In what directory would you like to place the install?'
        install_dir = prompt_input_with_default(prompt_message, DEFAULT_INSTALL_DIR)
        install_dir = os.path.realpath(os.path.expanduser(install_dir))
        if ' ' in install_dir:
            print_status("The install directory '{}' cannot contain spaces.".format(install_dir))
            install_dir = None
        else:
            create_dir(install_dir)
            if os.listdir(install_dir):
                print_status("'{}' is not empty and may contain a previous installation.".format(install_dir))
                ans_yes = prompt_y_n('Remove this directory?', 'n')
                if ans_yes:
                    shutil.rmtree(install_dir)
                    print_status("Deleted '{}'.".format(install_dir))
                    create_dir(install_dir)
                else:
                    # User opted to not delete the directory so ask for install directory again
                    install_dir = None
    print_status("We will install at '{}'.".format(install_dir))
    return install_dir


def get_exec_dir():
    exec_dir = None
    while not exec_dir:
        prompt_message = (f"In what directory would you like to place the "
                          f"'{PF_EXECUTABLE_NAME}/{PFS_EXECUTABLE_NAME}/{PFAZURE_EXECUTABLE_NAME}' executable?")
        exec_dir = prompt_input_with_default(prompt_message, DEFAULT_EXEC_DIR)
        exec_dir = os.path.realpath(os.path.expanduser(exec_dir))
        if ' ' in exec_dir:
            print_status("The executable directory '{}' cannot contain spaces.".format(exec_dir))
            exec_dir = None
    create_dir(exec_dir)
    print_status("The executable will be in '{}'.".format(exec_dir))
    return exec_dir


def _backup_rc(rc_file):
    try:
        shutil.copyfile(rc_file, rc_file + '.backup')
        print_status("Backed up '{}' to '{}'".format(rc_file, rc_file + '.backup'))
    except (OSError, IOError):
        pass


def _get_default_rc_file():
    bashrc_exists = os.path.isfile(USER_BASH_RC)
    bash_profile_exists = os.path.isfile(USER_BASH_PROFILE)
    if not bashrc_exists and bash_profile_exists:
        return USER_BASH_PROFILE
    if bashrc_exists and bash_profile_exists and platform.system().lower() == 'darwin':
        return USER_BASH_PROFILE
    return USER_BASH_RC if bashrc_exists else None


def _default_rc_file_creation_step():
    rcfile = USER_BASH_PROFILE if platform.system().lower() == 'darwin' else USER_BASH_RC
    ans_yes = prompt_y_n('Could not automatically find a suitable file to use. Create {} now?'.format(rcfile),
                         default='y')
    if ans_yes:
        open(rcfile, 'a').close()
        return rcfile
    return None


def _find_line_in_file(file_path, search_pattern):
    try:
        with open(file_path, 'r', encoding="utf-8") as search_file:
            for line in search_file:
                if search_pattern in line:
                    return True
    except (OSError, IOError):
        pass
    return False


def _modify_rc(rc_file_path, line_to_add):
    if not _find_line_in_file(rc_file_path, line_to_add):
        with open(rc_file_path, 'a', encoding="utf-8") as rc_file:
            rc_file.write('\n' + line_to_add + '\n')


def get_rc_file_path():
    rc_file = None
    default_rc_file = _get_default_rc_file()
    if not default_rc_file:
        rc_file = _default_rc_file_creation_step()
    rc_file = rc_file or prompt_input_with_default('Enter a path to an rc file to update', default_rc_file)
    if rc_file:
        rc_file_path = os.path.realpath(os.path.expanduser(rc_file))
        if os.path.isfile(rc_file_path):
            return rc_file_path
        print_status("The file '{}' could not be found.".format(rc_file_path))
    return None


def warn_other_azs_on_path(exec_dir, exec_filepath):
    env_path = os.environ.get('PATH')
    conflicting_paths = []
    if env_path:
        for p in env_path.split(':'):
            for file in [PF_EXECUTABLE_NAME, PFAZURE_EXECUTABLE_NAME, PFS_EXECUTABLE_NAME]:
                p_to_pf = os.path.join(p, file)
                if p != exec_dir and os.path.isfile(p_to_pf):
                    conflicting_paths.append(p_to_pf)
    if conflicting_paths:
        print_status()
        print_status(f"** WARNING: Other '{PF_EXECUTABLE_NAME}/{PFS_EXECUTABLE_NAME}/{PFAZURE_EXECUTABLE_NAME}' "
                     f"executables are on your $PATH. **")
        print_status("Conflicting paths: {}".format(', '.join(conflicting_paths)))
        print_status("You can run this installation of the promptflow with '{}'.".format(exec_filepath))


def handle_path_and_tab_completion(exec_filepath, exec_dir):
    ans_yes = prompt_y_n('Modify profile to update your $PATH now?', 'y')
    if ans_yes:
        rc_file_path = get_rc_file_path()
        if not rc_file_path:
            raise CLIInstallError('No suitable profile file found.')
        _backup_rc(rc_file_path)
        line_to_add = "export PATH=$PATH:{}".format(exec_dir)
        _modify_rc(rc_file_path, line_to_add)
        warn_other_azs_on_path(exec_dir, exec_filepath)
        print_status()
        print_status('** Run `exec -l $SHELL` to restart your shell. **')
        print_status()
    else:
        print_status("You can run the promptflow with '{}'.".format(exec_filepath))


def verify_python_version():
    print_status('Verifying Python version.')
    v = sys.version_info
    if v < (3, 8):
        raise CLIInstallError('The promptflow does not support Python versions less than 3.8.')
    if 'conda' in sys.version:
        raise CLIInstallError("This script does not support the Python Anaconda environment. "
                              "Create an Anaconda virtual environment and install with 'pip'")
    print_status('Python version {}.{}.{} okay.'.format(v.major, v.minor, v.micro))


def _native_dependencies_for_dist(verify_cmd_args, install_cmd_args, dep_list):
    try:
        print_status("Executing: '{} {}'".format(' '.join(verify_cmd_args), ' '.join(dep_list)))
        subprocess.check_output(verify_cmd_args + dep_list, stderr=subprocess.STDOUT)
        print_status('Native dependencies okay.')
    except subprocess.CalledProcessError:
        err_msg = 'One or more of the following native dependencies are not currently installed and may be required.\n'
        err_msg += '"{}"'.format(' '.join(install_cmd_args + dep_list))
        print_status(err_msg)
        ans_yes = prompt_y_n('Missing native dependencies. Attempt to continue anyway?', 'n')
        if not ans_yes:
            raise CLIInstallError('Please install the native dependencies and try again.')


def _get_linux_distro():
    if platform.system() != 'Linux':
        return None, None
    try:
        with open('/etc/os-release') as lines:
            tokens = [line.strip() for line in lines]
    except Exception:
        return None, None
    release_info = {}
    for token in tokens:
        if '=' in token:
            k, v = token.split('=', 1)
            release_info[k.lower()] = v.strip('"')
    return release_info.get('name', None), release_info.get('version_id', None)


def verify_install_dir_exec_path_conflict(install_dir, exec_dir):
    for exec_name in [PF_EXECUTABLE_NAME, PFAZURE_EXECUTABLE_NAME, PFS_EXECUTABLE_NAME]:
        exec_path = os.path.join(exec_dir, exec_name)
        if install_dir == exec_path:
            raise CLIInstallError("The executable file '{}' would clash with the install directory of '{}'. Choose "
                                  "either a different install directory or directory to place the "
                                  "executable.".format(exec_path, install_dir))


def main():
    verify_python_version()
    tmp_dir = create_tmp_dir()
    install_dir = get_install_dir()
    exec_dir = get_exec_dir()
    verify_install_dir_exec_path_conflict(install_dir, exec_dir)
    create_virtualenv(install_dir)
    install_cli(install_dir, tmp_dir)
    exec_filepath = create_executable(exec_dir, install_dir)
    try:
        handle_path_and_tab_completion(exec_filepath, exec_dir)
    except Exception as e:
        print_status("Unable to set up PATH. ERROR: {}".format(str(e)))
    shutil.rmtree(tmp_dir)
    print_status("Installation successful.")
    print_status("Run the CLI with {} --help".format(exec_filepath))


if __name__ == '__main__':
    try:
        main()
    except CLIInstallError as cie:
        print('ERROR: ' + str(cie), file=sys.stderr)
        sys.exit(1)
    except KeyboardInterrupt:
        print('\n\nExiting...')
        sys.exit(1)

# SIG # Begin signature block
# Z1F07ShfIJ7kejST2NXwW1QcFPEya4xaO2xZz6vLT847zaMzbc/PaEa1RKFlD881
# 4J+i6Au2wtbHzOXDisyH6WeLQ3gh0X2gxFRa4EzW7Nzjcvwm4+WogiTcnPVVxlk3
# qafM/oyVqs3695K7W5XttOiq2guv/yedsf/TW2BKSEKruFQh9IwDfIiBoi9Zv3wa
# iuzQulRR8KyrCtjEPDV0t4WnZVB/edQea6xJZeTlMG+uLR/miBTbPhUb/VZkVjBf
# qHBv623oLXICzoTNuaPTln9OWvL2NZpisGYvNzebKO7/Ho6AOWZNs5XOVnjs0Ax2
# aeXvlwBzIQyfyxd25487/Q==
# SIG # End signature block
0
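The `_modify_rc` helper above only appends the PATH export when `_find_line_in_file` cannot find it, which keeps repeated installs from duplicating the line. A small self-contained sketch of that idempotency, using a temporary file instead of a real `.bashrc` and an illustrative `exec_dir`:

```python
import os
import tempfile

line_to_add = "export PATH=$PATH:/home/user/bin"  # illustrative exec_dir

def find_line_in_file(path, pattern):
    try:
        with open(path, "r", encoding="utf-8") as f:
            return any(pattern in line for line in f)
    except OSError:
        return False

def modify_rc(path, line):
    # append only when the line is not already present
    if not find_line_in_file(path, line):
        with open(path, "a", encoding="utf-8") as f:
            f.write("\n" + line + "\n")

with tempfile.NamedTemporaryFile("w", suffix=".bashrc", delete=False) as tmp:
    rc_path = tmp.name
modify_rc(rc_path, line_to_add)
modify_rc(rc_path, line_to_add)  # second call is a no-op
assert open(rc_path).read().count(line_to_add) == 1
os.remove(rc_path)
```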
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/distributing/publish_package.py
import argparse
import json
import os
import re
from datetime import datetime, timedelta

from azure.storage.blob import (
    AccountSasPermissions,
    BlobServiceClient,
    ContentSettings,
    ResourceTypes,
    generate_account_sas,
)


def get_connection_string(storage_account, storage_key):
    return f"DefaultEndpointsProtocol=https;AccountName={storage_account};AccountKey={storage_key};EndpointSuffix=core.windows.net"  # noqa: E501


def get_object_sas_token(storage_account, storage_key):
    sas_token = generate_account_sas(
        account_name=storage_account,
        account_key=storage_key,
        resource_types=ResourceTypes(object=True),
        permission=AccountSasPermissions(read=True),
        expiry=datetime.utcnow() + timedelta(days=365),
    )
    return sas_token


def get_wheel_distribution_name(package_name):
    """The wheel filename is {distribution}-{version}(-{build tag})?-{python tag}-{abi tag}-{platform tag}.whl.

    The distribution name is normalized from the package name."""
    return package_name.replace(".", "_").replace("-", "_").replace(" ", "_")


def package_name_based_blob_prefix(package_name):
    """Convert package name to a valid blob prefix."""
    prefix = package_name.replace(".", "-")
    prefix = prefix.replace("_", "-")
    prefix = prefix.lower()
    return prefix


def override_version_with_latest(distribution_name):
    return re.sub("-([0-9.]*)-", "-latest-", distribution_name, count=1)


def publish_package_internal(package_dir_path, storage_key, release_config):
    index = release_config["index"]
    index_config = config_json["targets"][index]
    storage_account = index_config["storage_account"]
    packages_container = index_config["packages_container"]
    index_container = index_config["index_container"]
    blob_prefix = index_config["blob_prefix"]
    pypi_endpoint = index_config["endpoint"]

    account_url = f"https://{storage_account}.blob.core.windows.net"
    wheel_pattern = re.compile(r".+\.whl$")
    whl_distributions = [d for d in os.listdir(package_dir_path) if wheel_pattern.match(d)]
    if len(whl_distributions) != 1:
        print(
            f"[Error] Found {len(whl_distributions)} wheel distributions in {package_dir_path}. "
            "There should be exactly one."
        )
        exit(1)
    whl_distribution = whl_distributions[0]

    # Create the BlobServiceClient with connection string
    blob_service_client = BlobServiceClient.from_connection_string(get_connection_string(storage_account, storage_key))
    container_client = blob_service_client.get_container_client(packages_container)

    # Upload the wheel package to blob storage
    package_blob = os.path.join(blob_prefix, whl_distribution)
    package_blob_client = blob_service_client.get_blob_client(container=packages_container, blob=package_blob)
    upload_file_path = os.path.join(package_dir_path, whl_distribution)
    with open(file=upload_file_path, mode="rb") as package_file:
        print(f"[Debug] Uploading {whl_distribution} to container: {packages_container}, blob: {package_blob}...")
        package_blob_client.upload_blob(package_file, overwrite=True)

    if upload_as_latest:
        latest_distribution = override_version_with_latest(whl_distribution)
        latest_package_blob = os.path.join(blob_prefix, latest_distribution)
        latest_package_blob_client = blob_service_client.get_blob_client(
            container=packages_container, blob=latest_package_blob
        )
        upload_file_path = os.path.join(package_dir_path, whl_distribution)
        with open(file=upload_file_path, mode="rb") as package_file:
            print(
                f"[Debug] Uploading {whl_distribution} as latest distribution to "
                f"container: {packages_container}, blob: {latest_package_blob}..."
            )
            latest_package_blob_client.upload_blob(package_file, overwrite=True)

    # List the blobs and generate download sas urls
    sas_token = get_object_sas_token(storage_account, storage_key)
    print(f"Listing wheel packages with prefix {blob_prefix} in container...")
    blob_list = container_client.list_blobs(name_starts_with=f"{blob_prefix}/")
    distribution_blobs = [d for d in blob_list if wheel_pattern.match(d.name)]
    # Reverse the list so that the latest distribution is at the top
    distribution_blobs.reverse()

    packages_indexes = {}  # {package_name: [distributions]}
    for blob in distribution_blobs:
        distribution_name = blob.name.split("/")[-1]
        package_name = package_name_based_blob_prefix(distribution_name.split("-")[0])
        print(f"[Debug] Blob: {blob.name}. Package distribution: {distribution_name}. Package name: {package_name}")
        download_link = f"{account_url}/{blob.container}/{blob.name}?{sas_token}"
        index_item = f"<a href='{download_link}' rel='external'>{distribution_name}</a><br/>"
        if package_name in packages_indexes:
            packages_indexes[package_name].append(index_item)
        else:
            packages_indexes[package_name] = [index_item]

    # Update index.html in the top level blob prefix for the project
    project_index_file = "project_index.html"
    with open(project_index_file, "w", encoding="utf8") as index_file:
        index_file.write("<!DOCTYPE html>\n")
        index_file.write(
            "<html lang='en'><head><meta charset='utf-8'>"
            "<meta name='api-version' value='2'/>"
            "<title>Simple Index</title></head><body>\n"
        )
        for package_name in packages_indexes:
            package_index_url = f"https://{pypi_endpoint}/{blob_prefix}/{package_name}"
            print(f"[Debug] Updated package_index_url: {package_index_url}")
            index_file.write(f"<a href='{package_index_url}'>{package_name}</a><br/>\n")
        index_file.write("</body></html>\n")

    project_index_blob = os.path.join(blob_prefix, "index.html")
    project_index_blob_client = blob_service_client.get_blob_client(container=index_container, blob=project_index_blob)
    content_settings = ContentSettings(content_type="text/html")
    with open(file=project_index_file, mode="rb") as index:
        print(f"Uploading {project_index_file} to container: {index_container}, blob: {project_index_blob}...")
        project_index_blob_client.upload_blob(index, overwrite=True, content_settings=content_settings)

    # Update index.html for the package distributions
    for package_name, distribution_indexes in packages_indexes.items():
        package_index_file = f"{package_name}_index.html"
        if len(distribution_indexes) > 0:
            print(f"{len(distribution_indexes)} distributions found for package {package_name}. "
                  "Updating index.html...")
            with open(package_index_file, "w", encoding="utf8") as index_file:
                index_file.write("<!DOCTYPE html>\n")
                index_file.write(
                    f"<html lang='en'><head><meta charset='utf-8'><title>{package_name}</title></head><body>\n"
                )
                for item in distribution_indexes:
                    index_file.write(f"{item}\n")
                index_file.write("</body></html>\n")

            # Upload the index.html to the blob with prefix: <blob_prefix>/<normalized package_name>
            index_blob = os.path.join(blob_prefix, package_name, "index.html")
            index_blob_client = blob_service_client.get_blob_client(container=index_container, blob=index_blob)
            content_settings = ContentSettings(content_type="text/html")
            with open(file=package_index_file, mode="rb") as index:
                print(f"Uploading {package_index_file} to container: {index_container}, blob: {index_blob}...")
                index_blob_client.upload_blob(index, overwrite=True, content_settings=content_settings)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", type=str)
    parser.add_argument("--src_folder_name", type=str)
    parser.add_argument("--package_dir_path", type=str)
    parser.add_argument("--storage_key", type=str)
    parser.add_argument("--upload_as_latest", type=str, default="False")
    parser.add_argument("--pypi_type", type=str, default="internal")  # internal or public pypi
    parser.add_argument("--release_type", type=str, default="release")  # release or test
    args = parser.parse_args()

    print("[Debug] Arguments:")
    print(f"[Debug] config: {args.config}")
    print(f"[Debug] src_folder_name: {args.src_folder_name}")
    print(f"[Debug] package_dir_path: {args.package_dir_path}")
    upload_as_latest = args.upload_as_latest.lower() == "true"
    print(f"[Debug] upload_as_latest: {args.upload_as_latest}. Boolean upload_as_latest: {upload_as_latest}")
    print(f"[Debug] pypi_type: {args.pypi_type}")
    print(f"[Debug] release_type: {args.release_type}")

    cwd = os.getcwd()
    print(f"Current working directory: {cwd}")
    with open(os.path.join(os.getcwd(), args.config), "r") as config_file:
        config_json = json.load(config_file)
    package_dir_path = os.path.join(cwd, args.package_dir_path)

    release_config = config_json["releases"][args.pypi_type][f"{args.src_folder_name}-{args.release_type}"]
    if args.pypi_type == "internal":
        publish_package_internal(package_dir_path, args.storage_key, release_config)
0
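The three naming helpers in `publish_package.py` are pure string transforms, so the expected blob layout is easy to verify stand-alone. A minimal sketch reproducing them with an illustrative wheel name:

```python
import re

def get_wheel_distribution_name(package_name):
    # wheel distribution names normalize '.', '-', and ' ' to '_'
    return package_name.replace(".", "_").replace("-", "_").replace(" ", "_")

def package_name_based_blob_prefix(package_name):
    # blob prefixes go the other way: '_' and '.' become '-', lowercased
    return package_name.replace(".", "-").replace("_", "-").lower()

def override_version_with_latest(distribution_name):
    # replace only the first "-<version>-" segment with "-latest-"
    return re.sub("-([0-9.]*)-", "-latest-", distribution_name, count=1)

assert get_wheel_distribution_name("promptflow-tools") == "promptflow_tools"
assert package_name_based_blob_prefix("promptflow_tools") == "promptflow-tools"
assert override_version_with_latest("promptflow_tools-1.2.0-py3-none-any.whl") == \
    "promptflow_tools-latest-py3-none-any.whl"
```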
promptflow_repo/promptflow/scripts/distributing
promptflow_repo/promptflow/scripts/distributing/configs/distribution_settings.json
{ "releases":{ "internal":{ "promptflow-tools-release":{ "index": "internal-index-release" }, "promptflow-tools-test":{ "index": "internal-index-test" } } }, "targets": { "internal-index-release": { "storage_account": "azuremlsdktestpypi", "packages_container": "repo", "index_container": "wheels", "blob_prefix": "promptflow", "endpoint": "azuremlsdktestpypi.azureedge.net" }, "internal-index-test": { "storage_account": "azuremlsdktestpypi", "packages_container": "repo", "index_container": "wheels", "blob_prefix": "test-promptflow", "endpoint": "azuremlsdktestpypi.azureedge.net" } } }
0
promptflow_repo/promptflow/scripts/distributing
promptflow_repo/promptflow/scripts/distributing/configs/promptflow-tools-release-env.yaml
name: release-env
channels:
  - defaults
  - conda-forge
dependencies:
  - python=3.8
  - pip
  - pip:
      - setuptools
      - twine==4.0.0
      - azure-storage-blob==12.16.0
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/release/promptflow-release-note.md
We are pleased to announce the release of promptflow {{VERSION}}.

This release includes some new features, bug fixes, and improvements. We recommend that all users upgrade to this version. See the [CHANGELOG](https://github.com/microsoft/promptflow/blob/release/promptflow/{{VERSION}}/src/promptflow/CHANGELOG.md) for a list of all the changes.

The release will be available via PyPI:

```bash
pip install --upgrade promptflow
```

Please report any issues with the release on the [promptflow issue tracker](https://github.com/microsoft/promptflow/issues).

Thanks to all the contributors who made this release possible.
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/runtime_mgmt/update-runtime.py
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------

import argparse
import time
from pathlib import Path

import requests
from azure.ai.ml import MLClient, load_environment
from azure.identity import AzureCliCredential

ENVIRONMENT_YAML = Path(__file__).parent / "runtime-env" / "env.yaml"

EXAMPLE_RUNTIME_NAME = "example-runtime-ci"
TEST_RUNTIME_NAME = "test-runtime-ci"


class PFSRuntimeHelper:
    def __init__(self, ml_client: MLClient):
        subscription_id = ml_client._operation_scope.subscription_id
        resource_group_name = ml_client._operation_scope.resource_group_name
        workspace_name = ml_client._operation_scope.workspace_name
        location = ml_client.workspaces.get().location
        self._request_url_prefix = (
            f"https://{location}.api.azureml.ms/flow/api/subscriptions/{subscription_id}"
            f"/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices"
            f"/workspaces/{workspace_name}/FlowRuntimes"
        )
        token = ml_client._credential.get_token("https://management.azure.com/.default").token
        self._headers = {"Authorization": f"Bearer {token}"}

    def update_runtime(self, name: str, env_asset_id: str) -> None:
        body = {
            "runtimeDescription": "Runtime hosted on compute instance, serves for examples checks.",
            "environment": env_asset_id,
            "instanceCount": "",
        }
        response = requests.put(
            f"{self._request_url_prefix}/{name}",
            headers=self._headers,
            json=body,
        )
        response.raise_for_status()


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", help="Path to config.json", type=str)
    return parser.parse_args()


def init_ml_client(
    subscription_id: str,
    resource_group_name: str,
    workspace_name: str,
) -> MLClient:
    return MLClient(
        credential=AzureCliCredential(),
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
    )


def create_environment(ml_client: MLClient) -> str:
    environment = load_environment(source=ENVIRONMENT_YAML)
    env = ml_client.environments.create_or_update(environment)

    # have observed delay between environment creation and asset id availability
    while True:
        try:
            ml_client.environments.get(name=env.name, version=env.version)
            break
        except Exception:
            time.sleep(10)

    # get workspace id from REST workspace object
    resource_group_name = ml_client._operation_scope.resource_group_name
    workspace_name = ml_client._operation_scope.workspace_name
    location = ml_client.workspaces.get().location
    workspace_id = ml_client._workspaces._operation.get(
        resource_group_name=resource_group_name, workspace_name=workspace_name
    ).workspace_id
    # concat environment asset id
    asset_id = (
        f"azureml://locations/{location}/workspaces/{workspace_id}"
        f"/environments/{env.name}/versions/{env.version}"
    )
    return asset_id


def main(args: argparse.Namespace):
    subscription_id, resource_group_name, workspace_name = MLClient._get_workspace_info(args.path)
    ml_client = init_ml_client(
        subscription_id=subscription_id,
        resource_group_name=resource_group_name,
        workspace_name=workspace_name,
    )
    pfs_runtime_helper = PFSRuntimeHelper(ml_client=ml_client)

    print("creating environment...")
    env_asset_id = create_environment(ml_client=ml_client)
    print("created environment, asset id:", env_asset_id)

    print("updating runtime for test...")
    pfs_runtime_helper.update_runtime(name=TEST_RUNTIME_NAME, env_asset_id=env_asset_id)
    print("updating runtime for example...")
    pfs_runtime_helper.update_runtime(name=EXAMPLE_RUNTIME_NAME, env_asset_id=env_asset_id)
    print("runtime updated!")


if __name__ == "__main__":
    main(args=parse_args())
0
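The asset id assembled in `create_environment` above follows the `azureml://locations/...` convention. With illustrative values (all identifiers below are made up, not from the repository), it looks like this:

```python
location = "eastus"                                    # workspace region (illustrative)
workspace_id = "00000000-0000-0000-0000-000000000000"  # workspace GUID (illustrative)
env_name, env_version = "chat-with-pdf", "42"

asset_id = (
    f"azureml://locations/{location}/workspaces/{workspace_id}"
    f"/environments/{env_name}/versions/{env_version}"
)
print(asset_id)
# azureml://locations/eastus/workspaces/00000000-0000-0000-0000-000000000000/environments/chat-with-pdf/versions/42
```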
promptflow_repo/promptflow/scripts/runtime_mgmt
promptflow_repo/promptflow/scripts/runtime_mgmt/runtime-env/env.yaml
$schema: https://azuremlschemas.azureedge.net/latest/environment.schema.json
name: chat-with-pdf
build:
  path: context
inference_config:
  liveness_route:
    port: 8080
    path: /health
  readiness_route:
    port: 8080
    path: /health
  scoring_route:
    port: 8080
    path: /score
0
promptflow_repo/promptflow/scripts/runtime_mgmt/runtime-env
promptflow_repo/promptflow/scripts/runtime_mgmt/runtime-env/context/Dockerfile
FROM mcr.microsoft.com/azureml/promptflow/promptflow-runtime:latest

COPY ./requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
0
promptflow_repo/promptflow/scripts/runtime_mgmt/runtime-env
promptflow_repo/promptflow/scripts/runtime_mgmt/runtime-env/context/requirements.txt
PyPDF2
faiss-cpu
openai
jinja2
python-dotenv
tiktoken
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/extract_steps_from_readme.py
import argparse
from pathlib import Path

from jinja2 import Environment, FileSystemLoader

from ghactions_driver.readme_parse import readme_parser
from ghactions_driver.readme_step import ReadmeStepsManage


def write_readme_shell(readme_path: str, output_folder: str):
    full_text = readme_parser(readme_path)
    bash_script_path = (
        Path(ReadmeStepsManage.git_base_dir()) / output_folder / "bash_script.sh"
    )
    template_env = Environment(
        loader=FileSystemLoader(
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts/readme/ghactions_driver/bash_script"
        )
    )
    bash_script_template = template_env.get_template("bash_script.sh.jinja2")
    with open(bash_script_path, "w") as f:
        f.write(bash_script_template.render({"command": full_text}))


if __name__ == "__main__":
    # setup argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-f",
        "--readme-file",
        help="Input README.md example 'examples/flows/standard/basic/README.md'",
    )
    parser.add_argument(
        "-o",
        "--output-folder",
        help="Output folder for bash_script.sh example 'examples/flows/standard/basic/'",
    )
    args = parser.parse_args()

    write_readme_shell(args.readme_file, args.output_folder)
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/README.md
# Readme Workflow Generator

These tools are used to generate workflows from README.md and Python notebook files in the [examples](../../examples/) folder.

* Generated workflows will be placed in the [.github/workflows/samples_*](../../.github/workflows/) folder.
* The script will also generate a new explanation [README.md](../../examples/README.md) for all the examples.

## 1. Install dependencies

```bash
pip install -r ../../examples/requirements.txt
pip install -r ../../examples/dev_requirements.txt
```

## 2. Generate workflows

### (Option 1) One Step Generation

At the **root** of the repository, run the following command:

```bash
python scripts/readme/readme.py
```

### (Option 2) Step by Step Generation

At the **root** of the repository, run the following commands:

```bash
# Generate workflows from README.md files inside the examples folder
python scripts/readme/readme_generator.py -g "examples/flows/**/README.md"

# Generate workflows from Python notebooks inside the examples folder
python scripts/readme/workflow_generator.py -g "examples/**/*.ipynb"
```

Multiple inputs are supported.

## 3. Options to control generation of the examples [README.md](../../examples/README.md)

### 3.1 Notebook Workflow Generation

* Each workflow contains a metadata area; setting the `.metadata.description` field will display that message in the corresponding cell of the [README.md](../../examples/README.md) file.
* When `.metadata.no_readme_generation` is set to the value `true`, the script will skip generation for that notebook (see the sketch below).

### 3.2 README.md Workflow Generation

* For README.md files, only `bash` cells are collected and converted into a workflow. A file with no such cells produces no workflow.
* Readme descriptions are collected from the first sentence of the first paragraph just below the title: the script collects the words before the first **.** of that paragraph. Multi-line sentences are also supported.
  * A supported description sentence: `This is a sample workflow for testing.`
  * An unsupported description sentence: `Please check www.microsoft.com for more details.`
0
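A minimal sketch of the `.metadata` checks described above, reading a notebook as raw JSON the way the generator scripts do; the file path is illustrative:

```python
import json

# Illustrative notebook path, not a file guaranteed to exist in the repository.
with open("examples/flows/standard/basic/sample.ipynb", encoding="utf-8") as f:
    nb = json.load(f)

description = nb.get("metadata", {}).get("description", "")
skip = nb.get("metadata", {}).get("no_readme_generation") == "true"
print(f"description={description!r}, skip_generation={skip}")
```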
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/workflow_generator.py
import os
import glob
import argparse
from pathlib import Path
import ntpath
import re
import hashlib
import json
from jinja2 import Environment, FileSystemLoader

from ghactions_driver.readme_step import ReadmeStepsManage
from ghactions_driver.resource_resolver import resolve_tutorial_resource
from ghactions_driver.telemetry_obj import Telemetry


def format_ipynb(notebooks):
    # run code formatter on .ipynb files
    for notebook in notebooks:
        os.system(f"black-nb --clear-output {notebook}")


def _get_paths(paths_list):
    """
    Convert the path list to unix format.

    :param paths_list: The input path list.
    :returns: The same list with unix-like paths.
    """
    paths_list.sort()
    if ntpath.sep == os.path.sep:
        return [pth.replace(ntpath.sep, "/") for pth in paths_list]
    return paths_list


def write_notebook_workflow(notebook, name, output_telemetry=Telemetry()):
    temp_name_list = re.split(r"/|\.", notebook)
    temp_name_list = [
        x for x in temp_name_list if x != "tutorials" and x != "examples" and x != "ipynb"
    ]
    temp_name_list = [x.replace("-", "") for x in temp_name_list]
    workflow_name = "_".join(["samples"] + temp_name_list)

    place_to_write = (
        Path(ReadmeStepsManage.git_base_dir())
        / ".github"
        / "workflows"
        / f"{workflow_name}.yml"
    )

    gh_working_dir = "/".join(notebook.split("/")[:-1])
    env = Environment(
        loader=FileSystemLoader("./scripts/readme/ghactions_driver/workflow_templates")
    )
    template = env.get_template("basic_workflow.yml.jinja2")

    # Schedule notebooks at different times to reduce maximum quota usage.
    name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
    schedule_minute = name_hash % 60
    schedule_hour = (name_hash // 60) % 4 + 19  # 19-22 UTC

    if "tutorials" in gh_working_dir:
        notebook_path = Path(ReadmeStepsManage.git_base_dir()) / str(notebook)
        path_filter = resolve_tutorial_resource(workflow_name, notebook_path.resolve())
    elif "samples_configuration" in workflow_name:
        # exception: samples configuration is very simple and not related to other prompt flow examples
        path_filter = (
            "[ examples/configuration.ipynb, .github/workflows/samples_configuration.yml ]"
        )
    else:
        path_filter = f"[ {gh_working_dir}/**, examples/*requirements.txt, .github/workflows/{workflow_name}.yml ]"

    # these workflows require config.json to init PF/ML client
    workflows_require_config_json = [
        "configuration",
        "flowinpipeline",
        "quickstartazure",
        "cloudrunmanagement",
    ]
    if any(keyword in workflow_name for keyword in workflows_require_config_json):
        template = env.get_template("workflow_config_json.yml.jinja2")
    elif "chatwithpdf" in workflow_name:
        template = env.get_template("pdf_workflow.yml.jinja2")
    elif "flowasfunction" in workflow_name:
        template = env.get_template("flow_as_function.yml.jinja2")

    content = template.render(
        {
            "workflow_name": workflow_name,
            "ci_name": "samples_notebook_ci",
            "name": name,
            "gh_working_dir": gh_working_dir,
            "path_filter": path_filter,
            "crontab": f"{schedule_minute} {schedule_hour} * * *",
            "crontab_comment": f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT",
        }
    )

    # To customize a workflow, add new steps in steps.py
    # and make another function for special cases.
    with open(place_to_write.resolve(), "w") as f:
        f.write(content)
    print(f"Write workflow: {place_to_write.resolve()}")
    output_telemetry.workflow_name = workflow_name
    output_telemetry.name = name
    output_telemetry.gh_working_dir = gh_working_dir
    output_telemetry.path_filter = path_filter


def write_workflows(notebooks, output_telemetries=[]):
    # process notebooks
    for notebook in notebooks:
        # get notebook name
        output_telemetry = Telemetry()
        nb_path = Path(notebook)
        name, _ = os.path.splitext(nb_path.parts[-1])

        # write workflow file
        write_notebook_workflow(notebook, name, output_telemetry)
        output_telemetry.notebook = nb_path
        output_telemetries.append(output_telemetry)


def local_filter(callback, array):
    results = []
    for index, item in enumerate(array):
        result = callback(item, index, array)
        # if returned true, append item to results
        if result:
            results.append(item)
    return results


def no_readme_generation_filter(item, index, array) -> bool:
    """
    Skip notebooks whose metadata sets no_readme_generation to "true".
    """
    try:
        if item.endswith("test.ipynb"):
            return False
        # read in notebook
        with open(item, "r", encoding="utf-8") as f:
            data = json.load(f)
        try:
            if data["metadata"]["no_readme_generation"] is not None:
                # no_readme_generation == "true" means skip generation
                return data["metadata"]["no_readme_generation"] != "true"
        except Exception:
            return True  # generate readme
    except Exception:
        return False  # do not generate readme


def main(input_glob, output_files=[], check=False):
    # get list of notebooks
    notebooks = _get_paths(
        [j for i in [glob.glob(p, recursive=True) for p in input_glob] for j in i]
    )
    # check each notebook, get metadata.
    notebooks = local_filter(no_readme_generation_filter, notebooks)
    # format code
    if not check:
        format_ipynb(notebooks)
    # write workflows
    write_workflows(notebooks, output_files)


# run functions
if __name__ == "__main__":
    # setup argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-g", "--input-glob", nargs="+", help="Input glob example 'examples/**/*.ipynb'"
    )
    args = parser.parse_args()

    # call main
    main(input_glob=args.input_glob)
0
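The cron schedule in `write_notebook_workflow` is derived deterministically from the workflow name, spreading runs across minutes 0-59 and hours 19-22 UTC to avoid quota spikes. Stand-alone, the computation looks like this; the workflow name is illustrative:

```python
import hashlib

workflow_name = "samples_flows_standard_basic"  # illustrative
name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
schedule_minute = name_hash % 60          # minute in 0-59
schedule_hour = (name_hash // 60) % 4 + 19  # hour in 19-22 UTC
print(f"cron: {schedule_minute} {schedule_hour} * * *")
```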
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/schema_checker.py
from promptflow._sdk._load_functions import load_yaml
from promptflow._sdk._pf_client import PFClient
from ghactions_driver.readme_step import ReadmeStepsManage
from pathlib import Path
import os
import subprocess
import sys


def install(filename):
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-r", filename])


def main(input_glob_flow_dag):
    # check if flow.dag.yaml contains schema field.
    error = False
    globs = set()
    pf_client = PFClient()

    for p in input_glob_flow_dag:
        globs = globs | set(Path(ReadmeStepsManage.git_base_dir()).glob(p))
    flow_dag_items = sorted([i for i in globs])

    for file in flow_dag_items:
        data = load_yaml(file)
        if "$schema" not in data.keys():
            print(f"{file} does not contain $schema field.")
            error = True
        if error is False:
            new_links = []
            if (Path(file).parent / "requirements.txt").exists():
                install(Path(file).parent / "requirements.txt")
            if "flow-with-symlinks" in str(file):
                saved_path = os.getcwd()
                os.chdir(str(file.parent))
                source_folder = Path("../web-classification")
                for file_name in os.listdir(source_folder):
                    if not Path(file_name).exists():
                        os.symlink(source_folder / file_name, file_name)
                        new_links.append(file_name)
            validation_result = pf_client.flows.validate(
                flow=file,
            )
            if "flow-with-symlinks" in str(file):
                for link in new_links:
                    os.remove(link)
                os.chdir(saved_path)
            print(f"VALIDATE {file}: \n" + repr(validation_result))
            if not validation_result.passed:
                print(f"{file} is not valid.")
                error = True
            if len(validation_result._warnings) > 0:
                print(f"{file} has warnings.")
                error = True

    if error:
        raise Exception("Some flow.dag.yaml validation failed.")
    else:
        print("All flow.dag.yaml validation completed.")


if __name__ == "__main__":
    input_glob_flow_dag = [
        "examples/**/flow.dag.yaml",
    ]
    main(input_glob_flow_dag)
0
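The `$schema` check itself is a one-line dictionary lookup. A stand-alone sketch using PyYAML instead of promptflow's internal `load_yaml` (an assumption made here for self-containment; the file path is illustrative):

```python
import yaml  # PyYAML

# Illustrative flow definition path.
with open("examples/flows/standard/basic/flow.dag.yaml", encoding="utf-8") as f:
    data = yaml.safe_load(f)

if "$schema" not in data:
    print("flow.dag.yaml does not contain a $schema field.")
```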
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/readme.py
# Generate Readme file for the examples folder
import json
from pathlib import Path
import workflow_generator
import readme_generator
from jinja2 import Environment, FileSystemLoader
from ghactions_driver.readme_step import ReadmeStepsManage
from operator import itemgetter
import argparse
import sys
import os
import re

BRANCH = "main"


def get_notebook_readme_description(notebook) -> str:
    """
    Read each ipynb metadata description at .metadata.description
    """
    try:
        # read in notebook
        with open(notebook, "r", encoding="utf-8") as f:
            data = json.load(f)
        return data["metadata"]["description"]
    except Exception:
        print(f"{notebook} metadata description not set")
        return ""


def get_readme_description_first_sentence(readme) -> str:
    """
    Get the first sentence of the first paragraph of a readme
    """
    try:
        with open(readme, "r", encoding="utf-8") as f:
            # read first line (the title)
            line = f.readline()
            sentence = ""
            while True:
                line = f.readline()
                if line.startswith("#"):
                    line = ""
                # skip metadata section
                if line.startswith("---") or line.startswith("resources"):
                    line = ""
                if line.strip() == "" and sentence != "":
                    break
                elif "." in line:
                    sentence += " " + line.split(".")[0].strip()
                    break
                else:
                    if sentence == "":
                        sentence += line.strip()
                    elif line.strip() != "":
                        sentence += " " + line.strip()
            return sentence
    except Exception:
        print(f"Error during reading {readme}")
        return ""


def write_readme(workflow_telemetries, readme_telemetries):
    global BRANCH

    ReadmeStepsManage.git_base_dir()
    readme_file = Path(ReadmeStepsManage.git_base_dir()) / "examples/README.md"

    # Each section collects both readme-driven and notebook-driven entries.
    quickstarts = {"readmes": [], "notebooks": []}
    tutorials = {"readmes": [], "notebooks": []}
    flows = {"readmes": [], "notebooks": []}
    evaluations = {"readmes": [], "notebooks": []}
    chats = {"readmes": [], "notebooks": []}
    toolusecases = {"readmes": [], "notebooks": []}
    connections = {"readmes": [], "notebooks": []}

    def _pick_section(folder, name):
        # Map an example folder (and entry name) to the README section it belongs in.
        if folder.startswith("examples/flows/standard"):
            return flows
        if folder.startswith("examples/connections"):
            return connections
        if folder.startswith("examples/flows/evaluation"):
            return evaluations
        if folder.startswith("examples/tutorials"):
            return quickstarts if "quickstart" in name else tutorials
        if folder.startswith("examples/flows/chat"):
            return chats
        if folder.startswith("examples/tools/use-cases"):
            return toolusecases
        return None

    for workflow_telemetry in workflow_telemetries:
        notebook_name = f"{workflow_telemetry.name}.ipynb"
        gh_working_dir = workflow_telemetry.gh_working_dir
        pipeline_name = workflow_telemetry.workflow_name
        yaml_name = f"{pipeline_name}.yml"
        # For workflows, open ipynb as raw json and
        # read the description at .metadata.description
        description = get_notebook_readme_description(workflow_telemetry.notebook)
        notebook_path = gh_working_dir.replace("examples/", "") + f"/{notebook_name}"
        section = _pick_section(gh_working_dir, notebook_name)
        if section is None:
            print(f"Unknown workflow type: {gh_working_dir}")
        else:
            section["notebooks"].append(
                {
                    "name": notebook_name,
                    "path": notebook_path,
                    "pipeline_name": pipeline_name,
                    "yaml_name": yaml_name,
                    "description": description,
                }
            )

    # Adjust tutorial names:
    for readme_telemetry in readme_telemetries:
        if readme_telemetry.readme_name.endswith("README.md"):
            notebook_name = readme_telemetry.readme_folder.split("/")[-1]
        else:
            notebook_name = readme_telemetry.readme_name.split("/")[-1].replace(".md", "")
        notebook_path = readme_telemetry.readme_name.replace("examples/", "")
        pipeline_name = readme_telemetry.workflow_name
        yaml_name = f"{readme_telemetry.workflow_name}.yml"
        description = get_readme_description_first_sentence(readme_telemetry.readme_name)
        readme_folder = readme_telemetry.readme_folder
        section = _pick_section(readme_folder, notebook_name)
        if section is None:
            print(f"Unknown workflow type: {readme_folder}")
        else:
            section["readmes"].append(
                {
                    "name": notebook_name,
                    "path": notebook_path,
                    "pipeline_name": pipeline_name,
                    "yaml_name": yaml_name,
                    "description": description,
                }
            )

    quickstarts["notebooks"] = sorted(
        quickstarts["notebooks"],
        key=itemgetter("name"),
        reverse=True,
    )
    replacement = {
        "branch": BRANCH,
        "tutorials": tutorials,
        "flows": flows,
        "evaluations": evaluations,
        "chats": chats,
        "toolusecases": toolusecases,
        "connections": connections,
        "quickstarts": quickstarts,
    }

    print("writing README.md...")
    env = Environment(
        loader=FileSystemLoader(
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts/readme/ghactions_driver/readme_templates"
        )
    )
    template = env.get_template("README.md.jinja2")
    with open(readme_file, "w") as f:
        f.write(template.render(replacement))
    print("finished writing README.md")


def main(check):
    if check:
        # Disable print
        sys.stdout = open(os.devnull, "w")

    input_glob = ["examples/**/*.ipynb"]
    workflow_telemetry = []
    workflow_generator.main(input_glob, workflow_telemetry, check=check)

    input_glob_readme = [
        "examples/flows/**/README.md",
        "examples/connections/**/README.md",
        "examples/tutorials/e2e-development/*.md",
        "examples/tutorials/flow-fine-tuning-evaluation/*.md",
        "examples/tutorials/**/README.md",
        "examples/tools/use-cases/**/README.md",
    ]
    # exclude this readme since it is a 3p integration folder; pipeline generation is not included
    input_glob_readme_exclude = ["examples/flows/integrations/**/README.md"]
    readme_telemetry = []
    readme_generator.main(input_glob_readme, input_glob_readme_exclude, readme_telemetry)

    write_readme(workflow_telemetry, readme_telemetry)

    if check:
        output_object = {}
        for workflow in workflow_telemetry:
            workflow_items = re.split(r"\[|,| |\]", workflow.path_filter)
            workflow_items = list(filter(None, workflow_items))
            output_object[workflow.workflow_name] = []
            for item in workflow_items:
                if item == "examples/*requirements.txt":
                    output_object[workflow.workflow_name].append("examples/requirements.txt")
                    output_object[workflow.workflow_name].append("examples/dev_requirements.txt")
                    continue
                output_object[workflow.workflow_name].append(item)
        for readme in readme_telemetry:
            output_object[readme.workflow_name] = []
            readme_items = re.split(r"\[|,| |\]", readme.path_filter)
            readme_items = list(filter(None, readme_items))
            for item in readme_items:
                if item == "examples/*requirements.txt":
                    output_object[readme.workflow_name].append("examples/requirements.txt")
                    output_object[readme.workflow_name].append("examples/dev_requirements.txt")
                    continue
                output_object[readme.workflow_name].append(item)
        # enable output
        sys.stdout = sys.__stdout__
        return output_object
    else:
        return ""


if __name__ == "__main__":
    # setup argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-c", "--check", action="store_true", help="Check what file is affected"
    )
    args = parser.parse_args()
    output = main(args.check)
    print(json.dumps(output))
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/readme/readme_generator.py
import argparse
from functools import reduce
from pathlib import Path
from typing import List

from ghactions_driver.readme_parse import readme_parser
from ghactions_driver.readme_step import ReadmeSteps, ReadmeStepsManage
from ghactions_driver.readme_workflow_generate import write_readme_workflow
from ghactions_driver.telemetry_obj import Telemetry


def local_filter(callback, array: List[Path]) -> List[Path]:
    results = []
    for index, item in enumerate(array):
        # Keep the item when the callback returns True.
        if callback(item, index, array):
            results.append(item)
    return results


def no_readme_generation_filter(item: Path, index, array) -> bool:
    """Skip README generation when the readme contains no runnable steps."""
    try:
        if "build" in str(item):  # skip the build folder
            return False
        full_text = readme_parser(item.relative_to(ReadmeStepsManage.git_base_dir()))
        return full_text != ""
    except Exception as error:
        print(error)
        return False


# generate readme workflows
def main(input_glob, exclude_glob=None, output_files=None):
    # Avoid mutable default arguments: create fresh lists per call.
    exclude_glob = exclude_glob or []
    output_files = output_files if output_files is not None else []

    def set_add(p, q):
        return p | q

    def set_difference(p, q):
        return p - q

    globs = reduce(
        set_add,
        [set(Path(ReadmeStepsManage.git_base_dir()).glob(p)) for p in input_glob],
        set(),
    )
    globs_exclude = reduce(
        set_difference,
        [set(Path(ReadmeStepsManage.git_base_dir()).glob(p)) for p in exclude_glob],
        globs,
    )
    readme_items = sorted(globs_exclude)
    readme_items = local_filter(no_readme_generation_filter, readme_items)
    for readme in readme_items:
        readme_telemetry = Telemetry()
        workflow_name = readme.relative_to(ReadmeStepsManage.git_base_dir())
        # Generate the workflow for this readme.
        write_readme_workflow(workflow_name.resolve(), readme_telemetry)
        ReadmeSteps.cleanup()
        output_files.append(readme_telemetry)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-g",
        "--input-glob",
        nargs="+",
        help="Input README.md glob, e.g. 'examples/flows/**/README.md'",
    )
    args = parser.parse_args()
    main(args.input_glob)
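A minimal sketch of driving `readme_generator.main` programmatically instead of through the CLI; it assumes it runs from the `scripts/readme` folder, and the glob patterns are illustrative:

```python
# Hypothetical driver for readme_generator.main; the globs are examples only.
from readme_generator import main

telemetry = []  # main() appends one Telemetry object per generated workflow
main(
    input_glob=["examples/flows/**/README.md"],
    exclude_glob=["examples/flows/integrations/**/README.md"],
    output_files=telemetry,
)
for t in telemetry:
    print(t.workflow_name, "->", t.target_path)
```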
0
promptflow_repo/promptflow/scripts/readme
promptflow_repo/promptflow/scripts/readme/ghactions_driver/telemetry_obj.py
class Telemetry(object):
    pass
0
promptflow_repo/promptflow/scripts/readme
promptflow_repo/promptflow/scripts/readme/ghactions_driver/readme_step.py
import hashlib
import subprocess
from pathlib import Path

from jinja2 import Environment, FileSystemLoader, Template

from .telemetry_obj import Telemetry


class Step:
    """A step type in a generated workflow."""

    Environment = None

    @staticmethod
    def init_jinja_loader() -> None:
        jinja_folder_path = (
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts"
            / "readme"
            / "ghactions_driver"
            / "workflow_steps"
        )
        Step.Environment = Environment(
            loader=FileSystemLoader(jinja_folder_path.resolve())
        )

    def __init__(self, name: str) -> None:
        self.workflow_name = name

    def get_workflow_step(self) -> str:
        # Virtual method, to be overridden by subclasses.
        return ""

    @staticmethod
    def get_workflow_template(step_file_name: str) -> Template:
        if Step.Environment is None:
            Step.init_jinja_loader()
        return Step.Environment.get_template(step_file_name)


class AzureLoginStep(Step):
    def __init__(self) -> None:
        Step.__init__(self, "Azure Login")

    def get_workflow_step(self) -> str:
        template = Step.get_workflow_template("step_azure_login.yml.jinja2")
        return template.render({"step_name": self.workflow_name})


class InstallDependenciesStep(Step):
    def __init__(self) -> None:
        Step.__init__(self, "Prepare requirements")

    def get_workflow_step(self) -> str:
        template = Step.get_workflow_template("step_install_deps.yml.jinja2")
        return template.render(
            {
                "step_name": self.workflow_name,
                "working_dir": ReadmeSteps.working_dir,
            }
        )


class InstallDevDependenciesStep(Step):
    def __init__(self) -> None:
        Step.__init__(self, "Prepare dev requirements")

    def get_workflow_step(self) -> str:
        template = Step.get_workflow_template("step_install_dev_deps.yml.jinja2")
        return template.render(
            {
                "step_name": self.workflow_name,
                "working_dir": ReadmeSteps.working_dir,
            }
        )


class CreateAoaiFromYaml(Step):
    def __init__(self, yaml_name: str) -> None:
        Step.__init__(self, "Create AOAI Connection from YAML")
        self.yaml_name = yaml_name

    def get_workflow_step(self) -> str:
        template = Step.get_workflow_template("step_yml_create_aoai.yml.jinja2")
        return template.render(
            {
                "step_name": self.workflow_name,
                "yaml_name": self.yaml_name,
            }
        )


class ExtractStepsAndRun(Step):
    def __init__(self) -> None:
        Step.__init__(self, f"Extract Steps {ReadmeSteps.readme_name}")

    def get_workflow_step(self) -> str:
        template = Step.get_workflow_template("step_extract_steps_and_run.yml.jinja2")
        return template.render(
            {
                "step_name": self.workflow_name,
                "working_dir": ReadmeSteps.working_dir,
                "readme_name": ReadmeSteps.readme_name,
            }
        )


class ExtractStepsAndRunGPTFour(Step):
    def __init__(self) -> None:
        Step.__init__(self, f"Extract Steps {ReadmeSteps.readme_name}")

    def get_workflow_step(self) -> str:
        template = Step.get_workflow_template(
            "step_extract_steps_and_run_gpt4.yml.jinja2"
        )
        return template.render(
            {
                "step_name": self.workflow_name,
                "working_dir": ReadmeSteps.working_dir,
                "readme_name": ReadmeSteps.readme_name,
            }
        )


class CreateEnv(Step):
    def __init__(self) -> None:
        Step.__init__(self, "Refine .env file")

    def get_workflow_step(self) -> str:
        template = Step.get_workflow_template("step_create_env.yml.jinja2")
        return template.render(
            {"step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir}
        )


class CreateEnvGPTFour(Step):
    def __init__(self) -> None:
        Step.__init__(self, "Refine .env file")

    def get_workflow_step(self) -> str:
        template = Step.get_workflow_template("step_create_env_gpt4.yml.jinja2")
        return template.render(
            {"step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir}
        )


class CreateAoaiFromEnv(Step):
    def __init__(self, connection_name: str) -> None:
        Step.__init__(self, "Create AOAI Connection from ENV file")
        self.connection_name = connection_name

    def get_workflow_step(self) -> str:
        template = Step.get_workflow_template("step_env_create_aoai.yml.jinja2")
        return template.render(
            {
                "step_name": self.workflow_name,
                "working_dir": ReadmeSteps.working_dir,
                "connection_name": self.connection_name,
            }
        )


class CreateRunYaml(Step):
    def __init__(self) -> None:
        Step.__init__(self, "Create run.yml")

    def get_workflow_step(self) -> str:
        template = Step.get_workflow_template("step_create_run_yml.yml.jinja2")
        return template.render(
            {"step_name": self.workflow_name, "working_dir": ReadmeSteps.working_dir}
        )


class ReadmeSteps:
    """Static class to record steps, to be filled into workflow templates and README."""

    step_array = []  # recorded steps
    readme_name = ""  # recorded readme name
    working_dir = ""  # working directory of the flow, relative to git_base_dir
    template = ""  # base template under the workflow_templates folder
    workflow = ""  # target workflow name to be generated

    @staticmethod
    def remember_step(step: Step) -> Step:
        ReadmeSteps.step_array.append(step)
        return step

    @staticmethod
    def get_length() -> int:
        return len(ReadmeSteps.step_array)

    # region steps
    @staticmethod
    def create_env() -> Step:
        return ReadmeSteps.remember_step(CreateEnv())

    @staticmethod
    def create_env_gpt4() -> Step:
        return ReadmeSteps.remember_step(CreateEnvGPTFour())

    @staticmethod
    def yml_create_aoai(yaml_name: str) -> Step:
        return ReadmeSteps.remember_step(CreateAoaiFromYaml(yaml_name=yaml_name))

    @staticmethod
    def env_create_aoai(connection_name: str) -> Step:
        return ReadmeSteps.remember_step(
            CreateAoaiFromEnv(connection_name=connection_name)
        )

    @staticmethod
    def azure_login() -> Step:
        return ReadmeSteps.remember_step(AzureLoginStep())

    @staticmethod
    def install_dependencies() -> Step:
        return ReadmeSteps.remember_step(InstallDependenciesStep())

    @staticmethod
    def install_dev_dependencies() -> Step:
        return ReadmeSteps.remember_step(InstallDevDependenciesStep())

    @staticmethod
    def create_run_yaml() -> Step:
        return ReadmeSteps.remember_step(CreateRunYaml())

    @staticmethod
    def extract_steps_and_run() -> Step:
        return ReadmeSteps.remember_step(ExtractStepsAndRun())

    @staticmethod
    def extract_steps_and_run_gpt_four() -> Step:
        return ReadmeSteps.remember_step(ExtractStepsAndRunGPTFour())
    # endregion steps

    @staticmethod
    def setup_target(
        working_dir: str, template: str, target: str, readme_name: str
    ) -> str:
        """Used at the very head of a jinja template to record basic information."""
        ReadmeSteps.working_dir = working_dir
        ReadmeSteps.template = template
        ReadmeSteps.workflow = target
        ReadmeSteps.step_array = []
        ReadmeSteps.readme_name = readme_name
        return ""

    @staticmethod
    def cleanup() -> None:
        ReadmeSteps.working_dir = ""
        ReadmeSteps.template = ""
        ReadmeSteps.workflow = ""
        ReadmeSteps.step_array = []


class ReadmeStepsManage:
    """Static methods for managing all readme steps."""

    repo_base_dir = ""

    @staticmethod
    def git_base_dir() -> str:
        """Get the base directory of the git repo, falling back to this file's location."""
        if ReadmeStepsManage.repo_base_dir == "":
            try:
                ReadmeStepsManage.repo_base_dir = (
                    subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
                    .decode("utf-8")
                    .strip()
                )
            except Exception:
                # Not inside a git repo: fall back to the known location of this file.
                ReadmeStepsManage.repo_base_dir = str(
                    Path(__file__).parent.parent.parent.parent.resolve()
                )
            print(ReadmeStepsManage.repo_base_dir)
        return ReadmeStepsManage.repo_base_dir

    @staticmethod
    def write_workflow(
        workflow_name: str, pipeline_name: str, output_telemetry=Telemetry()
    ) -> None:
        # Schedule notebooks at different times to reduce peak quota usage:
        # hash the workflow name into a minute (0-59) and an hour (19-22 UTC).
        name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
        schedule_minute = name_hash % 60
        schedule_hour = (name_hash // 60) % 4 + 19  # 19-22 UTC

        if "tutorials" in workflow_name:
            # The markdown filename has some exceptions; handle them here.
            if "chat_with_pdf" in workflow_name:
                readme_name = "chat-with-pdf.md"
            elif (
                "fine_tuning_evaluation_promptflow_quality_improvement"
                in workflow_name
            ):
                readme_name = "promptflow-quality-improvement.md"
            else:
                readme_name = "README.md"
            readme_path = (
                Path(ReadmeStepsManage.git_base_dir())
                / ReadmeSteps.working_dir
                / readme_name
            )
            # Local import to avoid a circular import.
            from .resource_resolver import resolve_tutorial_resource

            path_filter = resolve_tutorial_resource(
                workflow_name, readme_path.resolve()
            )
        else:
            if (
                "flow_with_additional_includes" in workflow_name
                or "flow_with_symlinks" in workflow_name
            ):
                # These two flows depend on the web-classification flow, so their
                # workflows should also listen to changes in web-classification.
                path_filter = (
                    f"[ {ReadmeSteps.working_dir}/**, "
                    + "examples/*requirements.txt, "
                    + "examples/flows/standard/web-classification/**, "
                    + f".github/workflows/{workflow_name}.yml ]"
                )
            else:
                path_filter = (
                    f"[ {ReadmeSteps.working_dir}/**, "
                    + "examples/*requirements.txt, "
                    + f".github/workflows/{workflow_name}.yml ]"
                )
        replacements = {
            "steps": ReadmeSteps.step_array,
            "workflow_name": workflow_name,
            "ci_name": pipeline_name,
            "path_filter": path_filter,
            "crontab": f"{schedule_minute} {schedule_hour} * * *",
            "crontab_comment": (
                f"Every day starting at {schedule_hour - 16}:{schedule_minute} BJT"
            ),
        }
        workflow_template_path = (
            Path(ReadmeStepsManage.git_base_dir())
            / "scripts"
            / "readme"
            / "ghactions_driver"
            / "workflow_templates"
        )
        target_path = (
            Path(ReadmeStepsManage.git_base_dir())
            / ".github"
            / "workflows"
            / f"{workflow_name}.yml"
        )
        template = Environment(
            loader=FileSystemLoader(workflow_template_path.resolve())
        ).get_template(ReadmeSteps.template)
        content = template.render(replacements)
        with open(target_path.resolve(), "w", encoding="utf-8") as f:
            f.write(content)
        print(f"Write readme workflow: {target_path.resolve()}")
        output_telemetry.workflow_name = workflow_name
        output_telemetry.target_path = target_path
        output_telemetry.readme_folder = ReadmeSteps.working_dir
        output_telemetry.readme_name = ReadmeSteps.readme_name
        output_telemetry.path_filter = path_filter
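The hash-based scheduling in `write_workflow` spreads notebook CI runs across a four-hour window so that nightly runs do not all hit quota at once. A small standalone sketch of that calculation (the workflow name is an example):

```python
import hashlib

def cron_for(workflow_name: str) -> str:
    # Same derivation as write_workflow: stable hash -> minute 0-59, hour 19-22 UTC.
    name_hash = int(hashlib.sha512(workflow_name.encode()).hexdigest(), 16)
    minute = name_hash % 60
    hour = (name_hash // 60) % 4 + 19
    return f"{minute} {hour} * * *"

print(cron_for("samples_flows_standard_web_classification"))  # e.g. "37 21 * * *"
```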
0
promptflow_repo/promptflow/scripts/readme
promptflow_repo/promptflow/scripts/readme/ghactions_driver/readme_workflow_generate.py
from pathlib import Path

from .readme_step import ReadmeSteps, ReadmeStepsManage
from .telemetry_obj import Telemetry


def write_readme_workflow(readme_path, output_telemetry=Telemetry()):
    relative_path = Path(readme_path).relative_to(
        Path(ReadmeStepsManage.git_base_dir())
    )
    workflow_path = relative_path.parent.as_posix()
    relative_name_path = Path(readme_path).relative_to(
        Path(ReadmeStepsManage.git_base_dir()) / "examples"
    )
    workflow_name = (
        relative_name_path.as_posix()
        .replace(".md", "")
        .replace("/README", "")
        .replace("/", "_")
        .replace("-", "_")
    )
    workflow_name = "samples_" + workflow_name

    ReadmeSteps.setup_target(
        working_dir=workflow_path,
        template="basic_workflow_replace_config_json.yml.jinja2"
        if "e2e_development_chat_with_pdf" in workflow_name
        else "basic_workflow_replace.yml.jinja2",
        target=f"{workflow_name}.yml",
        readme_name=relative_path.as_posix(),
    )
    ReadmeSteps.install_dependencies()
    ReadmeSteps.install_dev_dependencies()
    if workflow_name.endswith("flows_chat_chat_with_image") or workflow_name.endswith(
        "flows_standard_describe_image"
    ):
        ReadmeSteps.create_env_gpt4()
        ReadmeSteps.env_create_aoai("aoai_gpt4v_connection")
    else:
        ReadmeSteps.create_env()
    if workflow_name.endswith("pdf"):
        ReadmeSteps.env_create_aoai("chat_with_pdf_custom_connection")
    ReadmeSteps.create_run_yaml()
    if (
        workflow_name.endswith("flows_standard_basic_with_builtin_llm")
        or workflow_name.endswith("flows_standard_flow_with_symlinks")
        or workflow_name.endswith("flows_standard_flow_with_additional_includes")
        or workflow_name.endswith("flows_standard_basic_with_connection")
    ):
        ReadmeSteps.yml_create_aoai("examples/connections/azure_openai.yml")
    ReadmeSteps.azure_login()
    if workflow_name.endswith("flows_chat_chat_with_image") or workflow_name.endswith(
        "flows_standard_describe_image"
    ):
        ReadmeSteps.extract_steps_and_run_gpt_four()
    else:
        ReadmeSteps.extract_steps_and_run()
    ReadmeStepsManage.write_workflow(
        workflow_name, "samples_readme_ci", output_telemetry
    )
    ReadmeSteps.cleanup()
0
promptflow_repo/promptflow/scripts/readme
promptflow_repo/promptflow/scripts/readme/ghactions_driver/readme_parse.py
import io
import re
from pathlib import Path

import panflute
import pypandoc

from .readme_step import ReadmeStepsManage


def strip_comments(code):
    code = str(code)
    code = re.sub(r"(?m)^ *#.*\n?", "", code)  # remove comments
    splits = [ll.rstrip() for ll in code.splitlines() if ll.strip()]  # drop empty lines
    # Remove interactive-only commands: --interactive runs, `pf flow serve`,
    # and `pf connection delete`.
    splits_no_interactive = [
        split
        for split in splits
        if "interactive" not in split
        and "pf flow serve" not in split
        and "pf connection delete" not in split
    ]
    text = "\n".join([ll.rstrip() for ll in splits_no_interactive])
    # Replace documentation placeholders with CI environment variables.
    text = text.replace("<your_api_key>", "$aoai_api_key")
    text = text.replace("<your_api_base>", "$aoai_api_endpoint")
    text = text.replace("<your_subscription_id>", "$test_workspace_sub_id")
    text = text.replace("<your_resource_group_name>", "$test_workspace_rg")
    text = text.replace("<your_workspace_name>", "$test_workspace_name")
    return text


def prepare(doc):
    doc.full_text = ""


def action(elem, doc):
    if isinstance(elem, panflute.CodeBlock) and "bash" in elem.classes:
        doc.full_text = "\n".join([doc.full_text, strip_comments(elem.text)])


def readme_parser(filename: str):
    real_filename = Path(ReadmeStepsManage.git_base_dir()) / filename
    data = pypandoc.convert_file(str(real_filename), "json")
    f = io.StringIO(data)
    doc = panflute.load(f)
    panflute.run_filter(action, prepare, doc=doc)
    return doc.full_text
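To illustrate what `strip_comments` does to a fenced bash block, here is a self-contained sketch that mirrors its filtering logic (the input snippet is made up):

```python
import re

snippet = """# install deps
pip install -r requirements.txt

pf flow test --flow . --interactive
pf run create --flow . --data ./data.jsonl
"""

# Mirror of strip_comments: drop comments and blank lines, then interactive-only commands.
code = re.sub(r"(?m)^ *#.*\n?", "", snippet)
lines = [ln.rstrip() for ln in code.splitlines() if ln.strip()]
lines = [ln for ln in lines if "interactive" not in ln and "pf flow serve" not in ln]
print("\n".join(lines))  # only the pip install and pf run create lines survive
```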
0
promptflow_repo/promptflow/scripts/readme
promptflow_repo/promptflow/scripts/readme/ghactions_driver/resource_resolver.py
from pathlib import Path
from typing import List

import markdown
import nbformat

from .readme_step import ReadmeStepsManage

RESOURCES_KEY_NAME = "resources"
RESOURCES_KEY_ERROR_MESSAGE = (
    "Please follow the examples contributing guide to declare tutorial resources: "
    "https://github.com/microsoft/promptflow/blob/main/examples/CONTRIBUTING.md"
)


def _parse_resources_string_from_notebook(path: Path) -> str:
    with open(path, "r", encoding="utf-8") as f:
        nb = nbformat.read(f, as_version=4)
    if RESOURCES_KEY_NAME not in nb.metadata:
        raise Exception(RESOURCES_KEY_ERROR_MESSAGE)
    return nb.metadata[RESOURCES_KEY_NAME]


def _parse_resources_string_from_markdown(path: Path) -> str:
    markdown_content = path.read_text(encoding="utf-8")
    md = markdown.Markdown(extensions=["meta"])
    md.convert(markdown_content)
    if RESOURCES_KEY_NAME not in md.Meta:
        raise Exception(RESOURCES_KEY_ERROR_MESSAGE)
    return md.Meta[RESOURCES_KEY_NAME][0]


def _parse_resources(path: Path) -> List[str]:
    if path.suffix == ".ipynb":
        resources_string = _parse_resources_string_from_notebook(path)
    elif path.suffix == ".md":
        resources_string = _parse_resources_string_from_markdown(path)
    else:
        raise Exception(f"Unknown file type: {path.suffix!r}")
    return [resource.strip() for resource in resources_string.split(",")]


def resolve_tutorial_resource(workflow_name: str, resource_path: Path) -> str:
    """Resolve tutorial resources so that the workflow can be triggered more precisely.

    A tutorial workflow should listen to changes in:
    1. its working directory
    2. the resources declared in the notebook/markdown metadata
    3. its workflow file
    4. examples/requirements.txt (for release verification)
    5. examples/connections/azure_openai.yml (fallback, as it is the most basic
       and common connection)
    """
    # working directory
    git_base_dir = Path(ReadmeStepsManage.git_base_dir())
    working_dir = resource_path.parent.relative_to(git_base_dir).as_posix()
    path_filter_list = [f"{working_dir}/**"]
    # resources declared in the text file
    resources = _parse_resources(resource_path)
    for resource in resources:
        # skip empty entries
        if len(resource) == 0:
            continue
        # validate that the resource path exists
        resource_path = (git_base_dir / resource).resolve()
        if not resource_path.exists():
            raise FileNotFoundError(
                "Please declare tutorial resource paths relative to the git repo root."
            )
        elif resource_path.is_file():
            path_filter_list.append(resource)
        else:
            path_filter_list.append(f"{resource}/**")
    # workflow file
    path_filter_list.append(f".github/workflows/{workflow_name}.yml")
    # add examples/requirements.txt if not already present
    examples_req = "examples/requirements.txt"
    if examples_req not in path_filter_list:
        path_filter_list.append(examples_req)
    # add examples/connections/azure_openai.yml if not already present
    aoai_conn = "examples/connections/azure_openai.yml"
    if aoai_conn not in path_filter_list:
        path_filter_list.append(aoai_conn)
    return "[ " + ", ".join(path_filter_list) + " ]"
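Given the rules in the docstring above, a hand-rolled rendition of the path-filter assembly looks like this; the folder, resource, and workflow names below are made up for illustration:

```python
# Illustrative only: mimics resolve_tutorial_resource's output for a hypothetical tutorial.
working_dir = "examples/tutorials/get-started"
resources = ["examples/flows/standard/web-classification"]  # from notebook metadata
workflow_name = "samples_getstarted"

filters = [f"{working_dir}/**"]
filters += [f"{r}/**" for r in resources]  # directories are watched recursively
filters.append(f".github/workflows/{workflow_name}.yml")
for fallback in ("examples/requirements.txt", "examples/connections/azure_openai.yml"):
    if fallback not in filters:
        filters.append(fallback)
print("[ " + ", ".join(filters) + " ]")
```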
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_extract_steps_and_run.yml.jinja2
- name: {{ step_name }}
  working-directory: ${{ '{{' }} github.workspace }}
  run: |
    python scripts/readme/extract_steps_from_readme.py -f {{ readme_name }} -o {{ working_dir }}
- name: Cat script
  working-directory: {{ working_dir }}
  run: |
    cat bash_script.sh
- name: Run scripts against canary workspace (scheduled runs only)
  if: github.event_name == 'schedule'
  working-directory: {{ working_dir }}
  run: |
    export aoai_api_key=${{ '{{' }} secrets.AOAI_API_KEY_TEST }}
    export aoai_api_endpoint=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
    export test_workspace_sub_id=${{ '{{' }} secrets.TEST_WORKSPACE_SUB_ID }}
    export test_workspace_rg=${{ '{{' }} secrets.TEST_WORKSPACE_RG }}
    export test_workspace_name=${{ '{{' }} secrets.TEST_WORKSPACE_NAME_CANARY }}
    bash bash_script.sh
- name: Run scripts against production workspace
  if: github.event_name != 'schedule'
  working-directory: {{ working_dir }}
  run: |
    export aoai_api_key=${{ '{{' }} secrets.AOAI_API_KEY_TEST }}
    export aoai_api_endpoint=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
    export test_workspace_sub_id=${{ '{{' }} secrets.TEST_WORKSPACE_SUB_ID }}
    export test_workspace_rg=${{ '{{' }} secrets.TEST_WORKSPACE_RG }}
    export test_workspace_name=${{ '{{' }} secrets.TEST_WORKSPACE_NAME_PROD }}
    bash bash_script.sh
- name: Pip List for Debug
  if: ${{ '{{' }} always() }}
  working-directory: {{ working_dir }}
  run: |
    pip list
- name: Upload artifact
  if: ${{ '{{' }} always() }}
  uses: actions/upload-artifact@v3
  with:
    name: artifact
    path: {{ working_dir }}/bash_script.sh
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_azure_login.yml.jinja2
- name: {{ step_name }}
  uses: azure/login@v1
  with:
    creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_create_run_yml.yml.jinja2
- name: {{ step_name }}
  working-directory: {{ working_dir }}
  run: |
    gpt_base=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
    gpt_base=$(echo ${gpt_base//\//\\/})
    if [[ -e run.yml ]]; then
      sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ '{{' }} secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml
    fi
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_install_dev_deps.yml.jinja2
- name: {{ step_name }}
  working-directory: examples
  run: |
    python -m pip install --upgrade pip
    pip install -r dev_requirements.txt
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_extract_steps_and_run_gpt4.yml.jinja2
- name: {{ step_name }}
  working-directory: ${{ '{{' }} github.workspace }}
  run: |
    python scripts/readme/extract_steps_from_readme.py -f {{ readme_name }} -o {{ working_dir }}
- name: Cat script
  working-directory: {{ working_dir }}
  run: |
    cat bash_script.sh
- name: Run scripts against canary workspace (scheduled runs only)
  if: github.event_name == 'schedule'
  working-directory: {{ working_dir }}
  run: |
    export aoai_api_key=${{ '{{' }} secrets.AOAI_GPT_4V_KEY }}
    export aoai_api_endpoint=${{ '{{' }} secrets.AOAI_GPT_4V_ENDPOINT }}
    export test_workspace_sub_id=${{ '{{' }} secrets.TEST_WORKSPACE_SUB_ID }}
    export test_workspace_rg=${{ '{{' }} secrets.TEST_WORKSPACE_RG }}
    export test_workspace_name=${{ '{{' }} secrets.TEST_WORKSPACE_NAME_CANARY }}
    bash bash_script.sh
- name: Run scripts against production workspace
  if: github.event_name != 'schedule'
  working-directory: {{ working_dir }}
  run: |
    export aoai_api_key=${{ '{{' }} secrets.AOAI_GPT_4V_KEY }}
    export aoai_api_endpoint=${{ '{{' }} secrets.AOAI_GPT_4V_ENDPOINT }}
    export test_workspace_sub_id=${{ '{{' }} secrets.TEST_WORKSPACE_SUB_ID }}
    export test_workspace_rg=${{ '{{' }} secrets.TEST_WORKSPACE_RG }}
    export test_workspace_name=${{ '{{' }} secrets.TEST_WORKSPACE_NAME_PROD }}
    bash bash_script.sh
- name: Pip List for Debug
  if: ${{ '{{' }} always() }}
  working-directory: {{ working_dir }}
  run: |
    pip list
- name: Upload artifact
  if: ${{ '{{' }} always() }}
  uses: actions/upload-artifact@v3
  with:
    name: artifact
    path: {{ working_dir }}/bash_script.sh
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_create_env.yml.jinja2
- name: {{ step_name }}
  working-directory: {{ working_dir }}
  run: |
    AOAI_API_KEY=${{ '{{' }} secrets.AOAI_API_KEY_TEST }}
    AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
    AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
    if [[ -e .env.example ]]; then
      echo "env replacement"
      sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
      mv .env.example .env
    fi
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_install_deps.yml.jinja2
- name: {{ step_name }}
  working-directory: examples
  run: |
    if [[ -e requirements.txt ]]; then
      python -m pip install --upgrade pip
      pip install -r requirements.txt
    fi
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_yml_create_aoai.yml.jinja2
- name: {{ step_name }}
  working-directory: ${{ '{{' }} github.workspace }}
  run: pf connection create --file {{ yaml_name }} --set api_key=${{ '{{' }} secrets.AOAI_API_KEY_TEST }} api_base=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_env_create_aoai.yml.jinja2
- name: {{ step_name }}
  working-directory: {{ working_dir }}
  run: |
    if [[ -e .env ]]; then
      pf connection create --file .env --name {{ connection_name }}
    fi
    if [[ -e azure_openai.yml ]]; then
      pf connection create --file azure_openai.yml --name {{ connection_name }}
    fi
    pf connection list
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_steps/step_create_env_gpt4.yml.jinja2
- name: {{ step_name }}
  working-directory: {{ working_dir }}
  run: |
    AOAI_API_KEY=${{ '{{' }} secrets.AOAI_GPT_4V_KEY }}
    AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_GPT_4V_ENDPOINT }}
    AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
    cp ../../../connections/azure_openai.yml ./azure_openai.yml
    sed -i -e "s/<user-input>/$AOAI_API_KEY/g" -e "s/aoai-api-endpoint/$AOAI_API_ENDPOINT/g" azure_openai.yml
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/bash_script/bash_script.sh.jinja2
#!/usr/bin/env bash
set -xe
{{ command }}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/readme_templates/README.md.jinja2
# Promptflow examples

[![code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![license: MIT](https://img.shields.io/badge/License-MIT-purple.svg)](../LICENSE)

## Get started

**Install dependencies**

- Bootstrap your Python environment, e.g. create a new [conda](https://conda.io/projects/conda/en/latest/user-guide/getting-started.html) environment: `conda create -n pf-examples python=3.9`.
- Install the required packages into that environment: `pip install -r requirements.txt`.
- Show the installed SDK: `pip show promptflow`.

**Quick start**

| path | status | description |
------|--------|-------------
{% for quickstart in quickstarts.notebooks %}| [{{ quickstart.name }}]({{ quickstart.path }}) | [![{{quickstart.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{quickstart.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{quickstart.yaml_name}}) | {{ quickstart.description }} |
{% endfor %}

## CLI examples

### Tutorials ([tutorials](tutorials))

| path | status | description |
------|--------|-------------
{% for tutorial in tutorials.readmes %}| [{{ tutorial.name }}]({{ tutorial.path }}) | [![{{tutorial.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{tutorial.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{tutorial.yaml_name}}) | {{ tutorial.description }} |
{% endfor %}

### Flows ([flows](flows))

#### [Standard flows](flows/standard/)

| path | status | description |
------|--------|-------------
{% for flow in flows.readmes %}| [{{ flow.name }}]({{ flow.path }}) | [![{{flow.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}) | {{ flow.description }} |
{% endfor %}

#### [Evaluation flows](flows/evaluation/)

| path | status | description |
------|--------|-------------
{% for evaluation in evaluations.readmes %}| [{{ evaluation.name }}]({{ evaluation.path }}) | [![{{evaluation.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{evaluation.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{evaluation.yaml_name}}) | {{ evaluation.description }} |
{% endfor %}

#### [Chat flows](flows/chat/)

| path | status | description |
------|--------|-------------
{% for chat in chats.readmes %}| [{{ chat.name }}]({{ chat.path }}) | [![{{chat.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{chat.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{chat.yaml_name}}) | {{ chat.description }} |
{% endfor %}

### Tool Use Cases ([Tool Use Cases](tools/use-cases))

| path | status | description |
------|--------|-------------
{% for toolusecase in toolusecases.readmes %}| [{{ toolusecase.name }}]({{ toolusecase.path }}) | [![{{toolusecase.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{toolusecase.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{toolusecase.yaml_name}}) | {{ toolusecase.description }} |
{% endfor %}

### Connections ([connections](connections))

| path | status | description |
------|--------|-------------
{% for connection in connections.readmes %}| [{{ connection.name }}]({{ connection.path }}) | [![{{connection.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{connection.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{connection.yaml_name}}) | {{ connection.description }} |
{% endfor %}

## SDK examples

| path | status | description |
------|--------|-------------
{% for quickstart in quickstarts.notebooks %}| [{{ quickstart.name }}]({{ quickstart.path }}) | [![{{quickstart.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{quickstart.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{quickstart.yaml_name}}) | {{ quickstart.description }} |
{% endfor %}
{%- for tutorial in tutorials.notebooks -%}| [{{ tutorial.name }}]({{ tutorial.path }}) | [![{{tutorial.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{tutorial.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{tutorial.yaml_name}}) | {{ tutorial.description }} |
{% endfor %}
{%- if connections.notebooks|length > 0 -%}{% for connection in connections.notebooks %}| [{{ connection.name }}]({{ connection.path }}) | [![{{connection.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{connection.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{connection.yaml_name}}) | {{ connection.description }} |
{% endfor %}{% endif %}
{%- if chats.notebooks|length > 0 -%}{% for chat in chats.notebooks %}| [{{ chat.name }}]({{ chat.path }}) | [![{{chat.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{chat.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{chat.yaml_name}}) | {{ chat.description }} |
{% endfor %}{% endif %}
{%- if evaluations.notebooks|length > 0 -%}{% for evaluation in evaluations.notebooks %}| [{{ evaluation.name }}]({{ evaluation.path }}) | [![{{evaluation.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{evaluation.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{evaluation.yaml_name}}) | {{ evaluation.description }} |
{% endfor %}{% endif %}
{%- if flows.notebooks|length > 0 -%}{% for flow in flows.notebooks %}| [{{ flow.name }}]({{ flow.path }}) | [![{{flow.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}) | {{ flow.description }} |
{% endfor %}{% endif %}

## Contributing

We welcome contributions and suggestions! Please see the [contributing guidelines](../CONTRIBUTING.md) for details.

## Code of Conduct

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). Please see the [code of conduct](../CODE_OF_CONDUCT.md) for details.

## Reference

* [Promptflow documentation](https://microsoft.github.io/promptflow/)
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/basic_workflow_replace_config_json.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" - name: Generate config.json for canary workspace (scheduled runs only) if: github.event_name == 'schedule' run: echo '${{ '{{' }} secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ '{{' }} github.workspace }}/examples/config.json - name: Generate config.json for production workspace if: github.event_name != 'schedule' run: echo '${{ '{{' }} secrets.EXAMPLE_WORKSPACE_CONFIG_JSON_PROD }}' > ${{ '{{' }} github.workspace }}/examples/config.json {%- filter indent(width=2) -%} {% for step in steps %} {{ step.get_workflow_step() }}{% endfor %} {%- endfilter -%} {% endblock steps %}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/basic_workflow.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Azure Login uses: azure/login@v1 with: creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }} - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" - name: Prepare requirements run: | python -m pip install --upgrade pip pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt - name: Create Aoai Connection run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}" - name: Test Notebook working-directory: {{ gh_working_dir }} run: | papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb - name: Upload artifact if: ${{ '{{' }} always() }} uses: actions/upload-artifact@v3 with: name: artifact path: {{ gh_working_dir }} {% endblock steps %}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/flow_as_function.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Azure Login uses: azure/login@v1 with: creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }} - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" - name: Prepare requirements run: | python -m pip install --upgrade pip pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt - name: Create Aoai Connection run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}" - name: Create new Aoai Connection run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}" name=new_ai_connection - name: Test Notebook working-directory: {{ gh_working_dir }} run: | papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb -p api_key ${{ '{{' }} secrets.AOAI_API_KEY_TEST }} -p api_base ${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} -p api_version 2023-07-01-preview - name: Upload artifact if: ${{ '{{' }} always() }} uses: actions/upload-artifact@v3 with: name: artifact path: {{ gh_working_dir }} {% endblock steps %}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/workflow_config_json.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Generate config.json for canary workspace (scheduled runs only) if: github.event_name == 'schedule' run: echo '${{ '{{' }} secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ '{{' }} github.workspace }}/examples/config.json - name: Generate config.json for production workspace if: github.event_name != 'schedule' run: echo '${{ '{{' }} secrets.EXAMPLE_WORKSPACE_CONFIG_JSON_PROD }}' > ${{ '{{' }} github.workspace }}/examples/config.json - name: Azure Login uses: azure/login@v1 with: creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }} - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" - name: Prepare requirements run: | python -m pip install --upgrade pip pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt - name: Create Aoai Connection run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}" - name: Test Notebook working-directory: {{ gh_working_dir }} run: | papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb - name: Upload artifact if: ${{ '{{' }} always() }} uses: actions/upload-artifact@v3 with: name: artifact path: {{ gh_working_dir }} {% endblock steps %}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/basic_workflow_replace.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" {%- filter indent(width=2) -%} {% for step in steps %} {{ step.get_workflow_step() }}{% endfor %} {%- endfilter -%} {% endblock steps %}
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/workflow_skeleton.yml.jinja2
# This code is autogenerated.
# Code is generated by running custom script: python3 readme.py
# Any manual changes to this file may cause incorrect behavior.
# Any manual changes will be overwritten if the code is regenerated.

name: {{ workflow_name }}
on:
  schedule:
    - cron: "{{ crontab }}"  # {{ crontab_comment }}
  pull_request:
    branches: [ main ]
    paths: {{ path_filter }}
  workflow_dispatch:
env:
  IS_IN_CI_PIPELINE: "true"
jobs:
  {{ workflow_name }}:
{%- filter indent(width=4) -%}
{% block steps %}
{% endblock steps %}
{%- endfilter -%}
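The skeleton above is consumed through jinja2 template inheritance: each concrete workflow template extends it and fills the `steps` block. A minimal rendering sketch under assumed paths (the values passed in are placeholders; real runs are driven by `ReadmeStepsManage.write_workflow`):

```python
from jinja2 import Environment, FileSystemLoader

# Assumes the repo's workflow_templates folder; adjust the path for your checkout.
env = Environment(
    loader=FileSystemLoader("scripts/readme/ghactions_driver/workflow_templates")
)
template = env.get_template("basic_workflow_replace.yml.jinja2")
print(template.render({
    "workflow_name": "samples_example",        # placeholder
    "ci_name": "samples_readme_ci",
    "path_filter": "[ examples/example/** ]",  # placeholder
    "crontab": "0 19 * * *",
    "crontab_comment": "Every day starting at 3:00 BJT",
    "steps": [],                               # normally ReadmeSteps.step_array
}))
```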
0
promptflow_repo/promptflow/scripts/readme/ghactions_driver
promptflow_repo/promptflow/scripts/readme/ghactions_driver/workflow_templates/pdf_workflow.yml.jinja2
{% extends "workflow_skeleton.yml.jinja2" %} {% block steps %} runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Generate config.json for canary workspace (scheduled runs only) if: github.event_name == 'schedule' run: echo '${{ '{{' }} secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ '{{' }} github.workspace }}/examples/config.json - name: Generate config.json for production workspace if: github.event_name != 'schedule' run: echo '${{ '{{' }} secrets.EXAMPLE_WORKSPACE_CONFIG_JSON_PROD }}' > ${{ '{{' }} github.workspace }}/examples/config.json - name: Setup Python 3.9 environment uses: actions/setup-python@v4 with: python-version: "3.9" - name: Prepare sample requirements working-directory: {{ gh_working_dir }} run: | python -m pip install --upgrade pip pip install -r requirements.txt - name: Prepare requirements run: | python -m pip install --upgrade pip pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt - name: Create Chat With PDF Custom Connection working-directory: {{ gh_working_dir }} run: | AOAI_API_KEY=${{ '{{' }} secrets.AOAI_API_KEY_TEST }} AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) if [[ -e .env.example ]]; then echo "env replacement" sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env pf connection create --file .env --name chat_with_pdf_custom_connection fi - name: Create AOAI Connection working-directory: examples/connections run: | AOAI_API_KEY=${{ '{{' }} secrets.AOAI_API_KEY_TEST }} AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} if [[ -e azure_openai.yml ]]; then pf connection create --file azure_openai.yml --set api_key=$AOAI_API_KEY api_base=$AOAI_API_ENDPOINT fi - name: Azure Login uses: azure/login@v1 with: creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }} - name: Test Notebook working-directory: {{ gh_working_dir }} run: | papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb - name: Upload artifact if: ${{ '{{' }} always() }} uses: actions/upload-artifact@v3 with: name: artifact path: {{ gh_working_dir }} {% endblock steps %}
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/compliance-check/Check-PolicheckScan.ps1
# Copyright (C) Microsoft Corporation. All rights reserved.

<#
.SYNOPSIS
Check the PoliCheck scan result.

.DESCRIPTION
Helper script to check the PoliCheck result.
If there are PoliCheck failures, show the errors and throw an exception.
#>
[CmdLetBinding()]
param (
    [string]$policheckResult,
    [bool]$raiseError = $true
)

$result = Get-Content -Path $policheckResult | Measure-Object -Line;
Write-Host("Number of errors found in this scan: " + ($result.Lines - 1));
if ($raiseError -and ($result.Lines -gt 1))
{
    Get-Content -Path $policheckResult;
    throw "PoliCheck scan completed, but it found issues that must be fixed.";
}
# Read-Host "Press enter to finish the process and close this window";
0
promptflow_repo/promptflow/scripts
promptflow_repo/promptflow/scripts/compliance-check/user_exclusion.xml
<PoliCheckExclusions>
  <!-- All strings must be UPPER CASE -->
  <!-- index-xxx.js is an auto-generated JavaScript file; skipped because it is not expected to be human-readable -->
  <Exclusion Type="FileName">SRC\PROMPTFLOW\PROMPTFLOW\_SDK\_SERVING\STATIC\INDEX.JS</Exclusion>
</PoliCheckExclusions>
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow-tools/connections.json.example
{ "azure_open_ai_connection": { "type": "AzureOpenAIConnection", "value": { "api_key": "aoai-api-key", "api_base": "aoai-api-endpoint", "api_type": "azure", "api_version": "2023-07-01-preview" }, "module": "promptflow.connections" }, "serp_connection": { "type": "SerpConnection", "value": { "api_key": "serpapi-api-key" }, "module": "promptflow.connections" }, "custom_connection": { "type": "CustomConnection", "value": { "key1": "hey", "key2": "val2" }, "module": "promptflow.connections", "secret_keys": [ "key1" ] }, "gpt2_connection": { "type": "CustomConnection", "value": { "endpoint_url": "custom-endpoint-url", "model_family": "GPT2", "endpoint_api_key": "custom-endpoint-api-key" }, "module": "promptflow.connections", "secret_keys": [ "endpoint_api_key" ] }, "open_source_llm_ws_service_connection": { "type": "CustomConnection", "value": { "service_credential": "service-credential" }, "module": "promptflow.connections", "secret_keys": [ "service_credential" ] }, "open_ai_connection": { "type": "OpenAIConnection", "value": { "api_key": "openai-api-key", "organization": "openai-api-org" }, "module": "promptflow.connections" }, "azure_content_safety_connection": { "type": "AzureContentSafetyConnection", "value": { "api_key": "azure-content-safety-api-key", "endpoint": "azure-content-safety-endpoint-url", "api_version": "2023-10-01", "api_type": "Content Safety", "name": "prompt-flow-acs-tool-test" }, "module": "promptflow.connections" } }
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow-tools/CHANGELOG.md
# Release History

## 1.0.0 (2023.11.30)

### Features Added

- Support openai 1.x in promptflow-tools
- Add new tool "OpenAI GPT-4V"
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow-tools/README.md
# Prompt flow tools

[![Python package](https://img.shields.io/pypi/v/promptflow-tools)](https://pypi.org/project/promptflow-tools/)
[![License: MIT](https://img.shields.io/github/license/microsoft/promptflow)](https://github.com/microsoft/promptflow/blob/main/LICENSE)

## Introduction

Tools are the fundamental building blocks of a flow in Azure Machine Learning prompt flow. Each tool is a simple, executable unit with a specific function, allowing users to perform various tasks. By combining different tools, users can create a flow that accomplishes a wide range of goals.

One of the key benefits of prompt flow tools is their seamless integration with third-party APIs and open-source Python packages. This not only extends the functionality of large language models but also makes the development process more efficient.

This package provides a set of built-in prompt flow tools covering the tasks most commonly needed when developing AI applications. We also provide a flexible way for users to create their own tools and share them with others (a minimal sketch follows below). See [Create and Use Tool Package](https://github.com/microsoft/promptflow/blob/main/docs/how-to-guides/develop-a-tool/create-and-use-tool-package.md) for more details.
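As a sketch of what a user-defined tool can look like in code, here is a trivial function-style tool; the function name and body are illustrative, and the full packaging story (YAML specs, entry points) is covered in the guide linked above:

```python
from promptflow import tool


@tool
def reverse_text(text: str) -> str:
    """A trivial custom tool: returns the input text reversed."""
    return text[::-1]
```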
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow-tools/setup.py
import os
import re
from io import open
from typing import Any, List, Match, cast

from setuptools import find_namespace_packages, setup

PACKAGE_NAME = "promptflow-tools"
PACKAGE_FOLDER_PATH = "promptflow"


def parse_requirements(file_name: str) -> List[str]:
    with open(file_name) as f:
        return [
            require.strip()
            for require in f
            if require.strip() and not require.startswith("#")
        ]


# Version extraction inspired by 'requests'.
with open(os.path.join(PACKAGE_FOLDER_PATH, "version.txt"), "r") as fd:
    version_content = fd.read()
version = cast(
    Match[Any],
    re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]', version_content, re.MULTILINE),
).group(1)
if not version:
    raise RuntimeError("Cannot find version information")

with open("README.md", encoding="utf-8") as f:
    readme = f.read()
with open("CHANGELOG.md", encoding="utf-8") as f:
    changelog = f.read()

setup(
    name=PACKAGE_NAME,
    version=version,
    description="Prompt flow built-in tools",
    long_description_content_type="text/markdown",
    long_description=readme + "\n\n" + changelog,
    author="Microsoft Corporation",
    author_email="[email protected]",
    url="https://github.com/microsoft/promptflow",
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires="<4.0,>=3.8",
    install_requires=parse_requirements("requirements.txt"),
    packages=find_namespace_packages(include=[f"{PACKAGE_FOLDER_PATH}.*"]),
    entry_points={
        "package_tools": ["builtins = promptflow.tools.list:list_package_tools"],
    },
    include_package_data=True,
    project_urls={
        "Bug Reports": "https://github.com/microsoft/promptflow/issues",
        "Source": "https://github.com/microsoft/promptflow",
    },
)
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow-tools/MANIFEST.in
include promptflow/tools/yamls/*.yaml
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow-tools/requirements.txt
google-search-results==2.4.1
promptflow
# promptflow-tools only supports openai 1.x
openai>=1.0.0
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow-tools/NOTICE.txt
NOTICES AND INFORMATION
Do Not Translate or Localize

This software incorporates material from third parties.
Microsoft makes certain open source code available at https://3rdpartysource.microsoft.com,
or you may send a check or money order for US $5.00, including the product name,
the open source component name, platform, and version number, to:

Source Code Compliance Team
Microsoft Corporation
One Microsoft Way
Redmond, WA 98052
USA

Notwithstanding any other terms, you may reverse engineer this software to the extent
required to debug changes to any libraries licensed under the GNU Lesser General Public License.

---------------------------------------------------------

google-search-results 2.4.1 - MIT

MIT License

Copyright (c) 2018-2021 SerpApi

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

---------------------------------------------------------
0
promptflow_repo/promptflow/src
promptflow_repo/promptflow/src/promptflow-tools/README.dev.md
# Development Guide

## Prerequisites

```bash
pip install -r requirements.txt
pip install pytest pytest-mock
```

## Run tests

- Create a connection config file with `cp connections.json.example connections.json`.
- Fill in the fields manually in `connections.json`.
- `cd tests` and run `pytest -s -v` to run all tests.

## Run tests in CI

Use this [workflow](https://github.com/microsoft/promptflow/actions/workflows/tools_secret_upload.yml) to upload secrets to the key vault. The secrets you upload are used in the [tools tests](https://github.com/microsoft/promptflow/actions/workflows/tools_tests.yml). Note that you only need to upload the SECRETS.

> [!NOTE] After triggering the workflow, kindly request approval from Promptflow Support before proceeding further.

## PR check-in criteria

Here's a friendly heads-up! We've got some criteria for you to self-review your code changes. It's a great way to double-check your work and make sure everything is in order before you share it. Happy coding!

### Maintain code quality

The code you submit in your pull request should adhere to the following guidelines:

- **Maintain clean code**: The code should be clean, easy to understand, and well-structured to promote readability and maintainability.
- **Comment on your code**: Use comments to explain the purpose of certain code segments, particularly complex or non-obvious ones. This assists other developers in understanding your work.
- **Correct typos and grammatical errors**: Ensure that the code and file names are free from spelling mistakes and grammatical errors. This enhances the overall presentation and clarity of your code.
- **Avoid hard-coded values**: It is best to avoid hard-coding values unless absolutely necessary. Instead, use variables, constants, or configuration files, which can be easily modified without changing the source code.
- **Prevent code duplication**: Modify the original code to be more general instead of duplicating it. Code duplication can lead to longer, more complex code that is harder to maintain.
- **Implement effective error handling**: Good error handling is critical for troubleshooting customer issues and analyzing key metrics. Follow the guidelines provided in the [Error Handling Guideline](https://msdata.visualstudio.com/Vienna/_git/PromptFlow?path=/docs/error_handling_guidance.md&_a=preview) and reference the [exception.py](https://github.com/microsoft/promptflow/blob/main/src/promptflow-tools/promptflow/tools/exception.py) file for examples.

### Ensure high test coverage

Test coverage is crucial for maintaining code quality. Please adhere to the following guidelines:

- **Comprehensive testing**: Include unit tests and e2e tests for any new functionality introduced.
- **Exception testing**: Make sure to incorporate unit tests for all exceptions. These tests should verify error codes, error messages, and other important values. For reference, you can check out [TestHandleOpenAIError](https://github.com/microsoft/promptflow/blob/main/src/promptflow-tools/tests/test_handle_openai_error.py); a minimal sketch of this pattern follows this guide.
- **VSCode testing**: If you're adding a new built-in tool, make sure to test your tool within the VSCode environment prior to submitting your PR. For more guidance on this, refer to [Use your tool from VSCode Extension](https://github.com/microsoft/promptflow/blob/main/docs/how-to-guides/develop-a-tool/create-and-use-tool-package.md#use-your-tool-from-vscode-extension).

### Add documents

Ensure to include documentation for your new built-in tool, following the guidelines below:

- **Error-free content**: Rectify all typographical and grammatical errors in the documentation. This will ensure clarity and readability.
- **Code alignment**: The documentation should accurately reflect the current state of your code. Ensure that all described functionalities and behaviors match your implemented code.
- **Functional links**: Verify that all embedded links within the documentation are functioning properly, leading to the correct resources or references.
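For the exception-testing criterion above, a hedged pytest sketch of the pattern; the error class, its fields, and the validator here are hypothetical stand-ins, not the package's real API:

```python
# Hypothetical example in the spirit of TestHandleOpenAIError:
# verify the error type, error code, and message of a raised exception.
import pytest


class ToolValidationError(Exception):  # stand-in for a real tool exception class
    def __init__(self, message: str, error_code: str):
        super().__init__(message)
        self.error_code = error_code


def validate_functions(functions):  # stand-in validator for illustration
    if not functions:
        raise ToolValidationError("functions cannot be empty", error_code="UserError")


def test_validate_functions_raises_with_error_code():
    with pytest.raises(ToolValidationError) as exc_info:
        validate_functions([])
    assert exc_info.value.error_code == "UserError"
    assert "cannot be empty" in str(exc_info.value)
```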
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/promptflow/version.txt
VERSION = "1.1.0"
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/openai.py
from enum import Enum

try:
    from openai import OpenAI as OpenAIClient
except Exception:
    raise Exception(
        "Please upgrade your OpenAI package to version 1.0.0 or later using the command: pip install --upgrade openai.")

from promptflow.tools.common import render_jinja_template, handle_openai_error, \
    parse_chat, to_bool, validate_functions, process_function_call, \
    post_process_chat_api_response, normalize_connection_config

# Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow'
# since the code here is in promptflow namespace as well
from promptflow._internal import ToolProvider, tool, register_apis
from promptflow.connections import OpenAIConnection
from promptflow.contracts.types import PromptTemplate


class Engine(str, Enum):
    TEXT_DAVINCI_001 = "text-davinci-001"
    TEXT_DAVINCI_002 = "text-davinci-002"
    TEXT_DAVINCI_003 = "text-davinci-003"
    TEXT_CURIE_001 = "text-curie-001"
    TEXT_BABBAGE_001 = "text-babbage-001"
    TEXT_ADA_001 = "text-ada-001"
    CODE_CUSHMAN_001 = "code-cushman-001"
    CODE_DAVINCI_002 = "code-davinci-002"


class OpenAI(ToolProvider):
    def __init__(self, connection: OpenAIConnection):
        super().__init__()
        self._connection_dict = normalize_connection_config(connection)
        self._client = OpenAIClient(**self._connection_dict)

    @tool
    @handle_openai_error()
    def completion(
        self,
        prompt: PromptTemplate,
        model: Engine = Engine.TEXT_DAVINCI_003,
        suffix: str = None,
        max_tokens: int = 16,
        temperature: float = 1.0,
        top_p: float = 1.0,
        n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
        stream: bool = False,
        logprobs: int = None,
        echo: bool = False,
        stop: list = None,
        presence_penalty: float = 0,
        frequency_penalty: float = 0,
        best_of: int = 1,
        logit_bias: dict = {},
        user: str = "",
        **kwargs,
    ) -> str:
        prompt = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
        # TODO: remove below type conversion after client can pass json rather than string.
        echo = to_bool(echo)
        stream = to_bool(stream)
        response = self._client.completions.create(
            prompt=prompt,
            model=model.value if isinstance(model, Enum) else model,
            # empty string suffix should be treated as None.
            suffix=suffix if suffix else None,
            max_tokens=int(max_tokens),
            temperature=float(temperature),
            top_p=float(top_p),
            n=int(n),
            stream=stream,
            logprobs=int(logprobs) if logprobs else None,
            echo=echo,
            stop=stop if stop else None,
            presence_penalty=float(presence_penalty),
            frequency_penalty=float(frequency_penalty),
            best_of=int(best_of),
            # Logit bias must be a dict if we passed it to openai api.
            logit_bias=logit_bias if logit_bias else {},
            user=user
        )

        if stream:
            def generator():
                for chunk in response:
                    if chunk.choices:
                        yield getattr(chunk.choices[0], "text", "")

            # We must return the generator object, not use yield directly here.
            # Otherwise, the function itself would become a generator, regardless of whether stream is True or False.
            return generator()
        else:
            # get first element because prompt is single.
            return response.choices[0].text

    @tool
    @handle_openai_error()
    def chat(
        self,
        prompt: PromptTemplate,
        model: str = "gpt-3.5-turbo",
        temperature: float = 1.0,
        top_p: float = 1.0,
        n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
        stream: bool = False,
        stop: list = None,
        max_tokens: int = None,
        presence_penalty: float = 0,
        frequency_penalty: float = 0,
        logit_bias: dict = {},
        user: str = "",
        # function_call can be of type str or dict.
        function_call: object = None,
        functions: list = None,
        response_format: object = None,
        **kwargs
    ) -> [str, dict]:
        chat_str = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
        messages = parse_chat(chat_str)
        # TODO: remove below type conversion after client can pass json rather than string.
        stream = to_bool(stream)
        params = {
            "model": model,
            "messages": messages,
            "temperature": float(temperature),
            "top_p": float(top_p),
            "n": int(n),
            "stream": stream,
            "stop": stop if stop else None,
            "max_tokens": int(max_tokens) if max_tokens is not None and str(max_tokens).lower() != "inf" else None,
            "presence_penalty": float(presence_penalty),
            "frequency_penalty": float(frequency_penalty),
            "logit_bias": logit_bias,
            "user": user,
            "response_format": response_format
        }

        if functions is not None:
            validate_functions(functions)
            params["functions"] = functions
            params["function_call"] = process_function_call(function_call)

        completion = self._client.chat.completions.create(**params)
        return post_process_chat_api_response(completion, stream, functions)


register_apis(OpenAI)


@tool
def completion(
    connection: OpenAIConnection,
    prompt: PromptTemplate,
    model: Engine = Engine.TEXT_DAVINCI_003,
    suffix: str = None,
    max_tokens: int = 16,
    temperature: float = 1.0,
    top_p: float = 1,
    n: int = 1,
    stream: bool = False,
    logprobs: int = None,
    echo: bool = False,
    stop: list = None,
    presence_penalty: float = 0,
    frequency_penalty: float = 0,
    best_of: int = 1,
    logit_bias: dict = {},
    user: str = "",
    **kwargs
) -> [str, dict]:
    return OpenAI(connection).completion(
        prompt=prompt,
        model=model,
        suffix=suffix,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        n=n,
        stream=stream,
        logprobs=logprobs,
        echo=echo,
        stop=stop if stop else None,
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        best_of=best_of,
        logit_bias=logit_bias,
        user=user,
        **kwargs,
    )


@tool
def chat(
    connection: OpenAIConnection,
    prompt: PromptTemplate,
    model: str = "gpt-3.5-turbo",
    temperature: float = 1,
    top_p: float = 1,
    n: int = 1,
    stream: bool = False,
    stop: list = None,
    max_tokens: int = None,
    presence_penalty: float = 0,
    frequency_penalty: float = 0,
    logit_bias: dict = {},
    user: str = "",
    function_call: object = None,
    functions: list = None,
    response_format: object = None,
    **kwargs
) -> [str, dict]:
    return OpenAI(connection).chat(
        prompt=prompt,
        model=model,
        temperature=temperature,
        top_p=top_p,
        n=n,
        stream=stream,
        stop=stop if stop else None,
        max_tokens=max_tokens,
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        logit_bias=logit_bias,
        user=user,
        function_call=function_call,
        functions=functions,
        response_format=response_format,
        **kwargs,
    )
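A minimal usage sketch for the module-level chat tool above. The api key and question are placeholders; this assumes OpenAIConnection accepts api_key directly and that a plain str is accepted where PromptTemplate is expected (PromptTemplate subclasses str in promptflow).

# Hedged usage sketch -- not part of the source file above.
from promptflow.connections import OpenAIConnection
from promptflow.tools.openai import chat

connection = OpenAIConnection(api_key="<your-api-key>")  # placeholder key
prompt = "system:\nYou are a helpful assistant.\nuser:\n{{question}}"
# Extra kwargs (here: question) are fed to render_jinja_template before parse_chat.
answer = chat(connection, prompt=prompt, model="gpt-3.5-turbo",
              question="What does prompt flow do?")
print(answer)  # non-stream mode returns the message content as a string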
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/list.py
from pathlib import Path

from ruamel.yaml import YAML


def collect_tools_from_directory(base_dir) -> dict:
    tools = {}
    yaml = YAML()
    for f in Path(base_dir).glob("**/*.yaml"):
        with open(f, "r") as fin:  # use a distinct handle name to avoid shadowing the loop variable `f`
            tools_in_file = yaml.load(fin)
            for identifier, tool in tools_in_file.items():
                tools[identifier] = tool
    return tools


def list_package_tools():
    """List package tools"""
    yaml_dir = Path(__file__).parent / "yamls"
    return collect_tools_from_directory(yaml_dir)
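A self-contained sketch of what collect_tools_from_directory returns; the tool identifier and spec fields below are illustrative, not real package specs.

import tempfile
from pathlib import Path
from promptflow.tools.list import collect_tools_from_directory

with tempfile.TemporaryDirectory() as d:
    # One hypothetical YAML spec: identifier maps to the tool's metadata.
    spec = 'promptflow.tools.example.Echo.call:\n  name: Echo\n  type: python\n'
    (Path(d) / "example.yaml").write_text(spec)
    tools = collect_tools_from_directory(d)
    print(list(tools))                                           # ['promptflow.tools.example.Echo.call']
    print(tools['promptflow.tools.example.Echo.call']['name'])   # Echo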
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/openai_gpt4v.py
try:
    from openai import OpenAI as OpenAIClient
except Exception:
    raise Exception(
        "Please upgrade your OpenAI package to version 1.0.0 or later using the command: pip install --upgrade openai.")

from promptflow.connections import OpenAIConnection
from promptflow.contracts.types import PromptTemplate
from promptflow._internal import ToolProvider, tool
from promptflow.tools.common import render_jinja_template, handle_openai_error, \
    parse_chat, post_process_chat_api_response, preprocess_template_string, \
    find_referenced_image_set, convert_to_chat_list, normalize_connection_config


class OpenAI(ToolProvider):
    def __init__(self, connection: OpenAIConnection):
        super().__init__()
        self._connection_dict = normalize_connection_config(connection)
        self._client = OpenAIClient(**self._connection_dict)

    @tool(streaming_option_parameter="stream")
    @handle_openai_error()
    def chat(
        self,
        prompt: PromptTemplate,
        model: str = "gpt-4-vision-preview",
        temperature: float = 1.0,
        top_p: float = 1.0,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
        stream: bool = False,
        stop: list = None,
        max_tokens: int = None,
        presence_penalty: float = 0,
        frequency_penalty: float = 0,
        **kwargs,
    ) -> [str, dict]:
        # keep_trailing_newline=True is to keep the last \n in the prompt to avoid converting "user:\t\n" to "user:".
        prompt = preprocess_template_string(prompt)
        referenced_images = find_referenced_image_set(kwargs)

        # convert list type into ChatInputList type
        converted_kwargs = convert_to_chat_list(kwargs)
        chat_str = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **converted_kwargs)
        messages = parse_chat(chat_str, list(referenced_images))

        params = {
            "model": model,
            "messages": messages,
            "temperature": temperature,
            "top_p": top_p,
            "n": 1,
            "stream": stream,
            "presence_penalty": presence_penalty,
            "frequency_penalty": frequency_penalty,
        }

        if stop:
            params["stop"] = stop
        if max_tokens is not None:
            params["max_tokens"] = max_tokens

        completion = self._client.chat.completions.create(**params)
        return post_process_chat_api_response(completion, stream, None)
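The tool relies on preprocess_template_string to rewrite markdown image decorators into bare placeholder lines that parse_chat can later match against referenced images. The rewrite below follows directly from the regex in preprocess_template_string (defined in common.py later in this package):

from promptflow.tools.common import preprocess_template_string

template = "user:\nDescribe this picture: ![image]({{my_image}})"
print(preprocess_template_string(template))
# user:
# Describe this picture:
# {{my_image}}
#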
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/aoai_gpt4v.py
try:
    from openai import AzureOpenAI as AzureOpenAIClient
except Exception:
    raise Exception(
        "Please upgrade your OpenAI package to version 1.0.0 or later using the command: pip install --upgrade openai.")

from promptflow._internal import ToolProvider, tool
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate
from promptflow.tools.common import render_jinja_template, handle_openai_error, parse_chat, \
    preprocess_template_string, find_referenced_image_set, convert_to_chat_list, normalize_connection_config, \
    post_process_chat_api_response


class AzureOpenAI(ToolProvider):
    def __init__(self, connection: AzureOpenAIConnection):
        super().__init__()
        self.connection = connection
        self._connection_dict = normalize_connection_config(self.connection)
        azure_endpoint = self._connection_dict.get("azure_endpoint")
        api_version = self._connection_dict.get("api_version")
        api_key = self._connection_dict.get("api_key")
        self._client = AzureOpenAIClient(azure_endpoint=azure_endpoint, api_version=api_version, api_key=api_key)

    @tool(streaming_option_parameter="stream")
    @handle_openai_error()
    def chat(
        self,
        prompt: PromptTemplate,
        deployment_name: str,
        temperature: float = 1.0,
        top_p: float = 1.0,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
        stream: bool = False,
        stop: list = None,
        max_tokens: int = None,
        presence_penalty: float = 0,
        frequency_penalty: float = 0,
        **kwargs,
    ) -> str:
        # keep_trailing_newline=True is to keep the last \n in the prompt to avoid converting "user:\t\n" to "user:".
        prompt = preprocess_template_string(prompt)
        referenced_images = find_referenced_image_set(kwargs)

        # convert list type into ChatInputList type
        converted_kwargs = convert_to_chat_list(kwargs)
        chat_str = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **converted_kwargs)
        messages = parse_chat(chat_str, list(referenced_images))

        headers = {
            "Content-Type": "application/json",
            "ms-azure-ai-promptflow-called-from": "aoai-gpt4v-tool"
        }

        params = {
            "messages": messages,
            "temperature": temperature,
            "top_p": top_p,
            "n": 1,
            "stream": stream,
            "presence_penalty": presence_penalty,
            "frequency_penalty": frequency_penalty,
            "extra_headers": headers,
            "model": deployment_name,
        }

        if stop:
            params["stop"] = stop
        if max_tokens is not None:
            params["max_tokens"] = max_tokens

        completion = self._client.chat.completions.create(**params)
        return post_process_chat_api_response(completion, stream, None)
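Note the design choice above: the tool passes a ms-azure-ai-promptflow-called-from header via extra_headers for telemetry and sends deployment_name as the model parameter. A hedged instantiation sketch; the endpoint, key, version, and deployment values are placeholders, and the AzureOpenAIConnection field names are assumed to follow normalize_connection_config (api_key, api_version, api_base):

from promptflow.connections import AzureOpenAIConnection
from promptflow.tools.aoai_gpt4v import AzureOpenAI

conn = AzureOpenAIConnection(
    api_key="<key>",
    api_base="https://<resource>.openai.azure.com/",
    api_version="2023-07-01-preview",
)
tool = AzureOpenAI(conn)
reply = tool.chat(prompt="user:\nSay hello.", deployment_name="<gpt-4v-deployment>")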
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/__init__.py
from .aoai import AzureOpenAI  # noqa: F401
from .openai import OpenAI  # noqa: F401
from .serpapi import SerpAPI  # noqa: F401
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/embedding.py
from enum import Enum
from typing import Union

from openai import AzureOpenAI as AzureOpenAIClient, OpenAI as OpenAIClient

from promptflow.tools.common import handle_openai_error, normalize_connection_config
from promptflow.tools.exception import InvalidConnectionType
# Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow'
# since the code here is in promptflow namespace as well
from promptflow._internal import tool
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection


class EmbeddingModel(str, Enum):
    TEXT_EMBEDDING_ADA_002 = "text-embedding-ada-002"
    TEXT_SEARCH_ADA_DOC_001 = "text-search-ada-doc-001"
    TEXT_SEARCH_ADA_QUERY_001 = "text-search-ada-query-001"


@tool
@handle_openai_error()
def embedding(connection: Union[AzureOpenAIConnection, OpenAIConnection], input: str, deployment_name: str = "",
              model: EmbeddingModel = EmbeddingModel.TEXT_EMBEDDING_ADA_002):
    if isinstance(connection, AzureOpenAIConnection):
        client = AzureOpenAIClient(**normalize_connection_config(connection))
        return client.embeddings.create(
            input=input,
            model=deployment_name,
            extra_headers={"ms-azure-ai-promptflow-called-from": "aoai-tool"}
        ).data[0].embedding
    elif isinstance(connection, OpenAIConnection):
        client = OpenAIClient(**normalize_connection_config(connection))
        return client.embeddings.create(
            input=input,
            model=model
        ).data[0].embedding
    else:
        error_message = f"Unsupported connection type '{type(connection).__name__}' for the embedding api. " \
                        f"Connection type should be in [AzureOpenAIConnection, OpenAIConnection]."
        raise InvalidConnectionType(message=error_message)
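A hedged usage sketch; the key is a placeholder. For an OpenAIConnection the model enum value is sent, while an AzureOpenAIConnection uses deployment_name instead:

from promptflow.connections import OpenAIConnection
from promptflow.tools.embedding import embedding, EmbeddingModel

conn = OpenAIConnection(api_key="<your-api-key>")  # placeholder
vector = embedding(conn, input="hello world",
                   model=EmbeddingModel.TEXT_EMBEDDING_ADA_002)
print(len(vector))  # text-embedding-ada-002 returns 1536 floats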
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/azure_content_safety.py
from enum import Enum from typing import Dict, List, Union import json import requests from promptflow import tool, ToolProvider from promptflow.connections import AzureContentSafetyConnection from promptflow.tools.exception import AzureContentSafetyInputValueError, AzureContentSafetySystemError class TextCategorySensitivity(str, Enum): DISABLE = "disable" LOW_SENSITIVITY = "low_sensitivity" MEDIUM_SENSITIVITY = "medium_sensitivity" HIGH_SENSITIVITY = "high_sensitivity" class AzureContentSafety(ToolProvider): """ Doc reference : https://review.learn.microsoft.com/en-us/azure/cognitive-services/content-safety/quickstart?branch=pr-en-us-233724&pivots=programming-language-rest """ def __init__(self, connection: AzureContentSafetyConnection): self.connection = connection super(AzureContentSafety, self).__init__() @tool def analyze_text( self, text: str, hate_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY, sexual_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY, self_harm_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY, violence_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY, ): content_safety = ContentSafety(self.connection.endpoint, self.connection.api_key, self.connection.api_version) media_type = MediaType.Text blocklists = [] detection_result = content_safety.detect(media_type, text, blocklists) # Set the reject thresholds for each category reject_thresholds = { Category.Hate: switch_category_threshold(hate_category), Category.SelfHarm: switch_category_threshold(self_harm_category), Category.Sexual: switch_category_threshold(sexual_category), Category.Violence: switch_category_threshold(violence_category), } # Make a decision based on the detection result and reject thresholds if self.connection.api_version == "2023-10-01": decision_result = content_safety.make_decision_1001(detection_result, reject_thresholds) else: decision_result = content_safety.make_decision(detection_result, reject_thresholds) return convert_decision_to_json(decision_result) @tool def analyze_text( connection: AzureContentSafetyConnection, text: str, hate_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY, sexual_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY, self_harm_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY, violence_category: TextCategorySensitivity = TextCategorySensitivity.MEDIUM_SENSITIVITY, ): return AzureContentSafety(connection).analyze_text( text=text, hate_category=hate_category, sexual_category=sexual_category, self_harm_category=self_harm_category, violence_category=violence_category, ) def switch_category_threshold(sensitivity: TextCategorySensitivity) -> int: switcher = { TextCategorySensitivity.DISABLE: -1, TextCategorySensitivity.LOW_SENSITIVITY: 6, TextCategorySensitivity.MEDIUM_SENSITIVITY: 4, TextCategorySensitivity.HIGH_SENSITIVITY: 2, } return switcher.get(sensitivity, f"Non-supported sensitivity: {sensitivity}") class MediaType(Enum): Text = 1 Image = 2 class Category(Enum): Hate = 1 SelfHarm = 2 Sexual = 3 Violence = 4 class Action(Enum): Accept = "Accept" Reject = "Reject" class Decision(object): def __init__(self, suggested_action: Action, action_by_category: Dict[Category, Action]) -> None: """ Represents the decision made by the content moderation system. Args: - suggested_action (Action): The suggested action to take. 
- action_by_category (dict[Category, Action]): The action to take for each category. """ self.suggested_action = suggested_action self.action_by_category = action_by_category def convert_decision_to_json(decision: Decision): result_json = {} result_json["suggested_action"] = decision.suggested_action.value category_json = {} for key, value in decision.action_by_category.items(): category_json[key.name] = value.value result_json["action_by_category"] = category_json return result_json class ContentSafety(object): def __init__(self, endpoint: str, subscription_key: str, api_version: str) -> None: """ Creates a new ContentSafety instance. Args: - endpoint (str): The endpoint URL for the Content Safety API. - subscription_key (str): The subscription key for the Content Safety API. - api_version (str): The version of the Content Safety API to use. """ self.endpoint = endpoint self.subscription_key = subscription_key self.api_version = api_version def build_url(self, media_type: MediaType) -> str: """ Builds the URL for the Content Safety API based on the media type. Args: - media_type (MediaType): The type of media to analyze. Returns: - str: The URL for the Content Safety API. """ if media_type == MediaType.Text: return f"{self.endpoint}/contentsafety/text:analyze?api-version={self.api_version}" elif media_type == MediaType.Image: return f"{self.endpoint}/contentsafety/image:analyze?api-version={self.api_version}" else: error_message = f"Invalid Media Type {media_type}" raise AzureContentSafetyInputValueError(message=error_message) def build_headers(self) -> Dict[str, str]: """ Builds the headers for the Content Safety API request. Returns: - dict[str, str]: The headers for the Content Safety API request. """ return { "Ocp-Apim-Subscription-Key": self.subscription_key, "Content-Type": "application/json", "ms-azure-ai-sender": "prompt_flow" } def build_request_body( self, media_type: MediaType, content: str, blocklists: List[str], ) -> dict: """ Builds the request body for the Content Safety API request. Args: - media_type (MediaType): The type of media to analyze. - content (str): The content to analyze. - blocklists (list[str]): The blocklists to use for text analysis. Returns: - dict: The request body for the Content Safety API request. 
""" if media_type == MediaType.Text: return { "text": content, "blocklistNames": blocklists, } elif media_type == MediaType.Image: return {"image": {"content": content}} else: error_message = f"Invalid Media Type {media_type}" raise AzureContentSafetyInputValueError(message=error_message) def detect( self, media_type: MediaType, content: str, blocklists: List[str] = [], ) -> dict: url = self.build_url(media_type) headers = self.build_headers() request_body = self.build_request_body(media_type, content, blocklists) payload = json.dumps(request_body) response = requests.post(url, headers=headers, data=payload) print("status code: " + response.status_code.__str__()) print("response txt: " + response.text) res_content = response.json() if response.status_code != 200: error_message = f"Error in detecting content: {res_content['error']['message']}" raise AzureContentSafetySystemError(message=error_message) return res_content def get_detect_result_by_category(self, category: Category, detect_result: dict) -> Union[int, None]: if category == Category.Hate: return detect_result.get("hateResult", None) elif category == Category.SelfHarm: return detect_result.get("selfHarmResult", None) elif category == Category.Sexual: return detect_result.get("sexualResult", None) elif category == Category.Violence: return detect_result.get("violenceResult", None) else: error_message = f"Invalid Category {category}" raise AzureContentSafetyInputValueError(message=error_message) def get_detect_result_by_category_1001(self, category: Category, detect_result: dict) -> Union[int, None]: category_res = detect_result.get("categoriesAnalysis", None) for res in category_res: if category.name == res.get("category", None): return res error_message = f"Invalid Category {category}" raise AzureContentSafetyInputValueError(message=error_message) def make_decision( self, detection_result: dict, reject_thresholds: Dict[Category, int], ) -> Decision: action_result = {} final_action = Action.Accept for category, threshold in reject_thresholds.items(): if threshold not in (-1, 0, 2, 4, 6): error_message = "RejectThreshold can only be in (-1, 0, 2, 4, 6)" raise AzureContentSafetyInputValueError(message=error_message) cate_detect_res = self.get_detect_result_by_category(category, detection_result) if cate_detect_res is None or "severity" not in cate_detect_res: error_message = f"Can not find detection result for {category}" raise AzureContentSafetySystemError(message=error_message) severity = cate_detect_res["severity"] action = Action.Reject if threshold != -1 and severity >= threshold else Action.Accept action_result[category] = action if action.value > final_action.value: final_action = action if ( "blocklistsMatchResults" in detection_result and detection_result["blocklistsMatchResults"] and len(detection_result["blocklistsMatchResults"]) > 0 ): final_action = Action.Reject print(f"Action result: {action_result}") return Decision(final_action, action_result) def make_decision_1001( self, detection_result: dict, reject_thresholds: Dict[Category, int], ) -> Decision: action_result = {} final_action = Action.Accept for category, threshold in reject_thresholds.items(): if threshold not in (-1, 0, 2, 4, 6): error_message = "RejectThreshold can only be in (-1, 0, 2, 4, 6)" raise AzureContentSafetyInputValueError(message=error_message) cate_detect_res = self.get_detect_result_by_category_1001( category, detection_result ) if cate_detect_res is None or "severity" not in cate_detect_res: error_message = f"Can not find detection result for 
{category}" raise AzureContentSafetySystemError(message=error_message) severity = cate_detect_res["severity"] action = ( Action.Reject if threshold != -1 and severity >= threshold else Action.Accept ) action_result[category] = action if action.value > final_action.value: final_action = action if ( "blocklistsMatch" in detection_result and detection_result["blocklistsMatch"] and len(detection_result["blocklistsMatch"]) > 0 ): final_action = Action.Reject print(f"Action result: {action_result}") return Decision(final_action, action_result)
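Each sensitivity level maps to a severity threshold (disable=-1, low=6, medium=4, high=2), and a category is rejected when its detected severity reaches that threshold. A hedged end-to-end sketch; the connection values are placeholders, and the AzureContentSafetyConnection field names are assumed from the attributes accessed above (endpoint, api_key, api_version). The result shape mirrors convert_decision_to_json:

from promptflow.connections import AzureContentSafetyConnection
from promptflow.tools.azure_content_safety import analyze_text, TextCategorySensitivity

conn = AzureContentSafetyConnection(
    api_key="<key>",
    endpoint="https://<resource>.cognitiveservices.azure.com",
    api_version="2023-10-01",
)
result = analyze_text(conn, text="some user input",
                      violence_category=TextCategorySensitivity.HIGH_SENSITIVITY)
print(result)  # e.g. {"suggested_action": "Accept", "action_by_category": {"Hate": "Accept", ...}}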
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/exception.py
from openai import OpenAIError from promptflow.exceptions import ErrorTarget, SystemErrorException, UserErrorException openai_error_code_ref_message = "Error reference: https://platform.openai.com/docs/guides/error-codes/api-errors" def to_openai_error_message(e: Exception) -> str: ex_type = type(e).__name__ if str(e) == "<empty message>": msg = "The api key is invalid or revoked. " \ "You can correct or regenerate the api key of your connection." return f"OpenAI API hits {ex_type}: {msg}" # for models that do not support the `functions` parameter. elif "Unrecognized request argument supplied: functions" in str(e): msg = "Current model does not support the `functions` parameter. If you are using openai connection, then " \ "please use gpt-3.5-turbo, gpt-4, gpt-4-32k, gpt-3.5-turbo-0613 or gpt-4-0613. You can refer to " \ "https://platform.openai.com/docs/guides/gpt/function-calling. If you are using azure openai " \ "connection, then please first go to your Azure OpenAI resource, deploy model 'gpt-35-turbo' or " \ "'gpt-4' with version 0613, then go to prompt flow connection page, upgrade connection api version to " \ "'2023-07-01-preview'. You can refer to " \ "https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/function-calling." return f"OpenAI API hits {ex_type}: {msg}" elif "The completion operation does not work with the specified model" in str(e) or \ "logprobs, best_of and echo parameters are not available" in str(e): msg = "The completion operation does not work with the current model. " \ "Completion API is a legacy api and is going to be deprecated soon. " \ "Please change to use Chat API for current model. " \ "You could refer to guideline at https://aka.ms/pfdoc/chat-prompt " \ "or view the samples in our gallery that contain 'Chat' in the name." return f"OpenAI API hits {ex_type}: {msg}" elif "Invalid content type. image_url is only supported by certain models" in str(e): msg = "Current model does not support the image input. If you are using openai connection, then please use " \ "gpt-4-vision-preview. You can refer to https://platform.openai.com/docs/guides/vision." \ "If you are using azure openai connection, then please first go to your Azure OpenAI resource, " \ "create a GPT-4 Turbo with Vision deployment by selecting model name: \"gpt-4\" and "\ "model version \"vision-preview\". You can refer to " \ "https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/gpt-with-vision" return f"OpenAI API hits {ex_type}: {msg}" elif ("\'response_format\' of type" in str(e) and "is not supported with this model." in str(e))\ or ("Additional properties are not allowed" in str(e) and "unexpected) - \'response_format\'" in str(e)): msg = "The response_format parameter needs to be a dictionary such as {\"type\": \"text\"}. " \ "The value associated with the type key should be either 'text' or 'json_object' " \ "If you are using openai connection, you can only set response_format to { \"type\": \"json_object\" } " \ "when calling gpt-3.5-turbo-1106 or gpt-4-1106-preview to enable JSON mode. You can refer to " \ "https://platform.openai.com/docs/guides/text-generation/json-mode. If you are using azure openai " \ "connection, then please first go to your Azure OpenAI resource, deploy model 'gpt-35-turbo-1106' or " \ "'gpt-4-1106-preview'. You can refer to " \ "https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/json-mode?tabs=python." 
return f"OpenAI API hits {ex_type}: {msg}" else: return f"OpenAI API hits {ex_type}: {str(e)} [{openai_error_code_ref_message}]" class WrappedOpenAIError(UserErrorException): """Refine error messages on top of native openai errors.""" def __init__(self, ex: OpenAIError, **kwargs): self._ex = ex super().__init__(target=ErrorTarget.TOOL, **kwargs) @property def message(self): return str(to_openai_error_message(self._ex)) @property def error_codes(self): """The hierarchy of the error codes. We follow the "Microsoft REST API Guidelines" to define error codes in a hierarchy style. See the below link for details: https://github.com/microsoft/api-guidelines/blob/vNext/Guidelines.md#7102-error-condition-responses This list will be converted into an error code hierarchy by the prompt flow framework. For this case, it will be converted into a data structure that equivalent to: { "code": "UserError", "innerError": { "code": "OpenAIError", "innerError": { "code": self._ex.__class__.__name__, "innerError": None } } } """ return ["UserError", "OpenAIError", self._ex.__class__.__name__] class ExceedMaxRetryTimes(WrappedOpenAIError): """Base exception raised when retry exceeds max times.""" @property def message(self): return "Exceed max retry times. " + super().message class ToolValidationError(UserErrorException): """Base exception raised when failed to validate tool.""" def __init__(self, **kwargs): super().__init__(**kwargs, target=ErrorTarget.TOOL) class LLMError(UserErrorException): """Base exception raised when failed to call openai api with non-OpenAIError.""" def __init__(self, **kwargs): super().__init__(**kwargs, target=ErrorTarget.TOOL) class JinjaTemplateError(ToolValidationError): """Base exception raised when failed to render jinja template.""" pass class ChatAPIInvalidRole(ToolValidationError): """Base exception raised when failed to validate chat api role.""" pass class ChatAPIFunctionRoleInvalidFormat(ToolValidationError): """Base exception raised when failed to validate chat api function role format.""" pass class ChatAPIInvalidFunctions(ToolValidationError): """Base exception raised when failed to validate functions when call chat api.""" pass class FunctionCallNotSupportedInStreamMode(ToolValidationError): """Base exception raised when use functions parameter in stream mode when call chat api.""" pass class InvalidConnectionType(ToolValidationError): """Base exception raised when failed to pass invalid connection type.""" pass class SerpAPISystemError(SystemErrorException): """Base exception raised when failed to call serp api with system error.""" def __init__(self, **kwargs): super().__init__(**kwargs, target=ErrorTarget.TOOL) class SerpAPIUserError(UserErrorException): """Base exception raised when failed to call serp api with user error.""" def __init__(self, **kwargs): super().__init__(**kwargs, target=ErrorTarget.TOOL) class OpenModelLLMOnlineEndpointError(UserErrorException): """Base exception raised when the call to an online endpoint failed.""" def __init__(self, **kwargs): super().__init__(**kwargs, target=ErrorTarget.TOOL) class OpenModelLLMUserError(UserErrorException): """Base exception raised when the call to Open Model LLM failed with a user error.""" def __init__(self, **kwargs): super().__init__(**kwargs, target=ErrorTarget.TOOL) class OpenModelLLMKeyValidationError(ToolValidationError): """Base exception raised when failed to validate functions when call chat api.""" def __init__(self, **kwargs): super().__init__(**kwargs) class 
AzureContentSafetyInputValueError(UserErrorException): """Base exception raised when the input type of Azure Content Safety is invalid.""" def __init__(self, **kwargs): super().__init__(**kwargs, target=ErrorTarget.TOOL) class AzureContentSafetySystemError(SystemErrorException): """Base exception raised when failed to call Azure Content Safety api with system error.""" def __init__(self, **kwargs): super().__init__(**kwargs, target=ErrorTarget.TOOL)
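A small demo of the wrapping behavior above, assuming WrappedOpenAIError can be constructed directly without extra kwargs: the "<empty message>" string triggers the refined invalid-api-key guidance, and error_codes reflects the documented hierarchy.

from openai import OpenAIError
from promptflow.tools.exception import WrappedOpenAIError

err = WrappedOpenAIError(OpenAIError("<empty message>"))
print(err.message)      # "OpenAI API hits OpenAIError: The api key is invalid or revoked. ..."
print(err.error_codes)  # ["UserError", "OpenAIError", "OpenAIError"]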
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/common.py
import functools
import json
import re
import sys
import time
from typing import List, Mapping

from jinja2 import Template
from openai import APIConnectionError, APIStatusError, OpenAIError, RateLimitError, APITimeoutError

from promptflow.tools.exception import ChatAPIInvalidRole, WrappedOpenAIError, LLMError, JinjaTemplateError, \
    ExceedMaxRetryTimes, ChatAPIInvalidFunctions, FunctionCallNotSupportedInStreamMode, \
    ChatAPIFunctionRoleInvalidFormat, InvalidConnectionType
from promptflow.connections import AzureOpenAIConnection, OpenAIConnection
from promptflow.exceptions import SystemErrorException, UserErrorException


class ChatInputList(list):
    """
    ChatInputList is a list of ChatInput objects. It is used to override the __str__ method of
    list to return a string that can be easily parsed as a message list.
    """
    def __init__(self, iterable=None):
        super().__init__(iterable or [])

    def __str__(self):
        return "\n".join(map(str, self))


def validate_role(role: str, valid_roles: List[str] = None):
    if not valid_roles:
        valid_roles = ["assistant", "function", "user", "system"]

    if role not in valid_roles:
        valid_roles_str = ','.join([f'\'{role}:\\n\'' for role in valid_roles])
        error_message = (
            f"The Chat API requires a specific format for prompt definition, and the prompt should include separate "
            f"lines as role delimiters: {valid_roles_str}. Current parsed role '{role}'"
            f" does not meet the requirement. If you intend to use the Completion API, please select the appropriate"
            f" API type and deployment name. If you do intend to use the Chat API, please refer to the guideline at "
            f"https://aka.ms/pfdoc/chat-prompt or view the samples in our gallery that contain 'Chat' in the name."
        )
        raise ChatAPIInvalidRole(message=error_message)


def validate_functions(functions):
    function_example = json.dumps({
        "name": "function_name",
        "parameters": {
            "type": "object",
            "properties": {
                "parameter_name": {
                    "type": "integer",
                    "description": "parameter_description"
                }
            }
        },
        "description": "function_description"
    })
    common_tsg = f"Here is a valid function example: {function_example}. See more details at " \
                 "https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions " \
                 "or view sample 'How to use functions with chat models' in our gallery."
    if len(functions) == 0:
        raise ChatAPIInvalidFunctions(message=f"functions cannot be an empty list. {common_tsg}")
    else:
        for i, function in enumerate(functions):
            # validate that the function is a dict
            if not isinstance(function, dict):
                raise ChatAPIInvalidFunctions(message=f"function {i} '{function}' is not a dict. {common_tsg}")
            # validate that the required keys are present
            for key in ["name", "parameters"]:
                if key not in function.keys():
                    raise ChatAPIInvalidFunctions(
                        message=f"function {i} '{function}' does not have '{key}' property. {common_tsg}")
            # validate that the parameters field is a dict
            if not isinstance(function["parameters"], dict):
                raise ChatAPIInvalidFunctions(
                    message=f"function {i} '{function['name']}' parameters '{function['parameters']}' "
                            f"should be described as a JSON Schema object. {common_tsg}")
            # validate that the parameters field has the required keys
            for key in ["type", "properties"]:
                if key not in function["parameters"].keys():
                    raise ChatAPIInvalidFunctions(
                        message=f"function {i} '{function['name']}' parameters '{function['parameters']}' "
                                f"does not have '{key}' property. {common_tsg}")
            # validate that the parameters type is object
            if function["parameters"]["type"] != "object":
                raise ChatAPIInvalidFunctions(
                    message=f"function {i} '{function['name']}' parameters 'type' "
                            f"should be 'object'. {common_tsg}")
            # validate that the parameters properties field is a dict
            if not isinstance(function["parameters"]["properties"], dict):
                raise ChatAPIInvalidFunctions(
                    message=f"function {i} '{function['name']}' parameters 'properties' "
                            f"should be described as a JSON Schema object. {common_tsg}")


def try_parse_name_and_content(role_prompt):
    # customer can add ## in front of name/content for markdown highlight.
    # and we still support name/content without ## prefix for backward compatibility.
    pattern = r"\n*#{0,2}\s*name:\n+\s*(\S+)\s*\n*#{0,2}\s*content:\n?(.*)"
    match = re.search(pattern, role_prompt, re.DOTALL)
    if match:
        return match.group(1), match.group(2)
    return None


def parse_chat(chat_str, images: List = None, valid_roles: List[str] = None):
    if not valid_roles:
        valid_roles = ["system", "user", "assistant", "function"]

    # openai chat api only supports the roles listed above.
    # customer can add a single # in front of the role name for markdown highlight.
    # and we still support role names without the # prefix for backward compatibility.
    separator = r"(?i)^\s*#?\s*(" + "|".join(valid_roles) + r")\s*:\s*\n"

    images = images or []
    hash2images = {str(x): x for x in images}

    chunks = re.split(separator, chat_str, flags=re.MULTILINE)
    chat_list = []

    for chunk in chunks:
        last_message = chat_list[-1] if len(chat_list) > 0 else None
        if last_message and "role" in last_message and "content" not in last_message:
            parsed_result = try_parse_name_and_content(chunk)
            if parsed_result is None:
                # "name" is required if the role is "function"
                if last_message["role"] == "function":
                    raise ChatAPIFunctionRoleInvalidFormat(
                        message="Failed to parse function role prompt. Please make sure the prompt follows the "
                                "format: 'name:\\nfunction_name\\ncontent:\\nfunction_content'. "
                                "'name' is required if role is function, and it should be the name of the function "
                                "whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, "
                                "with a maximum length of 64 characters. See more details in "
                                "https://platform.openai.com/docs/api-reference/chat/create#chat/create-name "
                                "or view sample 'How to use functions with chat models' in our gallery.")
                # "name" is optional for other role types.
                else:
                    last_message["content"] = to_content_str_or_list(chunk, hash2images)
            else:
                last_message["name"] = parsed_result[0]
                last_message["content"] = to_content_str_or_list(parsed_result[1], hash2images)
        else:
            if chunk.strip() == "":
                continue
            # Check if the prompt follows the chat api message format and has a valid role.
            # References: https://platform.openai.com/docs/api-reference/chat/create.
            role = chunk.strip().lower()
            validate_role(role, valid_roles=valid_roles)
            new_message = {"role": role}
            chat_list.append(new_message)
    return chat_list


def to_content_str_or_list(chat_str: str, hash2images: Mapping):
    chat_str = chat_str.strip()
    chunks = chat_str.split("\n")
    include_image = False
    result = []
    for chunk in chunks:
        if chunk.strip() in hash2images:
            image_message = {}
            image_message["type"] = "image_url"
            image_url = hash2images[chunk.strip()].source_url \
                if hasattr(hash2images[chunk.strip()], "source_url") else None
            if not image_url:
                image_bs64 = hash2images[chunk.strip()].to_base64()
                image_mime_type = hash2images[chunk.strip()]._mime_type
                image_url = {"url": f"data:{image_mime_type};base64,{image_bs64}"}
            image_message["image_url"] = image_url
            result.append(image_message)
            include_image = True
        elif chunk.strip() == "":
            continue
        else:
            result.append({"type": "text", "text": chunk})
    return result if include_image else chat_str


def handle_openai_error(tries: int = 10, delay: float = 8.0):
    """
    A decorator function for handling OpenAI errors.

    OpenAI errors fall into retriable and non-retriable ones. For retriable errors, the decorator
    uses the parameters below to control its retry activity with exponential backoff:
    `tries`: max times for the function invocation, type is int
    `delay`: base delay seconds for exponential delay, type is float
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            for i in range(tries + 1):
                try:
                    return func(*args, **kwargs)
                except (SystemErrorException, UserErrorException) as e:
                    # Throw inner wrapped exception directly
                    raise e
                except (APIStatusError, APIConnectionError) as e:
                    # Handle retriable exceptions; please refer to
                    # https://platform.openai.com/docs/guides/error-codes/api-errors
                    print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
                    if isinstance(e, APIConnectionError) and not isinstance(e, APITimeoutError) \
                            and "connection aborted" not in str(e).lower():
                        raise WrappedOpenAIError(e)
                    # Retry InternalServerError(>=500), RateLimitError(429), UnprocessableEntityError(422)
                    if isinstance(e, APIStatusError):
                        status_code = e.response.status_code
                        if status_code < 500 and status_code not in [429, 422]:
                            raise WrappedOpenAIError(e)
                    if isinstance(e, RateLimitError) and getattr(e, "type", None) == "insufficient_quota":
                        # Exit retry if this is a quota-insufficient error
                        print(f"{type(e).__name__} with insufficient quota. Throw user error.", file=sys.stderr)
                        raise WrappedOpenAIError(e)
                    if i == tries:
                        # Exit retry if max retry is reached
                        print(f"{type(e).__name__} reached max retry. Exit retry with user error.", file=sys.stderr)
                        raise ExceedMaxRetryTimes(e)

                    if hasattr(e, 'response') and e.response is not None:
                        retry_after_in_header = e.response.headers.get("retry-after", None)
                    else:
                        retry_after_in_header = None

                    if not retry_after_in_header:
                        retry_after_seconds = delay * (2 ** i)
                        msg = (
                            f"{type(e).__name__} #{i}, but no Retry-After header, "
                            + f"Back off {retry_after_seconds} seconds for retry."
                        )
                        print(msg, file=sys.stderr)
                    else:
                        retry_after_seconds = float(retry_after_in_header) * (2 ** i)
                        msg = (
                            f"{type(e).__name__} #{i}, Retry-After={retry_after_in_header}, "
                            f"Back off {retry_after_seconds} seconds for retry."
                        )
                        print(msg, file=sys.stderr)
                    time.sleep(retry_after_seconds)
                except OpenAIError as e:
                    # For other non-retriable errors from OpenAIError,
                    # for example, AuthenticationError, APIConnectionError, BadRequestError, NotFoundError,
                    # mark UserError for all the non-retriable OpenAIError
                    print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
                    raise WrappedOpenAIError(e)
                except Exception as e:
                    print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
                    error_message = f"OpenAI API hits exception: {type(e).__name__}: {str(e)}"
                    raise LLMError(message=error_message)
        return wrapper
    return decorator


def to_bool(value) -> bool:
    return str(value).lower() == "true"


def render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs):
    try:
        return Template(prompt, trim_blocks=trim_blocks, keep_trailing_newline=keep_trailing_newline).render(**kwargs)
    except Exception as e:
        # For exceptions raised by the jinja2 module, mark UserError
        print(f"Exception occurs: {type(e).__name__}: {str(e)}", file=sys.stderr)
        error_message = f"Failed to render jinja template: {type(e).__name__}: {str(e)}. " \
                        + "Please modify your prompt to fix the issue."
        raise JinjaTemplateError(message=error_message) from e


def process_function_call(function_call):
    if function_call is None:
        param = "auto"
    elif function_call == "auto" or function_call == "none":
        param = function_call
    else:
        function_call_example = json.dumps({"name": "function_name"})
        common_tsg = f"Here is a valid example: {function_call_example}. See the guide at " \
                     "https://platform.openai.com/docs/api-reference/chat/create#chat/create-function_call " \
                     "or view sample 'How to call functions with chat models' in our gallery."
        param = function_call
        if not isinstance(param, dict):
            raise ChatAPIInvalidFunctions(
                message=f"function_call parameter '{param}' must be a dict, but not {type(function_call)}. {common_tsg}"
            )
        else:
            if "name" not in function_call:
                raise ChatAPIInvalidFunctions(
                    message=f'function_call parameter {json.dumps(param)} must contain "name" field. {common_tsg}'
                )
    return param


def post_process_chat_api_response(completion, stream, functions):
    if stream:
        if functions is not None:
            error_message = "Function calling has not been supported by stream mode yet."
            raise FunctionCallNotSupportedInStreamMode(message=error_message)

        def generator():
            for chunk in completion:
                if chunk.choices:
                    yield chunk.choices[0].delta.content if hasattr(chunk.choices[0].delta, 'content') and \
                        chunk.choices[0].delta.content is not None else ""

        # We must return the generator object, not use yield directly here.
        # Otherwise, the function itself would become a generator, regardless of whether stream is True or False.
        return generator()
    else:
        # When calling a function, the function_call response is returned as a field in the message, so we need to
        # return the message directly. Otherwise, we only return the content.
        if functions is not None:
            return completion.model_dump()["choices"][0]["message"]
        else:
            # chat api may return a message with no content.
            return getattr(completion.choices[0].message, "content", "")


def preprocess_template_string(template_string: str) -> str:
    """Remove the image input decorator from the template string and place the image input on a new line."""
    pattern = re.compile(r'\!\[(\s*image\s*)\]\(\{\{(\s*[^\s{}]+\s*)\}\}\)')

    # Find all matches in the input string
    matches = pattern.findall(template_string)

    # Perform substitutions
    for match in matches:
        original = f"![{match[0]}]({{{{{match[1]}}}}})"
        replacement = f"\n{{{{{match[1]}}}}}\n"
        template_string = template_string.replace(original, replacement)
    return template_string


def convert_to_chat_list(obj):
    if isinstance(obj, dict):
        return {key: convert_to_chat_list(value) for key, value in obj.items()}
    elif isinstance(obj, list):
        return ChatInputList([convert_to_chat_list(item) for item in obj])
    else:
        return obj


def add_referenced_images_to_set(value, image_set, image_type):
    if isinstance(value, image_type):
        image_set.add(value)
    elif isinstance(value, list):
        for item in value:
            add_referenced_images_to_set(item, image_set, image_type)
    elif isinstance(value, dict):
        for _, item in value.items():
            add_referenced_images_to_set(item, image_set, image_type)


def find_referenced_image_set(kwargs: dict):
    referenced_images = set()
    try:
        from promptflow.contracts.multimedia import Image
        for _, value in kwargs.items():
            add_referenced_images_to_set(value, referenced_images, Image)
    except ImportError:
        pass
    return referenced_images


def normalize_connection_config(connection):
    """
    Normalizes the configuration of a given connection object for compatibility.

    This function takes a connection object and normalizes its configuration,
    ensuring it is compatible and standardized for use.
    """
    if isinstance(connection, AzureOpenAIConnection):
        return {
            "api_key": connection.api_key,
            "api_version": connection.api_version,
            "azure_endpoint": connection.api_base
        }
    elif isinstance(connection, OpenAIConnection):
        return {
            "api_key": connection.api_key,
            "organization": connection.organization,
            "base_url": connection.base_url
        }
    else:
        error_message = f"Unsupported connection type '{type(connection).__name__}'. " \
                        f"Connection type should be in [AzureOpenAIConnection, OpenAIConnection]."
        raise InvalidConnectionType(message=error_message)
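A self-contained demo of the render-then-parse pipeline the chat tools build on; the prompt and question are illustrative, and the printed output follows directly from render_jinja_template and parse_chat above:

from promptflow.tools.common import render_jinja_template, parse_chat

prompt = "system:\nYou are a bot.\nuser:\n{{question}}"
chat_str = render_jinja_template(prompt, question="hi")
print(parse_chat(chat_str))
# [{'role': 'system', 'content': 'You are a bot.'},
#  {'role': 'user', 'content': 'hi'}]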
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/open_model_llm.py
import functools import json import os import re import requests import sys import time import tempfile from abc import abstractmethod from datetime import datetime, timedelta from enum import Enum from typing import Any, Dict, List, Tuple, Optional, Union from promptflow._core.tool import ToolProvider, tool from promptflow._sdk._constants import ConnectionType from promptflow.connections import CustomConnection from promptflow.contracts.types import PromptTemplate from promptflow.tools.common import render_jinja_template, validate_role from promptflow.tools.exception import ( OpenModelLLMOnlineEndpointError, OpenModelLLMUserError, OpenModelLLMKeyValidationError, ChatAPIInvalidRole ) DEPLOYMENT_DEFAULT = "default" CONNECTION_CACHE_FILE = "pf_connection_names" VALID_LLAMA_ROLES = {"system", "user", "assistant"} AUTH_REQUIRED_CONNECTION_TYPES = {"serverlessendpoint", "onlineendpoint", "connection"} REQUIRED_CONFIG_KEYS = ["endpoint_url", "model_family"] REQUIRED_SECRET_KEYS = ["endpoint_api_key"] ENDPOINT_REQUIRED_ENV_VARS = ["AZUREML_ARM_SUBSCRIPTION", "AZUREML_ARM_RESOURCEGROUP", "AZUREML_ARM_WORKSPACE_NAME"] def handle_online_endpoint_error(max_retries: int = 5, initial_delay: float = 2, exponential_base: float = 3): def deco_retry(func): @functools.wraps(func) def wrapper(*args, **kwargs): delay = initial_delay for i in range(max_retries): try: return func(*args, **kwargs) except OpenModelLLMOnlineEndpointError as e: if i == max_retries - 1: error_message = f"Exception hit calling Online Endpoint: {type(e).__name__}: {str(e)}" print(error_message, file=sys.stderr) raise OpenModelLLMOnlineEndpointError(message=error_message) delay *= exponential_base time.sleep(delay) return wrapper return deco_retry class ConnectionCache: def __init__(self, use_until: datetime, subscription_id: str, resource_group: str, workspace_name: str, connection_names: List[str]): self.use_until = use_until self.subscription_id = subscription_id self.resource_group = resource_group self.workspace_name = workspace_name self.connection_names = connection_names @classmethod def from_filename(self, file): cache = json.load(file) return self(cache['use_until'], cache['subscription_id'], cache['resource_group'], cache['workspace_name'], cache['connection_names']) def can_use(self, subscription_id: str, resource_group: str, workspace_name: str): use_until_time = datetime.fromisoformat(self.use_until) return (use_until_time > datetime.now() and self.subscription_id == subscription_id and self.resource_group == resource_group and self.workspace_name == workspace_name) class Endpoint: def __init__(self, endpoint_name: str, endpoint_url: str, endpoint_api_key: str): self.deployments: List[Deployment] = [] self.default_deployment: Deployment = None self.endpoint_url = endpoint_url self.endpoint_api_key = endpoint_api_key self.endpoint_name = endpoint_name class Deployment: def __init__(self, deployment_name: str, model_family: str): self.model_family = model_family self.deployment_name = deployment_name class ServerlessEndpointsContainer: API_VERSION = "2023-08-01-preview" def _get_headers(self, token: str) -> Dict[str, str]: headers = { "Authorization": f"Bearer {token}", "Content-Type": "application/json", } return headers def get_serverless_arm_url(self, subscription_id, resource_group, workspace_name, suffix=None): suffix = "" if suffix is None else f"/{suffix}" return f"https://management.azure.com/subscriptions/{subscription_id}" \ + f"/resourceGroups/{resource_group}/providers/Microsoft.MachineLearningServices" \ + 
f"/workspaces/{workspace_name}/serverlessEndpoints{suffix}?api-version={self.API_VERSION}" def _list(self, token: str, subscription_id: str, resource_group: str, workspace_name: str): headers = self._get_headers(token) url = self.get_serverless_arm_url(subscription_id, resource_group, workspace_name) try: response = requests.get(url, headers=headers, timeout=50) return json.loads(response.content)['value'] except Exception as e: print(f"Error encountered when listing serverless endpoints. Exception: {e}", file=sys.stderr) return [] def _validate_model_family(self, serverless_endpoint): try: if serverless_endpoint.get('properties', {}).get('provisioningState') != "Succeeded": return None if (try_get_from_dict(serverless_endpoint, ['properties', 'offer', 'publisher']) == 'Meta' and "llama" in try_get_from_dict(serverless_endpoint, ['properties', 'offer', 'offerName'])): return ModelFamily.LLAMA if (try_get_from_dict(serverless_endpoint, ['properties', 'marketplaceInfo', 'publisherId']) == 'metagenai' and "llama" in try_get_from_dict(serverless_endpoint, ['properties', 'marketplaceInfo', 'offerId'])): return ModelFamily.LLAMA except Exception as ex: print(f"Ignoring endpoint {serverless_endpoint['id']} due to error: {ex}", file=sys.stderr) return None def list_serverless_endpoints(self, token, subscription_id, resource_group, workspace_name, return_endpoint_url: bool = False): serverlessEndpoints = self._list(token, subscription_id, resource_group, workspace_name) result = [] for e in serverlessEndpoints: if (self._validate_model_family(e)): result.append({ "value": f"serverlessEndpoint/{e['name']}", "display_value": f"[Serverless] {e['name']}", # "hyperlink": self.get_endpoint_url(e.endpoint_name) "description": f"Serverless Endpoint: {e['name']}", }) if return_endpoint_url: result[-1]['url'] = try_get_from_dict(e, ['properties', 'inferenceEndpoint', 'uri']) return result def _list_endpoint_key(self, token: str, subscription_id: str, resource_group: str, workspace_name: str, serverless_endpoint_name: str): headers = self._get_headers(token) url = self.get_serverless_arm_url(subscription_id, resource_group, workspace_name, f"{serverless_endpoint_name}/listKeys") try: response = requests.post(url, headers=headers, timeout=50) return json.loads(response.content) except Exception as e: print(f"Unable to get key from selected serverless endpoint. Exception: {e}", file=sys.stderr) def get_serverless_endpoint(self, token: str, subscription_id: str, resource_group: str, workspace_name: str, serverless_endpoint_name: str): headers = self._get_headers(token) url = self.get_serverless_arm_url(subscription_id, resource_group, workspace_name, serverless_endpoint_name) try: response = requests.get(url, headers=headers, timeout=50) return json.loads(response.content) except Exception as e: print(f"Unable to get selected serverless endpoint. 
Exception: {e}", file=sys.stderr) def get_serverless_endpoint_key(self, token: str, subscription_id: str, resource_group: str, workspace_name: str, serverless_endpoint_name: str) -> Tuple[str, str, str]: endpoint = self.get_serverless_endpoint(token, subscription_id, resource_group, workspace_name, serverless_endpoint_name) endpoint_url = try_get_from_dict(endpoint, ['properties', 'inferenceEndpoint', 'uri']) model_family = self._validate_model_family(endpoint) endpoint_api_key = self._list_endpoint_key(token, subscription_id, resource_group, workspace_name, serverless_endpoint_name)['primaryKey'] return (endpoint_url, endpoint_api_key, model_family) class CustomConnectionsContainer: def get_azure_custom_connection_names(self, credential, subscription_id: str, resource_group_name: str, workspace_name: str, return_endpoint_url: bool = False ) -> List[Dict[str, Union[str, int, float, list, Dict]]]: result = [] try: from promptflow.azure import PFClient as AzurePFClient azure_pf_client = AzurePFClient( credential=credential, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name) except Exception: message = "Skipping Azure PFClient. To connect, please ensure the following environment variables are set: " message += ",".join(ENDPOINT_REQUIRED_ENV_VARS) print(message, file=sys.stderr) return result connections = azure_pf_client._connections.list() for c in connections: if c.type == ConnectionType.CUSTOM and "model_family" in c.configs: try: validate_model_family(c.configs["model_family"]) result.append({ "value": f"connection/{c.name}", "display_value": f"[Connection] {c.name}", # "hyperlink": "", "description": f"Custom Connection: {c.name}", }) if return_endpoint_url: result[-1]['url'] = c.configs['endpoint_url'] except Exception: # silently ignore unsupported model family continue return result def get_local_custom_connection_names(self, return_endpoint_url: bool = False ) -> List[Dict[str, Union[str, int, float, list, Dict]]]: result = [] try: from promptflow import PFClient as LocalPFClient except Exception as e: print(f"Skipping Local PFClient. 
Exception: {e}", file=sys.stderr) return result pf = LocalPFClient() connections = pf.connections.list() for c in connections: if c.type == ConnectionType.CUSTOM and "model_family" in c.configs: try: validate_model_family(c.configs["model_family"]) result.append({ "value": f"localConnection/{c.name}", "display_value": f"[Local Connection] {c.name}", # "hyperlink": "", "description": f"Local Custom Connection: {c.name}", }) if return_endpoint_url: result[-1]['url'] = c.configs['endpoint_url'] except Exception: # silently ignore unsupported model family continue return result def get_endpoint_from_local_custom_connection(self, connection_name) -> Tuple[str, str, str]: from promptflow import PFClient as LocalPFClient pf = LocalPFClient() connection = pf.connections.get(connection_name, with_secrets=True) return self.get_endpoint_from_custom_connection(connection) def get_endpoint_from_azure_custom_connection(self, credential, subscription_id, resource_group_name, workspace_name, connection_name) -> Tuple[str, str, str]: from promptflow.azure import PFClient as AzurePFClient azure_pf_client = AzurePFClient( credential=credential, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name) connection = azure_pf_client._arm_connections.get(connection_name) return self.get_endpoint_from_custom_connection(connection) def get_endpoint_from_custom_connection(self, connection: CustomConnection) -> Tuple[str, str, str]: conn_dict = dict(connection) for key in REQUIRED_CONFIG_KEYS: if key not in conn_dict: accepted_keys = ",".join([key for key in REQUIRED_CONFIG_KEYS]) raise OpenModelLLMKeyValidationError( message=f"""Required key `{key}` not found in given custom connection. Required keys are: {accepted_keys}.""" ) for key in REQUIRED_SECRET_KEYS: if key not in conn_dict: accepted_keys = ",".join([key for key in REQUIRED_SECRET_KEYS]) raise OpenModelLLMKeyValidationError( message=f"""Required secret key `{key}` not found in given custom connection. Required keys are: {accepted_keys}.""" ) model_family = validate_model_family(connection.configs['model_family']) return (connection.configs['endpoint_url'], connection.secrets['endpoint_api_key'], model_family) def list_custom_connection_names(self, credential, subscription_id: str, resource_group_name: str, workspace_name: str, return_endpoint_url: bool = False ) -> List[Dict[str, Union[str, int, float, list, Dict]]]: azure_custom_connections = self.get_azure_custom_connection_names(credential, subscription_id, resource_group_name, workspace_name, return_endpoint_url) local_custom_connections = self.get_local_custom_connection_names(return_endpoint_url) return azure_custom_connections + local_custom_connections class EndpointsContainer: def get_ml_client(self, credential, subscription_id: str, resource_group_name: str, workspace_name: str): try: from azure.ai.ml import MLClient return MLClient( credential=credential, subscription_id=subscription_id, resource_group_name=resource_group_name, workspace_name=workspace_name) except Exception as e: message = "Unable to connect to AzureML. 
Please ensure the following environment variables are set: " message += ",".join(ENDPOINT_REQUIRED_ENV_VARS) message += "\nException: " + str(e) raise OpenModelLLMOnlineEndpointError(message=message) def get_endpoints_and_deployments(self, credential, subscription_id: str, resource_group_name: str, workspace_name: str) -> List[Endpoint]: ml_client = self.get_ml_client(credential, subscription_id, resource_group_name, workspace_name) list_of_endpoints: List[Endpoint] = [] for ep in ml_client.online_endpoints.list(): endpoint = Endpoint( endpoint_name=ep.name, endpoint_url=ep.scoring_uri, endpoint_api_key=ml_client.online_endpoints.get_keys(ep.name).primary_key) ordered_deployment_names = sorted(ep.traffic, key=lambda item: item[1]) deployments = ml_client.online_deployments.list(ep.name) for deployment_name in ordered_deployment_names: for d in deployments: if d.name == deployment_name: model_family = get_model_type(d.model) if model_family is None: continue deployment = Deployment(deployment_name=d.name, model_family=model_family) endpoint.deployments.append(deployment) # Deployment are ordered by traffic level, first in is default if endpoint.default_deployment is None: endpoint.default_deployment = deployment if len(endpoint.deployments) > 0: list_of_endpoints.append(endpoint) self.__endpoints_and_deployments = list_of_endpoints return self.__endpoints_and_deployments def get_endpoint_url(self, endpoint_name, subscription_id, resource_group_name, workspace_name): return f"https://ml.azure.com/endpoints/realtime/{endpoint_name}" \ + f"/detail?wsid=/subscriptions/{subscription_id}" \ + f"/resourceGroups/{resource_group_name}" \ + f"/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}" def list_endpoint_names(self, credential, subscription_id, resource_group_name, workspace_name, return_endpoint_url: bool = False ) -> List[Dict[str, Union[str, int, float, list, Dict]]]: '''Function for listing endpoints in the UX''' endpoints_and_deployments = self.get_endpoints_and_deployments( credential, subscription_id, resource_group_name, workspace_name) result = [] for e in endpoints_and_deployments: result.append({ "value": f"onlineEndpoint/{e.endpoint_name}", "display_value": f"[Online] {e.endpoint_name}", "hyperlink": self.get_endpoint_url(e.endpoint_name, subscription_id, resource_group_name, workspace_name), "description": f"Online Endpoint: {e.endpoint_name}", }) if return_endpoint_url: result[-1]['url'] = e.endpoint_url return result def list_deployment_names(self, credential, subscription_id, resource_group_name, workspace_name, endpoint_name: str ) -> List[Dict[str, Union[str, int, float, list, Dict]]]: '''Function for listing deployments in the UX''' if endpoint_name is None: return [] endpoints_and_deployments = self.get_endpoints_and_deployments( credential, subscription_id, resource_group_name, workspace_name) for endpoint in endpoints_and_deployments: if endpoint.endpoint_name == endpoint_name: result = [] for d in endpoint.deployments: result.append({ "value": d.deployment_name, "display_value": d.deployment_name, # "hyperlink": '', "description": f"this is {d.deployment_name} item", }) return result return [] ENDPOINT_CONTAINER = EndpointsContainer() CUSTOM_CONNECTION_CONTAINER = CustomConnectionsContainer() SERVERLESS_ENDPOINT_CONTAINER = ServerlessEndpointsContainer() def is_serverless_endpoint(endpoint_url: str) -> bool: return "serverless.ml.azure.com" in endpoint_url or "inference.ai.azure.com" in endpoint_url def try_get_from_dict(some_dict: Dict, key_list: 
List): for key in key_list: if some_dict is None: return some_dict elif key in some_dict: some_dict = some_dict[key] else: return None return some_dict def parse_endpoint_connection_type(endpoint_connection_name: str) -> Tuple[str, str]: endpoint_connection_details = endpoint_connection_name.split("/") return (endpoint_connection_details[0].lower(), endpoint_connection_details[1]) def list_endpoint_names(subscription_id: str, resource_group_name: str, workspace_name: str, return_endpoint_url: bool = False, force_refresh: bool = False) -> List[Dict[str, Union[str, int, float, list, Dict]]]: cache_file_path = None try: with tempfile.NamedTemporaryFile(delete=False) as temp_file: cache_file_path = os.path.join(os.path.dirname(temp_file.name), CONNECTION_CACHE_FILE) print(f"Attempting to read connection cache. File path: {cache_file_path}", file=sys.stdout) if force_refresh: print("....skipping. force_refresh is True", file=sys.stdout) else: with open(cache_file_path, 'r') as file: cache = ConnectionCache.from_filename(file) if cache.can_use(subscription_id, resource_group_name, workspace_name): if len(cache.connection_names) > 0: print("....using Connection Cache File", file=sys.stdout) return cache.connection_names else: print("....skipping. No connections in file", file=sys.stdout) else: print("....skipping. File not relevant", file=sys.stdout) except Exception as e: print(f"....failed to find\\read connection cache file. Regenerating. Error:{e}", file=sys.stdout) try: from azure.identity import DefaultAzureCredential credential = DefaultAzureCredential(exclude_interactive_browser_credential=False) token = credential.get_token("https://management.azure.com/.default").token except Exception as e: print(f"Skipping list_endpoint_names. Exception: {e}", file=sys.stderr) msg = "Exception getting token: Please retry" return [{"value": msg, "display_value": msg, "description": msg}] serverless_endpoints = SERVERLESS_ENDPOINT_CONTAINER.list_serverless_endpoints(token, subscription_id, resource_group_name, workspace_name, return_endpoint_url) online_endpoints = ENDPOINT_CONTAINER.list_endpoint_names(credential, subscription_id, resource_group_name, workspace_name, return_endpoint_url) custom_connections = CUSTOM_CONNECTION_CONTAINER.list_custom_connection_names(credential, subscription_id, resource_group_name, workspace_name, return_endpoint_url) list_of_endpoints = custom_connections + serverless_endpoints + online_endpoints cache = ConnectionCache(use_until=(datetime.now() + timedelta(minutes=5)).isoformat(), subscription_id=subscription_id, resource_group=resource_group_name, workspace_name=workspace_name, connection_names=list_of_endpoints) if len(list_of_endpoints) == 0: msg = "No endpoints found. Please add a connection." return [{"value": msg, "display_value": msg, "description": msg}] if cache_file_path is not None: try: print(f"Attempting to write connection cache. File path: {cache_file_path}", file=sys.stdout) with open(cache_file_path, 'w') as file: json.dump(cache, file, default=lambda obj: obj.__dict__) print("....written", file=sys.stdout) except Exception as e: print(f"""....failed to write connection cache file. Will need to reload next time. 
Error:{e}""", file=sys.stdout) return list_of_endpoints def list_deployment_names(subscription_id: str, resource_group_name: str, workspace_name: str, endpoint: str = None) -> List[Dict[str, Union[str, int, float, list, Dict]]]: deployment_default_list = [{ "value": DEPLOYMENT_DEFAULT, "display_value": DEPLOYMENT_DEFAULT, "description": "This will use the default deployment for the selected online endpoint." + "You can also manually enter a deployment name here." }] if endpoint is None or endpoint.strip() == "" or "/" not in endpoint: return deployment_default_list (endpoint_connection_type, endpoint_connection_name) = parse_endpoint_connection_type(endpoint) if endpoint_connection_type != "onlineendpoint": return deployment_default_list try: from azure.identity import DefaultAzureCredential credential = DefaultAzureCredential(exclude_interactive_browser_credential=False) except Exception as e: print(f"Skipping list_deployment_names. Exception: {e}", file=sys.stderr) return deployment_default_list return deployment_default_list + ENDPOINT_CONTAINER.list_deployment_names( credential, subscription_id, resource_group_name, workspace_name, endpoint_connection_name ) def get_model_type(deployment_model: str) -> str: m = re.match(r'azureml://registries/[^/]+/models/([^/]+)/versions/', deployment_model) if m is None: print(f"Unexpected model format: {deployment_model}. Skipping", file=sys.stdout) return None model = m[1].lower() if model.startswith("llama-2"): return ModelFamily.LLAMA elif model.startswith("tiiuae-falcon"): return ModelFamily.FALCON elif model.startswith("databricks-dolly-v2"): return ModelFamily.DOLLY elif model.startswith("gpt2"): return ModelFamily.GPT2 else: # Not found and\or handled. Ignore this endpoint\deployment print(f"Unexpected model type: {model} derived from deployed model: {deployment_model}") return None def validate_model_family(model_family: str): try: return ModelFamily[model_family] except KeyError: accepted_models = ",".join([model.name for model in ModelFamily]) raise OpenModelLLMKeyValidationError( message=f"""Given model_family '{model_family}' not recognized. Supported models are: {accepted_models}.""" ) class ModelFamily(str, Enum): LLAMA = "LLaMa" DOLLY = "Dolly" GPT2 = "GPT-2" FALCON = "Falcon" @classmethod def _missing_(cls, value): value = value.lower() for member in cls: if member.lower() == value: return member return None STANDARD_CONTRACT_MODELS = [ModelFamily.DOLLY, ModelFamily.GPT2, ModelFamily.FALCON] class API(str, Enum): CHAT = "chat" COMPLETION = "completion" class ContentFormatterBase: """Transform request and response of AzureML endpoint to match with required schema. """ content_type: Optional[str] = "application/json" """The MIME type of the input data passed to the endpoint""" accepts: Optional[str] = "application/json" """The MIME type of the response data returned from the endpoint""" @staticmethod def escape_special_characters(prompt: str) -> str: """Escapes any special characters in `prompt`""" return re.sub( r'\\([\\\"a-zA-Z])', r'\\\1', prompt) @staticmethod def parse_chat(chat_str: str) -> List[Dict[str, str]]: # LLaMa only supports below roles. separator = r"(?i)\n*(system|user|assistant)\s*:\s*\n" chunks = re.split(separator, chat_str) # remove any empty chunks chunks = [c.strip() for c in chunks if c.strip()] chat_list = [] for index in range(0, len(chunks), 2): role = chunks[index].lower() # Check if prompt follows chat api message format and has valid role. 
try: validate_role(role, VALID_LLAMA_ROLES) except ChatAPIInvalidRole as e: raise OpenModelLLMUserError(message=e.message) if len(chunks) <= index + 1: message = "Unexpected chat format. Please ensure the query matches the chat format of the model used." raise OpenModelLLMUserError(message=message) chat_list.append({ "role": role, "content": chunks[index+1] }) return chat_list @abstractmethod def format_request_payload(self, prompt: str, model_kwargs: Dict) -> str: """Formats the request body according to the input schema of the model. Returns bytes or seekable file like object in the format specified in the content_type request header. """ @abstractmethod def format_response_payload(self, output: bytes) -> str: """Formats the response body according to the output schema of the model. Returns the data type that is received from the response. """ class MIRCompleteFormatter(ContentFormatterBase): """Content handler for LLMs from the HuggingFace catalog.""" def format_request_payload(self, prompt: str, model_kwargs: Dict) -> str: input_str = json.dumps( { "input_data": {"input_string": [ContentFormatterBase.escape_special_characters(prompt)]}, "parameters": model_kwargs, } ) return input_str def format_response_payload(self, output: bytes) -> str: """These models only support generation - expect a single output style""" response_json = json.loads(output) if len(response_json) > 0 and "0" in response_json[0]: if "0" in response_json[0]: return response_json[0]["0"] elif "output" in response_json: return response_json["output"] error_message = f"Unexpected response format. Response: {response_json}" print(error_message, file=sys.stderr) raise OpenSourceLLMOnlineEndpointError(message=error_message) class LlamaContentFormatter(ContentFormatterBase): """Content formatter for LLaMa""" def __init__(self, api: API, chat_history: Optional[str] = ""): super().__init__() self.api = api self.chat_history = chat_history def format_request_payload(self, prompt: str, model_kwargs: Dict) -> str: """Formats the request according the the chosen api""" if "do_sample" not in model_kwargs: model_kwargs["do_sample"] = True if self.api == API.CHAT: prompt_value = ContentFormatterBase.parse_chat(self.chat_history) else: prompt_value = [ContentFormatterBase.escape_special_characters(prompt)] return json.dumps( { "input_data": { "input_string": prompt_value, "parameters": model_kwargs } } ) def format_response_payload(self, output: bytes) -> str: """Formats response""" response_json = json.loads(output) if self.api == API.CHAT and "output" in response_json: return response_json["output"] elif self.api == API.COMPLETION and len(response_json) > 0 and "0" in response_json[0]: return response_json[0]["0"] else: error_message = f"Unexpected response format. 
Response: {response_json}" print(error_message, file=sys.stderr) raise OpenModelLLMOnlineEndpointError(message=error_message) class ServerlessLlamaContentFormatter(ContentFormatterBase): """Content formatter for LLaMa""" def __init__(self, api: API, chat_history: Optional[str] = ""): super().__init__() self.api = api self.chat_history = chat_history self.model_id = "llama-2-7b-hf" def format_request_payload(self, prompt: str, model_kwargs: Dict) -> str: """Formats the request according the the chosen api""" # Modify max_tokens key for serverless model_kwargs["max_tokens"] = model_kwargs["max_new_tokens"] if self.api == API.CHAT: messages = ContentFormatterBase.parse_chat(self.chat_history) base_body = { "model": self.model_id, "messages": messages, "n": 1, } base_body.update(model_kwargs) else: prompt_value = ContentFormatterBase.escape_special_characters(prompt) base_body = { "prompt": prompt_value, "n": 1, } base_body.update(model_kwargs) return json.dumps(base_body) def format_response_payload(self, output: bytes) -> str: """Formats response""" response_json = json.loads(output) if self.api == API.CHAT and "choices" in response_json: return response_json["choices"][0]["message"]["content"] elif self.api == API.COMPLETION and "choices" in response_json: return response_json["choices"][0]["text"] else: error_message = f"Unexpected response format. Response: {response_json}" print(error_message, file=sys.stderr) raise OpenModelLLMOnlineEndpointError(message=error_message) class ContentFormatterFactory: """Factory class for supported models""" def get_content_formatter( model_family: ModelFamily, api: API, chat_history: Optional[List[Dict]] = [], endpoint_url: Optional[str] = "" ) -> ContentFormatterBase: if model_family == ModelFamily.LLAMA: if is_serverless_endpoint(endpoint_url): return ServerlessLlamaContentFormatter(chat_history=chat_history, api=api) else: return LlamaContentFormatter(chat_history=chat_history, api=api) elif model_family in STANDARD_CONTRACT_MODELS: return MIRCompleteFormatter() class AzureMLOnlineEndpoint: """Azure ML Online Endpoint models.""" endpoint_url: str = "" """URL of pre-existing Endpoint. Should be passed to constructor or specified as env var `AZUREML_ENDPOINT_URL`.""" endpoint_api_key: str = "" """Authentication Key for Endpoint. Should be passed to constructor or specified as env var `AZUREML_ENDPOINT_API_KEY`.""" content_formatter: Any = None """The content formatter that provides an input and output transform function to handle formats between the LLM and the endpoint""" model_kwargs: Optional[Dict] = {} """Key word arguments to pass to the model.""" def __init__( self, endpoint_url: str, endpoint_api_key: str, content_formatter: ContentFormatterBase, model_family: ModelFamily, deployment_name: Optional[str] = None, model_kwargs: Optional[Dict] = {}, ): self.endpoint_url = endpoint_url self.endpoint_api_key = endpoint_api_key self.deployment_name = deployment_name self.content_formatter = content_formatter self.model_kwargs = model_kwargs self.model_family = model_family def _call_endpoint(self, request_body: str) -> str: """call.""" headers = { "Content-Type": "application/json", "Authorization": ("Bearer " + self.endpoint_api_key), "x-ms-user-agent": "PromptFlow/OpenModelLLM/" + self.model_family } # If this is not set it'll use the default deployment on the endpoint. 
if self.deployment_name is not None: headers["azureml-model-deployment"] = self.deployment_name result = requests.post(self.endpoint_url, data=request_body, headers=headers) if result.status_code != 200: error_message = f"""Request failure while calling Online Endpoint Status:{result.status_code} Error:{result.text}""" print(error_message, file=sys.stderr) raise OpenModelLLMOnlineEndpointError(message=error_message) return result.text def __call__( self, prompt: str ) -> str: """Call out to an AzureML Managed Online endpoint. Args: prompt: The prompt to pass into the model. Returns: The string generated by the model. Example: .. code-block:: python response = azureml_model("Tell me a joke.") """ request_body = self.content_formatter.format_request_payload(prompt, self.model_kwargs) endpoint_response = self._call_endpoint(request_body) response = self.content_formatter.format_response_payload(endpoint_response) return response class OpenModelLLM(ToolProvider): def __init__(self): super().__init__() def get_deployment_from_endpoint(self, credential, subscription_id: str, resource_group_name: str, workspace_name: str, endpoint_name: str, deployment_name: str = None) -> Tuple[str, str, str]: endpoints_and_deployments = ENDPOINT_CONTAINER.get_endpoints_and_deployments( credential, subscription_id, resource_group_name, workspace_name) for ep in endpoints_and_deployments: if ep.endpoint_name == endpoint_name: if deployment_name is None: return (ep.endpoint_url, ep.endpoint_api_key, ep.default_deployment.model_family) for d in ep.deployments: if d.deployment_name == deployment_name: return (ep.endpoint_url, ep.endpoint_api_key, d.model_family) message = f"""Invalid endpoint and deployment values. Please ensure endpoint name and deployment names are correct, and the deployment was successfull. Could not find endpoint: {endpoint_name} and deployment: {deployment_name}""" raise OpenModelLLMUserError(message=message) def sanitize_endpoint_url(self, endpoint_url: str, api_type: API): if is_serverless_endpoint(endpoint_url): if api_type == API.CHAT: if not endpoint_url.endswith("/v1/chat/completions"): return endpoint_url + "/v1/chat/completions" else: if not endpoint_url.endswith("/v1/completions"): return endpoint_url + "/v1/completions" return endpoint_url def get_endpoint_details(self, subscription_id: str, resource_group_name: str, workspace_name: str, endpoint: str, api_type: API, deployment_name: str = None, **kwargs) -> Tuple[str, str, str]: if self.endpoint_values_in_kwargs(**kwargs): endpoint_url = kwargs["endpoint_url"] endpoint_api_key = kwargs["endpoint_api_key"] model_family = kwargs["model_family"] # clean these up, aka don't send them to MIR del kwargs["endpoint_url"] del kwargs["endpoint_api_key"] del kwargs["model_family"] return (endpoint_url, endpoint_api_key, model_family) (endpoint_connection_type, endpoint_connection_name) = parse_endpoint_connection_type(endpoint) print(f"endpoint_connection_type: {endpoint_connection_type} name: {endpoint_connection_name}", file=sys.stdout) con_type = endpoint_connection_type.lower() if con_type in AUTH_REQUIRED_CONNECTION_TYPES: try: from azure.identity import DefaultAzureCredential credential = DefaultAzureCredential(exclude_interactive_browser_credential=False) token = credential.get_token("https://management.azure.com/.default").token except Exception as e: message = f"""Error encountered while attempting to Authorize access to {endpoint}. 
Exception: {e}""" print(message, file=sys.stderr) raise OpenModelLLMUserError(message=message) if con_type == "serverlessendpoint": (endpoint_url, endpoint_api_key, model_family) = SERVERLESS_ENDPOINT_CONTAINER.get_serverless_endpoint_key( token, subscription_id, resource_group_name, workspace_name, endpoint_connection_name) elif con_type == "onlineendpoint": (endpoint_url, endpoint_api_key, model_family) = self.get_deployment_from_endpoint( credential, subscription_id, resource_group_name, workspace_name, endpoint_connection_name, deployment_name) elif con_type == "connection": (endpoint_url, endpoint_api_key, model_family) = CUSTOM_CONNECTION_CONTAINER.get_endpoint_from_azure_custom_connection( credential, subscription_id, resource_group_name, workspace_name, endpoint_connection_name) elif con_type == "localconnection": (endpoint_url, endpoint_api_key, model_family) = CUSTOM_CONNECTION_CONTAINER.get_endpoint_from_local_custom_connection( endpoint_connection_name) else: raise OpenModelLLMUserError(message=f"Invalid endpoint connection type: {endpoint_connection_type}") return (self.sanitize_endpoint_url(endpoint_url, api_type), endpoint_api_key, model_family) def endpoint_values_in_kwargs(self, **kwargs): # This is mostly for testing, suggest not using this since security\privacy concerns for the endpoint key if 'endpoint_url' not in kwargs and 'endpoint_api_key' not in kwargs and 'model_family' not in kwargs: return False if 'endpoint_url' not in kwargs or 'endpoint_api_key' not in kwargs or 'model_family' not in kwargs: message = """Endpoint connection via kwargs not fully set. If using kwargs, the following values must be set: endpoint_url, endpoint_api_key, and model_family""" raise OpenModelLLMKeyValidationError(message=message) return True @tool @handle_online_endpoint_error() def call( self, prompt: PromptTemplate, api: API, endpoint_name: str, deployment_name: Optional[str] = None, temperature: Optional[float] = 1.0, max_new_tokens: Optional[int] = 500, top_p: Optional[float] = 1.0, model_kwargs: Optional[Dict] = {}, **kwargs ) -> str: # Sanitize deployment name. Empty deployment name is the same as None. if deployment_name is not None: deployment_name = deployment_name.strip() if not deployment_name or deployment_name == DEPLOYMENT_DEFAULT: deployment_name = None print(f"Executing Open Model LLM Tool for endpoint: '{endpoint_name}', deployment: '{deployment_name}'", file=sys.stdout) (endpoint_url, endpoint_api_key, model_family) = self.get_endpoint_details( subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION", None), resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP", None), workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME", None), endpoint=endpoint_name, api_type=api, deployment_name=deployment_name, **kwargs) prompt = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs) model_kwargs["top_p"] = top_p model_kwargs["temperature"] = temperature model_kwargs["max_new_tokens"] = max_new_tokens content_formatter = ContentFormatterFactory.get_content_formatter( model_family=model_family, api=api, chat_history=prompt, endpoint_url=endpoint_url ) llm = AzureMLOnlineEndpoint( endpoint_url=endpoint_url, endpoint_api_key=endpoint_api_key, model_family=model_family, content_formatter=content_formatter, deployment_name=deployment_name, model_kwargs=model_kwargs ) return llm(prompt)
0
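A minimal usage sketch for the tool above, assuming the AZUREML_ARM_* environment variables point at a configured AzureML workspace and that "my-endpoint" is a deployed online endpoint (both are placeholders, not real resources):

    from promptflow.tools.open_model_llm import API, OpenModelLLM

    llm = OpenModelLLM()
    response = llm.call(
        "The quick brown fox",                       # completion-style prompt
        API.COMPLETION,
        endpoint_name="onlineEndpoint/my-endpoint",  # hypothetical endpoint name
        max_new_tokens=30,
    )
    print(response)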
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/aoai.py
import json

try:
    from openai import AzureOpenAI as AzureOpenAIClient
except Exception:
    raise Exception(
        "Please upgrade your OpenAI package to version 1.0.0 or later using the command: pip install --upgrade openai.")

from promptflow.tools.common import render_jinja_template, handle_openai_error, parse_chat, to_bool, \
    validate_functions, process_function_call, post_process_chat_api_response, normalize_connection_config

# Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow'
# since the code here is in promptflow namespace as well
from promptflow._internal import enable_cache, ToolProvider, tool, register_apis
from promptflow.connections import AzureOpenAIConnection
from promptflow.contracts.types import PromptTemplate


class AzureOpenAI(ToolProvider):
    def __init__(self, connection: AzureOpenAIConnection):
        super().__init__()
        self.connection = connection
        self._connection_dict = normalize_connection_config(self.connection)
        self._client = AzureOpenAIClient(**self._connection_dict)

    def calculate_cache_string_for_completion(
        self,
        **kwargs,
    ) -> str:
        d = dict(self.connection)
        d.pop("api_key")
        d.update({**kwargs})
        return json.dumps(d)

    @tool
    @handle_openai_error()
    @enable_cache(calculate_cache_string_for_completion)
    def completion(
        self,
        prompt: PromptTemplate,
        # For AOAI, the deployment name is customized by the user; it is not the model name.
        deployment_name: str,
        suffix: str = None,
        max_tokens: int = 16,
        temperature: float = 1.0,
        top_p: float = 1.0,
        n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
        stream: bool = False,
        logprobs: int = None,
        echo: bool = False,
        stop: list = None,
        presence_penalty: float = 0,
        frequency_penalty: float = 0,
        best_of: int = 1,
        logit_bias: dict = {},
        user: str = "",
        **kwargs,
    ) -> str:
        prompt = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
        # TODO: remove below type conversion after client can pass json rather than string.
        echo = to_bool(echo)
        stream = to_bool(stream)

        response = self._client.completions.create(
            prompt=prompt,
            model=deployment_name,
            # An empty string suffix should be treated as None.
            suffix=suffix if suffix else None,
            max_tokens=int(max_tokens),
            temperature=float(temperature),
            top_p=float(top_p),
            n=int(n),
            stream=stream,
            # TODO: remove below type conversion after client can pass json rather than string.
            # An empty string will go to the else branch, but the original api cannot accept an
            # empty string; it must be None.
            logprobs=int(logprobs) if logprobs else None,
            echo=echo,
            # Fixes the bug "[] is not valid under any of the given schemas-'stop'".
            stop=stop if stop else None,
            presence_penalty=float(presence_penalty),
            frequency_penalty=float(frequency_penalty),
            best_of=int(best_of),
            # logit_bias must be a dict when passed to the openai api.
            logit_bias=logit_bias if logit_bias else {},
            user=user,
            extra_headers={"ms-azure-ai-promptflow-called-from": "aoai-tool"})

        if stream:
            def generator():
                for chunk in response:
                    if chunk.choices:
                        yield chunk.choices[0].text if hasattr(chunk.choices[0], 'text') and \
                            chunk.choices[0].text is not None else ""

            # We must return the generator object rather than using yield directly here.
            # Otherwise the function itself becomes a generator, regardless of whether stream is True or False.
            return generator()
        else:
            # Get the first element because the prompt is a single input.
            return response.choices[0].text

    @tool
    @handle_openai_error()
    def chat(
        self,
        prompt: PromptTemplate,
        # For AOAI, the deployment name is customized by the user; it is not the model name.
        deployment_name: str,
        temperature: float = 1.0,
        top_p: float = 1.0,
        n: int = 1,
        # stream is hidden from the end user; it is only supposed to be set by the executor.
        stream: bool = False,
        stop: list = None,
        max_tokens: int = None,
        presence_penalty: float = 0,
        frequency_penalty: float = 0,
        logit_bias: dict = {},
        user: str = "",
        # function_call can be of type str or dict.
        function_call: object = None,
        functions: list = None,
        response_format: object = None,
        **kwargs,
    ) -> [str, dict]:
        # keep_trailing_newline=True is to keep the last \n in the prompt to avoid converting "user:\t\n" to "user:".
        chat_str = render_jinja_template(prompt, trim_blocks=True, keep_trailing_newline=True, **kwargs)
        messages = parse_chat(chat_str)
        # TODO: remove below type conversion after client can pass json rather than string.
        stream = to_bool(stream)
        params = {
            "model": deployment_name,
            "messages": messages,
            "temperature": float(temperature),
            "top_p": float(top_p),
            "n": int(n),
            "stream": stream,
            "stop": stop if stop else None,
            "max_tokens": int(max_tokens) if max_tokens is not None and str(max_tokens).lower() != "inf" else None,
            "presence_penalty": float(presence_penalty),
            "frequency_penalty": float(frequency_penalty),
            "logit_bias": logit_bias,
            "user": user,
            "response_format": response_format,
            "extra_headers": {"ms-azure-ai-promptflow-called-from": "aoai-tool"}
        }

        if functions is not None:
            validate_functions(functions)
            params["functions"] = functions
            params["function_call"] = process_function_call(function_call)

        completion = self._client.chat.completions.create(**params)
        return post_process_chat_api_response(completion, stream, functions)


register_apis(AzureOpenAI)


@tool
def completion(
    connection: AzureOpenAIConnection,
    prompt: PromptTemplate,
    deployment_name: str,
    suffix: str = None,
    max_tokens: int = 16,
    temperature: float = 1.0,
    top_p: float = 1,
    n: int = 1,
    stream: bool = False,
    logprobs: int = None,
    echo: bool = False,
    stop: list = None,
    presence_penalty: float = 0,
    frequency_penalty: float = 0,
    best_of: int = 1,
    logit_bias: dict = {},
    user: str = "",
    **kwargs,
) -> str:
    return AzureOpenAI(connection).completion(
        prompt=prompt,
        deployment_name=deployment_name,
        suffix=suffix,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        n=n,
        stream=stream,
        logprobs=logprobs,
        echo=echo,
        stop=stop if stop else None,
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        best_of=best_of,
        logit_bias=logit_bias,
        user=user,
        **kwargs,
    )


@tool
def chat(
    connection: AzureOpenAIConnection,
    prompt: PromptTemplate,
    deployment_name: str,
    temperature: float = 1,
    top_p: float = 1,
    n: int = 1,
    stream: bool = False,
    stop: list = None,
    max_tokens: int = None,
    presence_penalty: float = 0,
    frequency_penalty: float = 0,
    logit_bias: dict = {},
    user: str = "",
    function_call: object = None,
    functions: list = None,
    response_format: object = None,
) -> str:
    return AzureOpenAI(connection).chat(
        prompt=prompt,
        deployment_name=deployment_name,
        temperature=temperature,
        top_p=top_p,
        n=n,
        stream=stream,
        stop=stop if stop else None,
        max_tokens=max_tokens,
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        logit_bias=logit_bias,
        user=user,
        function_call=function_call,
        functions=functions,
        response_format=response_format,
    )
0
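A minimal sketch of calling the module-level chat tool above; the connection values and deployment name are placeholders for a real Azure OpenAI resource:

    from promptflow.connections import AzureOpenAIConnection
    from promptflow.tools.aoai import chat

    connection = AzureOpenAIConnection(
        api_key="<api-key>",                              # placeholder
        api_base="https://<resource>.openai.azure.com/",  # placeholder
    )
    # The prompt must use the role-delimiter format expected by parse_chat.
    answer = chat(
        connection=connection,
        prompt="system:\nYou are a helpful assistant.\nuser:\n{{question}}",
        deployment_name="gpt-35-turbo",                   # hypothetical deployment name
        question="What is prompt flow?",                  # rendered into the Jinja template
    )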
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/template_rendering.py
# Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow'
# since the code here is in promptflow namespace as well
from promptflow._internal import tool
from promptflow.tools.common import render_jinja_template


@tool
def render_template_jinja2(template: str, **kwargs) -> str:
    return render_jinja_template(template, trim_blocks=True, keep_trailing_newline=True, **kwargs)
0
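The tool above is a thin wrapper around the shared Jinja2 helper; for example:

    from promptflow.tools.template_rendering import render_template_jinja2

    text = render_template_jinja2("Hello {{ name }}!", name="world")
    assert text == "Hello world!"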
promptflow_repo/promptflow/src/promptflow-tools/promptflow
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/serpapi.py
import json
import sys
from enum import Enum

import requests

# Avoid circular dependencies: Use import 'from promptflow._internal' instead of 'from promptflow'
# since the code here is in promptflow namespace as well
from promptflow._internal import ToolProvider, tool
from promptflow.connections import SerpConnection
from promptflow.exceptions import PromptflowException
from promptflow.tools.exception import SerpAPIUserError, SerpAPISystemError


class SafeMode(str, Enum):
    ACTIVE = "active"
    OFF = "off"


class Engine(str, Enum):
    GOOGLE = "google"
    BING = "bing"


class SerpAPI(ToolProvider):
    def __init__(self, connection: SerpConnection):
        super().__init__()
        self.connection = connection

    def extract_error_message_from_json(self, error_data):
        error_message = ""
        # For a rejected request, e.g. when the api_key is not valid.
        if "error" in error_data:
            error_message = error_data["error"]

        return str(error_message)

    def safe_extract_error_message(self, response):
        default_error_message = f"SerpAPI search request failed: {response.text}"
        try:
            # Keep the same style as SerpAPIClient.
            error_data = json.loads(response.text)
            print(f"Response text json: {json.dumps(error_data)}", file=sys.stderr)

            error_message = self.extract_error_message_from_json(error_data)
            error_message = error_message if len(error_message) > 0 else default_error_message
            return error_message
        except Exception as e:
            # Swallow any exception raised while extracting the detailed error message.
            print(
                f"Unexpected exception occurred while extracting the error message "
                f"from response: {type(e).__name__}: {str(e)}",
                file=sys.stderr,
            )
            return default_error_message

    # flake8: noqa: C901
    @tool
    def search(
        self,
        query: str,  # this is required
        location: str = None,
        safe: SafeMode = SafeMode.OFF,  # defaults to SafeMode.OFF
        num: int = 10,
        engine: Engine = Engine.GOOGLE,  # this is required
    ):
        from serpapi import SerpApiClient

        # Required parameters. https://serpapi.com/search-api
        params = {
            "q": query,
            "location": location,
            "api_key": self.connection.api_key,
        }
        if isinstance(engine, Engine):
            params["engine"] = engine.value
        else:
            params["engine"] = engine

        if safe == SafeMode.ACTIVE:
            # Ignore invalid values and safe="off" (the default).
            # Bing and Google use different parameter names for safe search.
            if params["engine"].lower() == "google":
                params["safe"] = "Active"
            else:
                params["safeSearch"] = "Strict"

        if int(num) > 0:
            # To combine multiple engines, "num" is used as the common parameter
            # and translated per engine below.
            if params["engine"].lower() == "google":
                params["num"] = int(num)
            else:
                params["count"] = int(num)

        search = SerpApiClient(params)

        # Get the response.
        try:
            response = search.get_response()
            if response.status_code == requests.codes.ok:
                # The default output is json.
                return json.loads(response.text)
            else:
                # Step I: Try to get an accurate error message.
                error_message = self.safe_extract_error_message(response)

                # Step II: Construct a PromptflowException.
                if response.status_code >= 500:
                    raise SerpAPISystemError(message=error_message)
                else:
                    raise SerpAPIUserError(message=error_message)
        except Exception as e:
            # SerpApi is generally robust; add basic error handling as a fallback.
            if not isinstance(e, PromptflowException):
                print(f"Unexpected exception occurred: {type(e).__name__}: {str(e)}", file=sys.stderr)
                error_message = f"SerpAPI search request failed: {type(e).__name__}: {str(e)}"
                raise SerpAPISystemError(message=error_message)
            raise


@tool
def search(
    connection: SerpConnection,
    query: str,  # this is required
    location: str = None,
    safe: SafeMode = SafeMode.OFF,  # defaults to SafeMode.OFF
    num: int = 10,
    engine: Engine = Engine.GOOGLE,  # this is required
):
    return SerpAPI(connection).search(
        query=query,
        location=location,
        safe=safe,
        num=num,
        engine=engine,
    )
0
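A minimal sketch of invoking the module-level search tool above, assuming a valid SerpAPI key (the key below is a placeholder):

    from promptflow.connections import SerpConnection
    from promptflow.tools.serpapi import Engine, SafeMode, search

    connection = SerpConnection(api_key="<serpapi-key>")  # placeholder
    results = search(
        connection=connection,
        query="cute cats",
        num=2,
        safe=SafeMode.OFF,
        engine=Engine.GOOGLE.value,
    )
    # `results` is the parsed JSON response from SerpAPI.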
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/yamls/serpapi.yaml
promptflow.tools.serpapi.SerpAPI.search:
  name: Serp API
  description: Use Serp API to obtain search results from a specific search engine.
  inputs:
    connection:
      type:
        - SerpConnection
    engine:
      default: google
      enum:
        - google
        - bing
      type:
        - string
    location:
      default: ''
      type:
        - string
    num:
      default: '10'
      type:
        - int
    query:
      type:
        - string
    safe:
      default: 'off'
      enum:
        - active
        - 'off'
      type:
        - string
  type: python
  module: promptflow.tools.serpapi
  class_name: SerpAPI
  function: search
0
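The module/class_name/function fields in the YAML above identify the Python callable behind the tool. A rough sketch of how a loader could resolve them (illustrative only; the actual promptflow tool loader may differ):

    import importlib

    module = importlib.import_module("promptflow.tools.serpapi")  # module
    tool_class = getattr(module, "SerpAPI")                       # class_name
    tool_func = getattr(tool_class, "search")                     # function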
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/yamls/openai_gpt4v.yaml
promptflow.tools.openai_gpt4v.OpenAI.chat:
  name: OpenAI GPT-4V
  description: Use OpenAI GPT-4V to leverage vision ability.
  type: custom_llm
  module: promptflow.tools.openai_gpt4v
  class_name: OpenAI
  function: chat
  tool_state: preview
  icon:
    light: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAx0lEQVR4nJWSwQ2CQBBFX0jAcjgqXUgPJNiIsQQrIVCIFy8GC6ABDcGDX7Mus9n1Xz7zZ+fPsLPwH4bUg0dD2wMPcbR48Uxq4AKU4iSTDwZ1LhWXipN/B3V0J6hjBTvgLHZNonewBXrgDpzEvXSIjN0BE3AACmmF4kl5F6tNzcCoLpW0SvGovFvsb4oZ2AANcAOu4ka6axCcINN3rg654sww+CYsPD0OwjcozFNh/Qcd78tqVbCIW+n+Fky472Bh/Q6SYb1EEy8tDzd+9IsVPAAAAABJRU5ErkJggg==
    dark: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA2ElEQVR4nJXSzW3CQBAF4DUSTjk+Al1AD0ikESslpBIEheRALhEpgAYSWV8OGUublf/yLuP3PPNmdndS+gdwXZrYDmh7fGE/W+wXbaYd8IYm4rxJPnZ0boI3wZcdJxs/n+AwV7DFK7aFyfQdYIMLPvES8YJNf5yp4jMeeEYdWh38gXOR35YGHe5xabvQdsHv6PLi8qV6gycc8YH3iMfQu6Lh4ASr+F5Hh3XwVWnQYzUkVlX1nccplAb1SN6Y/sfgmlK64VS8wimldIv/0yj2QLkHizG0iWP4AVAfQ34DVQONAAAAAElFTkSuQmCC
  default_prompt: |
    # system:
    As an AI assistant, your task involves interpreting images and responding to questions about the image.
    Remember to provide accurate answers based on the information present in the image.

    # user:
    Can you tell me what the image depicts?
    ![image]({{image_input}})
  inputs:
    connection:
      type:
        - OpenAIConnection
    model:
      enum:
        - gpt-4-vision-preview
      allow_manual_entry: true
      type:
        - string
    temperature:
      default: 1
      type:
        - double
    top_p:
      default: 1
      type:
        - double
    max_tokens:
      default: 512
      type:
        - int
    stop:
      default: ""
      type:
        - list
    presence_penalty:
      default: 0
      type:
        - double
    frequency_penalty:
      default: 0
      type:
        - double
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/yamls/embedding.yaml
promptflow.tools.embedding.embedding:
  name: Embedding
  description: Use Open AI's embedding model to create an embedding vector representing the input text.
  type: python
  module: promptflow.tools.embedding
  function: embedding
  inputs:
    connection:
      type: [AzureOpenAIConnection, OpenAIConnection]
    deployment_name:
      type:
        - string
      enabled_by: connection
      enabled_by_type: [AzureOpenAIConnection]
      capabilities:
        completion: false
        chat_completion: false
        embeddings: true
      model_list:
        - text-embedding-ada-002
        - text-search-ada-doc-001
        - text-search-ada-query-001
    model:
      type:
        - string
      enabled_by: connection
      enabled_by_type: [OpenAIConnection]
      enum:
        - text-embedding-ada-002
        - text-search-ada-doc-001
        - text-search-ada-query-001
      allow_manual_entry: true
    input:
      type:
        - string
0
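A minimal sketch of calling the embedding tool described above, with a deployment name taken from the model_list (the connection values are placeholders):

    from promptflow.connections import AzureOpenAIConnection
    from promptflow.tools.embedding import embedding

    connection = AzureOpenAIConnection(
        api_key="<api-key>",                              # placeholder
        api_base="https://<resource>.openai.azure.com/",  # placeholder
    )
    vector = embedding(
        connection=connection,
        input="The food was delicious and the waiter",
        deployment_name="text-embedding-ada-002",
    )
    # text-embedding-ada-002 returns a 1536-dimensional vector, per the tests below.
    assert len(vector) == 1536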
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/yamls/aoai_gpt4v.yaml
promptflow.tools.aoai_gpt4v.AzureOpenAI.chat:
  name: Azure OpenAI GPT-4 Turbo with Vision
  description: Use Azure OpenAI GPT-4 Turbo with Vision to leverage AOAI vision ability.
  type: custom_llm
  module: promptflow.tools.aoai_gpt4v
  class_name: AzureOpenAI
  function: chat
  tool_state: preview
  icon:
    light: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAx0lEQVR4nJWSwQ2CQBBFX0jAcjgqXUgPJNiIsQQrIVCIFy8GC6ABDcGDX7Mus9n1Xz7zZ+fPsLPwH4bUg0dD2wMPcbR48Uxq4AKU4iSTDwZ1LhWXipN/B3V0J6hjBTvgLHZNonewBXrgDpzEvXSIjN0BE3AACmmF4kl5F6tNzcCoLpW0SvGovFvsb4oZ2AANcAOu4ka6axCcINN3rg654sww+CYsPD0OwjcozFNh/Qcd78tqVbCIW+n+Fky472Bh/Q6SYb1EEy8tDzd+9IsVPAAAAABJRU5ErkJggg==
    dark: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAA2ElEQVR4nJXSzW3CQBAF4DUSTjk+Al1AD0ikESslpBIEheRALhEpgAYSWV8OGUublf/yLuP3PPNmdndS+gdwXZrYDmh7fGE/W+wXbaYd8IYm4rxJPnZ0boI3wZcdJxs/n+AwV7DFK7aFyfQdYIMLPvES8YJNf5yp4jMeeEYdWh38gXOR35YGHe5xabvQdsHv6PLi8qV6gycc8YH3iMfQu6Lh4ASr+F5Hh3XwVWnQYzUkVlX1nccplAb1SN6Y/sfgmlK64VS8wimldIv/0yj2QLkHizG0iWP4AVAfQ34DVQONAAAAAElFTkSuQmCC
  default_prompt: |
    # system:
    As an AI assistant, your task involves interpreting images and responding to questions about the image.
    Remember to provide accurate answers based on the information present in the image.

    # user:
    Can you tell me what the image depicts?
    ![image]({{image_input}})
  inputs:
    connection:
      type:
        - AzureOpenAIConnection
    deployment_name:
      type:
        - string
    temperature:
      default: 1
      type:
        - double
    top_p:
      default: 1
      type:
        - double
    max_tokens:
      default: 512
      type:
        - int
    stop:
      default: ""
      type:
        - list
    presence_penalty:
      default: 0
      type:
        - double
    frequency_penalty:
      default: 0
      type:
        - double
0
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/yamls/open_model_llm.yaml
promptflow.tools.open_model_llm.OpenModelLLM.call:
  name: Open Model LLM
  description: Use an open model from the Azure Model catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion API calls.
  icon: data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACgElEQVR4nGWSz2vcVRTFP/e9NzOZ1KDGohASslLEH6VLV0ak4l/QpeDCrfQPcNGliODKnVm4EBdBsIjQIlhciKW0ycKFVCSNbYnjdDLtmPnmO/nO9917XcxMkjYX3uLx7nnn3HOuMK2Nix4fP78ZdrYXVkLVWjf3l3B1B+HpcjzGFtmqa6cePz7/x0dnn1n5qhj3iBJPYREIURAJuCtpY8PjReDbrf9WG7H1fuefwQU9qKztTcMJT+PNnEFvjGVDBDlSsH6p/9MLzy6+NxwVqI8RAg4IPmWedMckdLYP6O6UpIaQfvyyXG012+e79/ZfHukoS1ISMT2hGTB1RkUmNgQ5QZ0w+a2VWDq73MbdEWmfnnv6UWe7oNzPaLapl5CwuLTXK9WUGBuCjqekzhP+z52ZXOrKMD3OJg0Hh778aiOuvpnYvp05d6GJO4iAO4QAe/eV36/X5LFRV4Zmn+AdkqlL8Vjp3oVioOz+WTPzzYEgsN+fgPLYyJVheSbPPVl2ikeGZRjtG52/8rHuaV9VOlpP2OtKyVndcRVCSqOhsvxa4vW359i6OuKdD+aP8Q4SYPdOzS/flGjt1JUSaMqZ5nwa1Y8qWb/Ud/eZZkHisYezEM0m+fcelDr8F1SqW2LNK6r1jXQwyLzy1hxvrLXZulry7ocL+FS6G4QIu3fG/Px1gdYeW7LIgXU2P/115TOA5G7e3Rmj2aS/m7l5pThiZzrCcE/d1XHzbln373nw7y6veeoUm5KCNKT/IPPwbiY1hYd/l5MIT65BMFt87sU4v9D7/JMflr44uV6hGh1+L4RCkg6z5iK2tAhNLeLsNGwYA4fDYnC/drvuuFxe86NV/x+Ut27g0FvykgAAAABJRU5ErkJggg==
  type: custom_llm
  module: promptflow.tools.open_model_llm
  class_name: OpenModelLLM
  function: call
  inputs:
    endpoint_name:
      type:
        - string
      dynamic_list:
        func_path: promptflow.tools.open_model_llm.list_endpoint_names
      allow_manual_entry: true  # Allow the user to clear this field
      is_multi_select: false
    deployment_name:
      default: ''
      type:
        - string
      dynamic_list:
        func_path: promptflow.tools.open_model_llm.list_deployment_names
        func_kwargs:
          - name: endpoint
            type:
              - string
            optional: true
            reference: ${inputs.endpoint_name}
      allow_manual_entry: true
      is_multi_select: false
    api:
      enum:
        - chat
        - completion
      type:
        - string
    temperature:
      default: 1.0
      type:
        - double
    max_new_tokens:
      default: 500
      type:
        - int
    top_p:
      default: 1.0
      advanced: true
      type:
        - double
    model_kwargs:
      default: "{}"
      advanced: true
      type:
        - object
0
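The dynamic_list entries above point at the module-level helper functions defined in open_model_llm.py; the UX calls them roughly like this (subscription values are placeholders):

    from promptflow.tools.open_model_llm import list_endpoint_names, list_deployment_names

    endpoints = list_endpoint_names(
        subscription_id="<sub-id>",       # placeholder
        resource_group_name="<rg>",       # placeholder
        workspace_name="<workspace>",     # placeholder
    )
    deployments = list_deployment_names(
        subscription_id="<sub-id>",
        resource_group_name="<rg>",
        workspace_name="<workspace>",
        endpoint=endpoints[0]["value"] if endpoints else None,
    )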
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools
promptflow_repo/promptflow/src/promptflow-tools/promptflow/tools/yamls/azure_content_safety.yaml
promptflow.tools.azure_content_safety.analyze_text:
  module: promptflow.tools.azure_content_safety
  function: analyze_text
  inputs:
    connection:
      type:
        - AzureContentSafetyConnection
    hate_category:
      default: medium_sensitivity
      enum:
        - disable
        - low_sensitivity
        - medium_sensitivity
        - high_sensitivity
      type:
        - string
    self_harm_category:
      default: medium_sensitivity
      enum:
        - disable
        - low_sensitivity
        - medium_sensitivity
        - high_sensitivity
      type:
        - string
    sexual_category:
      default: medium_sensitivity
      enum:
        - disable
        - low_sensitivity
        - medium_sensitivity
        - high_sensitivity
      type:
        - string
    text:
      type:
        - string
    violence_category:
      default: medium_sensitivity
      enum:
        - disable
        - low_sensitivity
        - medium_sensitivity
        - high_sensitivity
      type:
        - string
  name: Content Safety (Text Analyze)
  description: Use Azure Content Safety to detect harmful content.
  type: python
  deprecated_tools:
    - content_safety_text.tools.content_safety_text_tool.analyze_text
0
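A hedged sketch of calling the analyze_text tool, with the argument names taken from the YAML inputs above (the connection values are placeholders, and the exact AzureContentSafetyConnection fields may differ):

    from promptflow.connections import AzureContentSafetyConnection
    from promptflow.tools.azure_content_safety import analyze_text

    connection = AzureContentSafetyConnection(
        api_key="<key>",                                              # placeholder
        endpoint="https://<resource>.cognitiveservices.azure.com/",  # placeholder
    )
    result = analyze_text(
        connection=connection,
        text="Some user-provided text to screen.",
        hate_category="medium_sensitivity",
        self_harm_category="medium_sensitivity",
        sexual_category="medium_sensitivity",
        violence_category="medium_sensitivity",
    )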
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/test_embedding.py
import pytest

from promptflow.tools.embedding import embedding
from promptflow.tools.exception import InvalidConnectionType


@pytest.mark.usefixtures("use_secrets_config_file")
class TestEmbedding:
    def test_embedding_conn_aoai(self, azure_open_ai_connection):
        result = embedding(
            connection=azure_open_ai_connection,
            input="The food was delicious and the waiter",
            deployment_name="text-embedding-ada-002")
        assert len(result) == 1536

    @pytest.mark.skip_if_no_api_key("open_ai_connection")
    def test_embedding_conn_oai(self, open_ai_connection):
        result = embedding(
            connection=open_ai_connection,
            input="The food was delicious and the waiter",
            model="text-embedding-ada-002")
        assert len(result) == 1536

    def test_embedding_invalid_connection_type(self, serp_connection):
        error_codes = "UserError/ToolValidationError/InvalidConnectionType"
        with pytest.raises(InvalidConnectionType) as exc_info:
            embedding(connection=serp_connection, input="hello", deployment_name="text-embedding-ada-002")
        assert exc_info.value.error_codes == error_codes.split("/")
0
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/test_open_model_llm.py
import copy
import os
import pytest
import re
from azure.identity import DefaultAzureCredential
from typing import List, Dict

from promptflow.tools.exception import (
    OpenModelLLMUserError,
    OpenModelLLMKeyValidationError
)
from promptflow.tools.open_model_llm import (
    OpenModelLLM,
    API,
    ContentFormatterBase,
    LlamaContentFormatter,
    list_endpoint_names,
    list_deployment_names,
    CustomConnectionsContainer,
    get_model_type,
    ModelFamily,
    ServerlessEndpointsContainer
)


def validate_response(response):
    assert len(response) > 15


def verify_prompt_role_delimiters(message: str, codes: List[str]):
    assert codes == "UserError/OpenModelLLMUserError".split("/")

    message_pattern = re.compile(
        r"The Chat API requires a specific format for prompt definition, and the prompt should include separate "
        + r"lines as role delimiters: ('(assistant|user|system):\\n'[,.]){3} Current parsed role 'the quick brown"
        + r" fox' does not meet the requirement. If you intend to use the "
        + r"Completion API, please select the appropriate API type and deployment name. If you do intend to use the "
        + r"Chat API, please refer to the guideline at https://aka.ms/pfdoc/chat-prompt or view the samples in our "
        + r"gallery that contain 'Chat' in the name.")
    is_match = message_pattern.match(message)
    assert is_match


@pytest.fixture
def verify_service_endpoints(open_model_llm_ws_service_connection) -> Dict[str, List[str]]:
    if not open_model_llm_ws_service_connection:
        pytest.skip("Service Credential not available")
    print("open_model_llm_ws_service_connection completed")

    required_env_vars = ["AZUREML_ARM_SUBSCRIPTION", "AZUREML_ARM_RESOURCEGROUP", "AZUREML_ARM_WORKSPACE_NAME",
                         "AZURE_CLIENT_ID", "AZURE_TENANT_ID", "AZURE_CLIENT_SECRET"]
    for rev in required_env_vars:
        if rev not in os.environ:
            raise Exception(f"Test not set up correctly. Missing required environment variable: {rev}")


@pytest.fixture
def endpoints_provider(verify_service_endpoints) -> Dict[str, List[str]]:
    from azure.ai.ml import MLClient
    credential = DefaultAzureCredential(exclude_interactive_browser_credential=False)
    ml_client = MLClient(
        credential=credential,
        subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
        resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
        workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"))

    endpoints = {}
    for ep in ml_client.online_endpoints.list():
        endpoints[ep.name] = [d.name for d in ml_client.online_deployments.list(ep.name)]

    return endpoints


@pytest.fixture
def chat_endpoints_provider(endpoints_provider: Dict[str, List[str]]) -> Dict[str, List[str]]:
    chat_endpoint_names = ["gpt2", "llama-chat"]

    chat_endpoints = {}
    for key, value in endpoints_provider.items():
        for ep_name in chat_endpoint_names:
            if ep_name in key:
                chat_endpoints[key] = value

    if len(chat_endpoints) <= 0:
        pytest.skip("No Chat Endpoints Found")

    return chat_endpoints


@pytest.fixture
def completion_endpoints_provider(endpoints_provider: Dict[str, List[str]]) -> Dict[str, List[str]]:
    completion_endpoint_names = ["gpt2", "llama-comp"]

    completion_endpoints = {}
    for key, value in endpoints_provider.items():
        for ep_name in completion_endpoint_names:
            if ep_name in key:
                completion_endpoints[key] = value

    if len(completion_endpoints) <= 0:
        pytest.skip("No Completion Endpoints Found")

    return completion_endpoints


@pytest.mark.usefixtures("use_secrets_config_file")
class TestOpenModelLLM:
    stateless_os_llm = OpenModelLLM()
    gpt2_connection = "connection/gpt2_connection"
    llama_connection = "connection/llama_chat_connection"
    llama_serverless_connection = "connection/llama_chat_serverless"
    completion_prompt = "The quick brown fox"
    chat_prompt = """system:
* You are a AI which helps Customers complete a sentence.
* Your answer should complete the provided prompt.
* Your answer should be followed by a discussion of the meaning.
* The discussion part of your answer must be long and detailed.

user:
""" + completion_prompt

    def test_open_model_llm_completion(self, verify_service_endpoints):
        response = self.stateless_os_llm.call(
            self.completion_prompt,
            API.COMPLETION,
            endpoint_name=self.gpt2_connection)
        validate_response(response)

    def test_open_model_llm_completion_with_deploy(self, verify_service_endpoints):
        response = self.stateless_os_llm.call(
            self.completion_prompt,
            API.COMPLETION,
            endpoint_name=self.gpt2_connection,
            deployment_name="gpt2-10")
        validate_response(response)

    def test_open_model_llm_chat(self, verify_service_endpoints):
        response = self.stateless_os_llm.call(
            self.chat_prompt,
            API.CHAT,
            endpoint_name=self.gpt2_connection)
        validate_response(response)

    def test_open_model_llm_chat_with_deploy(self, verify_service_endpoints):
        response = self.stateless_os_llm.call(
            self.chat_prompt,
            API.CHAT,
            endpoint_name=self.gpt2_connection,
            deployment_name="gpt2-10")
        validate_response(response)

    def test_open_model_llm_chat_with_max_length(self, verify_service_endpoints):
        response = self.stateless_os_llm.call(
            self.chat_prompt,
            API.CHAT,
            endpoint_name=self.gpt2_connection,
            max_new_tokens=30)
        # GPT-2 doesn't take this parameter.
        validate_response(response)

    @pytest.mark.skip_if_no_api_key("gpt2_custom_connection")
    def test_open_model_llm_con_url_chat(self, gpt2_custom_connection):
        tmp = copy.deepcopy(gpt2_custom_connection)
        del tmp.configs['endpoint_url']
        with pytest.raises(OpenModelLLMKeyValidationError) as exc_info:
            customConnectionsContainer = CustomConnectionsContainer()
            customConnectionsContainer.get_endpoint_from_custom_connection(connection=tmp)
        assert exc_info.value.message == """Required key `endpoint_url` not found in given custom connection.
Required keys are: endpoint_url,model_family."""
        assert exc_info.value.error_codes == "UserError/ToolValidationError/OpenModelLLMKeyValidationError".split("/")

    @pytest.mark.skip_if_no_api_key("gpt2_custom_connection")
    def test_open_model_llm_con_key_chat(self, gpt2_custom_connection):
        tmp = copy.deepcopy(gpt2_custom_connection)
        del tmp.secrets['endpoint_api_key']
        with pytest.raises(OpenModelLLMKeyValidationError) as exc_info:
            customConnectionsContainer = CustomConnectionsContainer()
            customConnectionsContainer.get_endpoint_from_custom_connection(connection=tmp)
        assert exc_info.value.message == (
            "Required secret key `endpoint_api_key` "
            + """not found in given custom connection.
Required keys are: endpoint_api_key.""")
        assert exc_info.value.error_codes == "UserError/ToolValidationError/OpenModelLLMKeyValidationError".split("/")

    @pytest.mark.skip_if_no_api_key("gpt2_custom_connection")
    def test_open_model_llm_con_model_chat(self, gpt2_custom_connection):
        tmp = copy.deepcopy(gpt2_custom_connection)
        del tmp.configs['model_family']
        with pytest.raises(OpenModelLLMKeyValidationError) as exc_info:
            customConnectionsContainer = CustomConnectionsContainer()
            customConnectionsContainer.get_endpoint_from_custom_connection(connection=tmp)
        assert exc_info.value.message == """Required key `model_family` not found in given custom connection.
Required keys are: endpoint_url,model_family."""
        assert exc_info.value.error_codes == "UserError/ToolValidationError/OpenModelLLMKeyValidationError".split("/")

    def test_open_model_llm_escape_chat(self):
        danger = r"The quick \brown fox\tjumped\\over \the \\boy\r\n"
        out_of_danger = ContentFormatterBase.escape_special_characters(danger)
        assert out_of_danger == "The quick \\brown fox\\tjumped\\\\over \\the \\\\boy\\r\\n"

    def test_open_model_llm_llama_parse_chat_with_chat(self):
        LlamaContentFormatter.parse_chat(self.chat_prompt)

    def test_open_model_llm_llama_parse_multi_turn(self):
        multi_turn_chat = """user:
You are a AI which helps Customers answer questions.

What is the best movie of all time?

assistant:
Mobius, which starred Jared Leto

user:
Why was that the greatest movie of all time?
"""
        LlamaContentFormatter.parse_chat(multi_turn_chat)

    def test_open_model_llm_llama_parse_ignore_whitespace(self):
        bad_chat_prompt = f"""system:
You are a AI which helps Customers answer questions.

user:

user:
{self.completion_prompt}"""
        with pytest.raises(OpenModelLLMUserError) as exc_info:
            LlamaContentFormatter.parse_chat(bad_chat_prompt)
        verify_prompt_role_delimiters(exc_info.value.message, exc_info.value.error_codes)

    def test_open_model_llm_llama_parse_chat_with_comp(self):
        with pytest.raises(OpenModelLLMUserError) as exc_info:
            LlamaContentFormatter.parse_chat(self.completion_prompt)
        verify_prompt_role_delimiters(exc_info.value.message, exc_info.value.error_codes)

    def test_open_model_llm_chat_endpoint_name(self, chat_endpoints_provider):
        for endpoint_name in chat_endpoints_provider:
            response = self.stateless_os_llm.call(
                self.chat_prompt,
                API.CHAT,
                endpoint_name=f"onlineEndpoint/{endpoint_name}")
            validate_response(response)

    def test_open_model_llm_chat_endpoint_name_with_deployment(self, chat_endpoints_provider):
        for endpoint_name in chat_endpoints_provider:
            for deployment_name in chat_endpoints_provider[endpoint_name]:
                response = self.stateless_os_llm.call(
                    self.chat_prompt,
                    API.CHAT,
                    endpoint_name=f"onlineEndpoint/{endpoint_name}",
                    deployment_name=deployment_name)
                validate_response(response)

    def test_open_model_llm_completion_endpoint_name(self, completion_endpoints_provider):
        for endpoint_name in completion_endpoints_provider:
            response = self.stateless_os_llm.call(
                self.completion_prompt,
                API.COMPLETION,
                endpoint_name=f"onlineEndpoint/{endpoint_name}")
            validate_response(response)

    def test_open_model_llm_completion_endpoint_name_with_deployment(self, completion_endpoints_provider):
        for endpoint_name in completion_endpoints_provider:
            for deployment_name in completion_endpoints_provider[endpoint_name]:
                response = self.stateless_os_llm.call(
                    self.completion_prompt,
                    API.COMPLETION,
                    endpoint_name=f"onlineEndpoint/{endpoint_name}",
                    deployment_name=deployment_name)
                validate_response(response)

    def test_open_model_llm_llama_chat(self, verify_service_endpoints):
        response = self.stateless_os_llm.call(self.chat_prompt, API.CHAT, endpoint_name=self.llama_connection)
        validate_response(response)

    def test_open_model_llm_llama_serverless(self, verify_service_endpoints):
        response = self.stateless_os_llm.call(
            self.chat_prompt,
            API.CHAT,
            endpoint_name=self.llama_serverless_connection)
        validate_response(response)

    def test_open_model_llm_llama_chat_history(self, verify_service_endpoints):
        chat_history_prompt = """system:
* Given the following conversation history and the users next question, answer the next question.
* If the conversation is irrelevant or empty, acknowledge and ask for more input.
* Do not add more details than necessary to the question.

chat history:
{% for item in chat_history %}
user:
{{ item.inputs.chat_input }}

assistant:
{{ item.outputs.chat_output }}
{% endfor %}

user:
{{ chat_input }}"""
        response = self.stateless_os_llm.call(
            chat_history_prompt,
            API.CHAT,
            endpoint_name=self.llama_connection,
            chat_history=[
                {
                    "inputs": {"chat_input": "Hi"},
                    "outputs": {"chat_output": "Hello! How can I assist you today?"}
                },
                {
                    "inputs": {"chat_input": "What is Azure compute instance?"},
                    "outputs": {
                        "chat_output": "An Azure Machine Learning compute instance is a fully managed cloud-based"
                        + " workstation for data scientists. It provides a pre-configured and managed development"
                        + " environment in the cloud for machine learning. Compute instances can also be used as a"
                        + " compute target for training and inferencing for development and testing purposes. They"
                        + " have a job queue, run jobs securely in a virtual network environment, and can run"
                        + " multiple small jobs in parallel. Additionally, compute instances support single-node"
                        + " multi-GPU distributed training jobs."}
                }
            ],
            chat_input="Sorry I didn't follow, could you say that again?")
        validate_response(response)

    def test_open_model_llm_dynamic_list_ignore_deployment(self, verify_service_endpoints):
        deployments = list_deployment_names(
            subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
            resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
            workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
            endpoint=None)
        assert len(deployments) == 1
        assert deployments[0]['value'] == 'default'

        deployments = list_deployment_names(
            subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
            resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
            workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
            endpoint='')
        assert len(deployments) == 1
        assert deployments[0]['value'] == 'default'

        deployments = list_deployment_names(
            subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
            resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
            workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
            endpoint='fake_endpoint name')
        assert len(deployments) == 1
        assert deployments[0]['value'] == 'default'

    def test_open_model_llm_dynamic_list_serverless_test(self, verify_service_endpoints):
        subscription_id = os.getenv("AZUREML_ARM_SUBSCRIPTION")
        resource_group_name = os.getenv("AZUREML_ARM_RESOURCEGROUP")
        workspace_name = os.getenv("AZUREML_ARM_WORKSPACE_NAME")

        se_container = ServerlessEndpointsContainer()
        credential = DefaultAzureCredential(exclude_interactive_browser_credential=False)
        token = credential.get_token("https://management.azure.com/.default").token

        eps = se_container.list_serverless_endpoints(
            token,
            subscription_id,
            resource_group_name,
            workspace_name)

        if len(eps) == 0:
            pytest.skip("Service Credential not available")

        endpoint_connection_name = eps[0]["value"].replace("serverlessEndpoint/", "")

        eps_keys = se_container._list_endpoint_key(
            token,
            subscription_id,
            resource_group_name,
            workspace_name,
            endpoint_connection_name
        )
        assert len(eps_keys) == 2

        (endpoint_url, endpoint_key, model_family) = se_container.get_serverless_endpoint_key(
            token,
            subscription_id,
            resource_group_name,
            workspace_name,
            endpoint_connection_name)

        assert len(endpoint_url) > 20
        assert model_family == "LLaMa"
        assert endpoint_key == eps_keys['primaryKey']

    def test_open_model_llm_dynamic_list_custom_connections_test(self, verify_service_endpoints):
        custom_container = CustomConnectionsContainer()
        credential = DefaultAzureCredential(exclude_interactive_browser_credential=False)
        connections = custom_container.list_custom_connection_names(
            credential,
            subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
            resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
            workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"))
        assert len(connections) > 1

    def test_open_model_llm_dynamic_list_happy_path(self, verify_service_endpoints):
        endpoints = list_endpoint_names(
            subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
            resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
            workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
            return_endpoint_url=True
        )
        # We might want to remove this, or skip if there are zero endpoints, in the long term.
        # Currently we have low-cost compute for a GPT-2 endpoint, so if nothing else this should be available.
        assert len(endpoints) > 0
        for endpoint in endpoints:
            assert "value" in endpoint
            assert "display_value" in endpoint
            assert "description" in endpoint

        from tests.utils import verify_url_exists
        for endpoint in endpoints:
            if "localConnection/" in endpoint['value'] or not verify_url_exists(endpoint["url"]):
                continue

            is_chat = "serverless" in endpoint['value'] or "chat" in endpoint['value']
            if is_chat:
                prompt = self.chat_prompt
                api_type = API.CHAT
            else:
                prompt = self.completion_prompt
                api_type = API.COMPLETION

            # Test with the default deployment.
            response = self.stateless_os_llm.call(
                prompt,
                api_type,
                endpoint_name=endpoint['value'],
                max_new_tokens=30,
                model_kwargs={})
            validate_response(response)

            deployments = list_deployment_names(
                subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
                resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
                workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
                endpoint=endpoint['value'])
            if "onlineEndpoint" in endpoint['value']:
                assert len(deployments) > 0
            else:
                assert len(deployments) == 1
                assert deployments[0]['value'] == 'default'
                continue

            for deployment in deployments:
                response = self.stateless_os_llm.call(
                    prompt,
                    api_type,
                    endpoint_name=endpoint['value'],
                    deployment_name=deployment['value'],
                    max_new_tokens=30,
                    model_kwargs={})
                validate_response(response)

    def test_open_model_llm_get_model_llama(self):
        model_assets = [
            "azureml://registries/azureml-meta/models/Llama-2-7b-chat/versions/14",
            "azureml://registries/azureml-meta/models/Llama-2-7b/versions/12",
            "azureml://registries/azureml-meta/models/Llama-2-13b-chat/versions/12",
            "azureml://registries/azureml-meta/models/Llama-2-13b/versions/12",
            "azureml://registries/azureml-meta/models/Llama-2-70b-chat/versions/12",
            "azureml://registries/azureml-meta/models/Llama-2-70b/versions/13"
        ]

        for asset_name in model_assets:
            assert ModelFamily.LLAMA == get_model_type(asset_name)

    def test_open_model_llm_get_model_gpt2(self):
        model_assets = [
            "azureml://registries/azureml-staging/models/gpt2/versions/9",
            "azureml://registries/azureml/models/gpt2/versions/9",
            "azureml://registries/azureml/models/gpt2-medium/versions/11",
            "azureml://registries/azureml/models/gpt2-large/versions/11"
        ]

        for asset_name in model_assets:
            assert ModelFamily.GPT2 == get_model_type(asset_name)

    def test_open_model_llm_get_model_dolly(self):
        model_assets = [
            "azureml://registries/azureml/models/databricks-dolly-v2-12b/versions/11"
        ]

        for asset_name in model_assets:
            assert ModelFamily.DOLLY == get_model_type(asset_name)

    def test_open_model_llm_get_model_falcon(self):
        model_assets = [
            "azureml://registries/azureml/models/tiiuae-falcon-40b/versions/2",
            "azureml://registries/azureml/models/tiiuae-falcon-40b/versions/2"
        ]

        for asset_name in model_assets:
            assert ModelFamily.FALCON == get_model_type(asset_name)

    def test_open_model_llm_get_model_failure_cases(self):
        bad_model_assets = [
            "azureml://registries/azureml-meta/models/CodeLlama-7b-Instruct-hf/versions/3",
            "azureml://registries/azureml-staging/models/gpt-2/versions/9",
            "azureml://registries/azureml/models/falcon-40b/versions/2",
            "azureml://registries/azureml-meta/models/Llama-70b/versions/13",
            "azureml://registries/azureml/models/openai-whisper-large/versions/14",
            "azureml://registries/azureml/models/ask-wikipedia/versions/2",
            "definitely not real",
            "",
            "ausreml://registries/azureml/models/ask-wikipedia/versions/2",
            "azureml://registries/azureml/models/ask-wikipedia/version/2",
            "azureml://registries/azureml/models/ask-wikipedia/version/"
        ]

        for asset_name in bad_model_assets:
            val = get_model_type(asset_name)
            assert val is None

    def test_open_model_llm_local_connection(self, verify_service_endpoints, gpt2_custom_connection):
        endpoints = list_endpoint_names(
            subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
            resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
            workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
            return_endpoint_url=True
        )
        import uuid
        connection_name = f"test_local_connection_{uuid.uuid4()}"

        for e in endpoints:
            assert e['value'] != connection_name

        from promptflow._sdk.entities import CustomConnection
        connection = CustomConnection(name=connection_name,
                                      configs={
                                          "endpoint_url": gpt2_custom_connection.configs['endpoint_url'],
                                          "model_family": gpt2_custom_connection.configs['model_family']},
                                      secrets={
                                          "endpoint_api_key": gpt2_custom_connection.secrets['endpoint_api_key']})

        from promptflow import PFClient as LocalPFClient
        pf_client = LocalPFClient()
        pf_client.connections.create_or_update(connection)

        endpoints = list_endpoint_names(
            subscription_id=os.getenv("AZUREML_ARM_SUBSCRIPTION"),
            resource_group_name=os.getenv("AZUREML_ARM_RESOURCEGROUP"),
            workspace_name=os.getenv("AZUREML_ARM_WORKSPACE_NAME"),
            force_refresh=True
        )

        found = False
        target_connection_name = f"localConnection/{connection_name}"
        for e in endpoints:
            if e['value'] == target_connection_name:
                found = True
                break

        assert found

        response = self.stateless_os_llm.call(
            self.completion_prompt,
            API.COMPLETION,
            endpoint_name=target_connection_name)
        validate_response(response)

    def test_open_model_llm_package(self):
        import pkg_resources
        # promptflow-tools is not installed in the test pipeline, so we skip this test there. It works locally.
        try:
            pkg_resources.get_distribution("promptflow-tools")
        except pkg_resources.DistributionNotFound:
            pytest.skip("promptflow-tools not installed")

        found = False
        target_tool_identifier = "promptflow.tools.open_model_llm.OpenModelLLM.call"
        for entry_point in pkg_resources.iter_entry_points(group="package_tools"):
            list_tool_func = entry_point.resolve()
            package_tools = list_tool_func()
            for identifier, tool in package_tools.items():
                if identifier == target_tool_identifier:
                    import importlib
                    importlib.import_module(tool["module"])  # Import the module to ensure its validity.
                    assert not found
                    found = True
        assert found
0
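The dynamic-list tests above rely on a prefix convention for endpoint values: workspace online endpoints appear as "onlineEndpoint/<name>", pay-as-you-go endpoints as "serverlessEndpoint/<name>", and locally stored custom connections as "localConnection/<name>". A minimal sketch of a parser for that convention follows; the helper name and return shape are illustrative only, not part of the tool's API.

# Hypothetical helper: splits an endpoint value like "serverlessEndpoint/my-llama"
# into its kind and bare name, mirroring the prefixes exercised in the tests above.
from typing import Tuple

KNOWN_PREFIXES = ("onlineEndpoint", "serverlessEndpoint", "localConnection")


def split_endpoint_value(value: str) -> Tuple[str, str]:
    kind, sep, name = value.partition("/")
    if not sep or kind not in KNOWN_PREFIXES:
        raise ValueError(f"Unrecognized endpoint value: {value!r}")
    return kind, name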
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/pytest.ini
[pytest]
markers =
    skip_if_no_api_key: skip the test if actual api key is not provided.
0
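The `skip_if_no_api_key` marker registered above is implemented elsewhere; the conftest is not shown in this dump. One plausible wiring is a `pytest_collection_modifyitems` hook roughly like the sketch below. Everything beyond the marker name is an assumption: the real conftest may read a secrets file via the `use_secrets_config_file` fixture rather than environment variables.

# Sketch of a conftest.py hook that could back the marker declared in pytest.ini.
import os

import pytest


def pytest_collection_modifyitems(config, items):
    for item in items:
        marker = item.get_closest_marker("skip_if_no_api_key")
        if marker is None:
            continue
        connection_name = marker.args[0]
        # Hypothetical convention: the key is exposed via an environment variable
        # such as SERP_CONNECTION_API_KEY.
        env_var = f"{connection_name.upper()}_API_KEY"
        if not os.getenv(env_var):
            item.add_marker(pytest.mark.skip(reason=f"API key for {connection_name} not provided"))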
promptflow_repo/promptflow/src/promptflow-tools
promptflow_repo/promptflow/src/promptflow-tools/tests/test_serpapi.py
import pytest

from promptflow.exceptions import UserErrorException
from promptflow.tools.serpapi import Engine, SafeMode, search

import tests.utils as utils


@pytest.mark.usefixtures("use_secrets_config_file")
@pytest.mark.skip_if_no_api_key("serp_connection")
class TestSerpAPI:
    def test_engine(self, serp_connection):
        query = "cute cats"
        num = 2
        result_dict = search(
            connection=serp_connection, query=query, num=num, safe=SafeMode.ACTIVE, engine=Engine.GOOGLE.value)
        utils.is_json_serializable(result_dict, "serp api search()")
        assert result_dict["search_metadata"]["google_url"] is not None
        assert int(result_dict["search_parameters"]["num"]) == num
        assert result_dict["search_parameters"]["safe"].lower() == "active"

        result_dict = search(
            connection=serp_connection, query=query, num=num, safe=SafeMode.ACTIVE, engine=Engine.BING.value)
        utils.is_json_serializable(result_dict, "serp api search()")
        assert int(result_dict["search_parameters"]["count"]) == num
        assert result_dict["search_parameters"]["safe_search"].lower() == "strict"

    def test_invalid_api_key(self, serp_connection):
        serp_connection.api_key = "hello"
        query = "cute cats"
        num = 2
        engine = Engine.GOOGLE.value
        error_msg = "Invalid API key. Your API key should be here: https://serpapi.com/manage-api-key"
        with pytest.raises(UserErrorException) as exc_info:
            search(connection=serp_connection, query=query, num=num, engine=engine)
        assert error_msg == exc_info.value.args[0]

    @pytest.mark.parametrize("engine", [Engine.GOOGLE.value, Engine.BING.value])
    def test_invalid_query(self, serp_connection, engine):
        query = ""
        num = 2
        error_msg = "Missing query `q` parameter."
        with pytest.raises(UserErrorException) as exc_info:
            search(connection=serp_connection, query=query, num=num, engine=engine)
        assert error_msg == exc_info.value.args[0]
0
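For reference, the `search` tool exercised by these tests can also be driven outside pytest. A minimal sketch, assuming a `SerpConnection` entity that holds the API key (the connection class is taken from promptflow's public connection types; the key value below is a placeholder):

# Minimal standalone sketch; "YOUR-SERPAPI-KEY" is a placeholder, and the
# SafeMode/Engine values mirror those exercised in test_engine above.
from promptflow.connections import SerpConnection
from promptflow.tools.serpapi import Engine, SafeMode, search

connection = SerpConnection(api_key="YOUR-SERPAPI-KEY")  # placeholder key
result = search(
    connection=connection,
    query="cute cats",
    num=2,
    safe=SafeMode.ACTIVE,
    engine=Engine.GOOGLE.value,
)
print(result["search_metadata"]["google_url"])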