Create prompt_chain.py
prompt_chain.py (ADDED, +114 −0)
@@ -0,0 +1,114 @@
from typing import Any, Dict, List, Optional

import yaml

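# NOTE: the original file references AIAssistant and PromptLoader without importing
# or defining them. The import below is a guess at the project layout; adjust it to
# wherever your AIAssistant wrapper actually lives.
from ai_assistant import AIAssistant  # hypothetical module path


class PromptLoader:
    """Minimal YAML prompt loader sketch; the real project may define this elsewhere.

    Assumes the prompts file is a flat mapping of prompt name -> template string:

        analyze: "Analyze the following text:\n{text}"
        summarize: "Summarize this analysis:\n{text}"
    """

    @staticmethod
    def load_prompts(path: str) -> Dict[str, str]:
        # yaml.safe_load returns the parsed name -> template mapping directly.
        with open(path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)
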
class SimplePromptChain:
    """A flexible prompt chain implementation using an AIAssistant wrapper."""

    def __init__(self, assistant: AIAssistant, prompts_path: str):
        """
        Initialize chain with AI assistant and prompts.

        Args:
            assistant: Configured AIAssistant instance
            prompts_path: Path to YAML prompts file
        """
        self.assistant = assistant
        self.prompts = PromptLoader.load_prompts(prompts_path)

    def execute_step(self,
                     prompt_name: str,
                     generation_params: Optional[Dict[str, Any]] = None,
                     variables: Optional[Dict[str, Any]] = None) -> str:
        """
        Execute a single chain step using the AI assistant.

        Args:
            prompt_name: Name of prompt template to use
            generation_params: Optional parameters for generation
            variables: Variables to format the prompt

        Returns:
            Processed response content

        Raises:
            ValueError: If prompt template not found
            RuntimeError: If response generation fails
        """
        # Validate prompt exists
        if prompt_name not in self.prompts:
            raise ValueError(f"Prompt '{prompt_name}' not found in loaded templates")

        prompt_template = self.prompts[prompt_name]

        try:
            # Generate response using assistant. This assumes the wrapper returns
            # a completed, OpenAI-style response object even with stream=True.
            response = self.assistant.generate_response(
                prompt_template=prompt_template,
                generation_params=generation_params,
                stream=True,
                **(variables or {})
            )

            # Extract and return content from response
            return response.choices[0].message.content

        except Exception as e:
            # Chain the original exception so the traceback is preserved.
            raise RuntimeError(f"Error in step execution: {e}") from e

    def run_chain(self, steps: List[Dict[str, Any]]) -> Dict[str, str]:
        """
        Execute a chain of prompts using the AI assistant.

        Args:
            steps: List of steps to execute, each containing:
                - prompt_name: Name of prompt template
                - variables: Variables for the prompt
                - output_key: Key to store step output
                - generation_params: Optional generation parameters

        Returns:
            Dict of step outputs keyed by output_key

        Example:
            steps = [
                {
                    "prompt_name": "analyze",
                    "variables": {"text": "Sample text"},
                    "output_key": "analysis",
                    "generation_params": {"temperature": 0.7}
                },
                {
                    "prompt_name": "summarize",
                    "variables": {"text": "{analysis}"},
                    "output_key": "summary"
                }
            ]
        """
        results: Dict[str, str] = {}

        for step in steps:
            prompt_name = step["prompt_name"]
            output_key = step["output_key"]
            generation_params = step.get("generation_params")

            # Resolve variables, substituting references to previous outputs:
            # a value of the exact form "{key}" is replaced by results["key"].
            variables = {}
            for key, value in step.get("variables", {}).items():
                if isinstance(value, str) and value.startswith("{") and value.endswith("}"):
                    ref_key = value[1:-1]
                    if ref_key not in results:
                        raise ValueError(f"Referenced output '{ref_key}' not found in previous results")
                    variables[key] = results[ref_key]
                else:
                    variables[key] = value

            # Execute step and store result
            print(f"\nExecuting step: {prompt_name}...")
            result = self.execute_step(
                prompt_name=prompt_name,
                generation_params=generation_params,
                variables=variables
            )
            results[output_key] = result

        return results
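

# --- Usage sketch --------------------------------------------------------------
# A minimal illustration of wiring the chain together. How AIAssistant is
# constructed is an assumption here; substitute your actual wrapper setup.
if __name__ == "__main__":
    assistant = AIAssistant()  # hypothetical no-argument construction
    chain = SimplePromptChain(assistant, "prompts.yaml")

    outputs = chain.run_chain([
        {
            "prompt_name": "analyze",
            "variables": {"text": "Sample text"},
            "output_key": "analysis",
        },
        {
            "prompt_name": "summarize",
            "variables": {"text": "{analysis}"},  # consumes the previous step's output
            "output_key": "summary",
        },
    ])
    print(outputs["summary"])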