File size: 4,156 Bytes
7326c38
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
from typing import Any, Dict, List, Optional

import yaml

class SimplePromptChain:
    """A flexible prompt chain implementation using an AIAssistant wrapper.

    Loads named prompt templates from a YAML file and executes them either
    one at a time (``execute_step``) or as an ordered chain whose later
    steps may reference earlier outputs (``run_chain``).
    """

    def __init__(self, assistant: "AIAssistant", prompts_path: str):
        """
        Initialize chain with AI assistant and prompts.

        Args:
            assistant: Configured AIAssistant instance
            prompts_path: Path to YAML prompts file
        """
        self.assistant = assistant
        # PromptLoader is expected to return a mapping of name -> template
        # string — TODO confirm against PromptLoader.load_prompts.
        self.prompts = PromptLoader.load_prompts(prompts_path)

    def execute_step(self,
                    prompt_name: str,
                    generation_params: Optional[Dict[str, Any]] = None,
                    variables: Optional[Dict[str, Any]] = None) -> str:
        """
        Execute single chain step using the AI assistant.

        Args:
            prompt_name: Name of prompt template to use
            generation_params: Optional parameters for generation
            variables: Variables to format the prompt

        Returns:
            Processed response content

        Raises:
            ValueError: If prompt template not found
            RuntimeError: If the assistant call fails; the original
                exception is chained as ``__cause__``.
        """
        # Validate prompt exists before touching the assistant.
        if prompt_name not in self.prompts:
            raise ValueError(f"Prompt '{prompt_name}' not found in loaded templates")

        prompt_template = self.prompts[prompt_name]

        try:
            # NOTE(review): stream=True is requested, yet the response is read
            # below as a single completed object — presumably the AIAssistant
            # wrapper aggregates streamed chunks before returning; confirm.
            response = self.assistant.generate_response(
                prompt_template=prompt_template,
                generation_params=generation_params,
                stream=True,
                **(variables or {})
            )

            # Extract and return the content of the first choice.
            return response.choices[0].message.content

        except Exception as e:
            # Chain the original exception so its traceback survives.
            # RuntimeError is a subclass of Exception, so existing callers
            # catching Exception are unaffected.
            raise RuntimeError(f"Error in step execution: {str(e)}") from e

    def run_chain(self, steps: List[Dict[str, Any]]) -> Dict[str, str]:
        """
        Execute chain of prompts using the AI assistant.

        Args:
            steps: List of steps to execute, each containing:
                - prompt_name: Name of prompt template
                - variables: Variables for the prompt
                - output_key: Key to store step output
                - generation_params: Optional generation parameters

        Returns:
            Dict of step outputs keyed by output_key

        Raises:
            ValueError: If a step references an output_key that no earlier
                step produced, or names an unknown prompt template.

        Example:
            steps = [
                {
                    "prompt_name": "analyze",
                    "variables": {"text": "Sample text"},
                    "output_key": "analysis",
                    "generation_params": {"temperature": 0.7}
                },
                {
                    "prompt_name": "summarize",
                    "variables": {"text": "{analysis}"},
                    "output_key": "summary"
                }
            ]
        """
        results: Dict[str, str] = {}

        for step in steps:
            prompt_name = step["prompt_name"]
            output_key = step["output_key"]
            generation_params = step.get("generation_params")

            # Resolve variables; a value of the exact form "{key}" is a
            # reference to a previous step's output.
            variables: Dict[str, Any] = {}
            for key, value in step.get("variables", {}).items():
                if isinstance(value, str) and value.startswith("{") and value.endswith("}"):
                    ref_key = value[1:-1]
                    if ref_key not in results:
                        raise ValueError(f"Referenced output '{ref_key}' not found in previous results")
                    variables[key] = results[ref_key]
                else:
                    variables[key] = value

            # Execute step and store result under its output_key.
            print(f"\nExecuting step: {prompt_name}...")
            result = self.execute_step(
                prompt_name=prompt_name,
                generation_params=generation_params,
                variables=variables
            )
            results[output_key] = result

        return results