from openai import OpenAI
from typing import Optional, Dict, Any


class AIAssistant:
    """
    A wrapper class for consistent LLM API interactions.

    This class provides:
    - Unified interface for different LLM providers
    - Consistent handling of generation parameters
    - Support for streaming responses

    Attributes:
        client: Initialized API client (OpenAI, Anthropic, etc.)
        model: Name of the model to use
    """

    def __init__(self, client: OpenAI, model: str):
        self.client = client
        self.model = model

    def generate_response(self,
                          prompt_template: Any,
                          generation_params: Optional[Dict[str, Any]] = None,
                          stream: bool = False,
                          **kwargs):
""" |
|
Generate LLM response using pthe rovided template and parameters. |
|
|
|
Args: |
|
prompt_template: Template object with format method |
|
generation_params: Optional generation parameters |
|
stream: Whether to stream the response |
|
**kwargs: Variables for prompt template |
|
|
|
Returns: |
|
API response object or streamed response |
|
|
|
Example: |
|
assistant.generate_response( |
|
prompt_template=template, |
|
temperature=0.7, |
|
topic="AI safety" |
|
) |
|
""" |
|
        # Render the chat messages by filling the template variables.
        messages = prompt_template.format(**kwargs)
        params = generation_params or {}

        completion = self.client.chat.completions.create(
            model=self.model,
            messages=messages,
            stream=stream,
            **params,
        )

        if stream:
            # Print tokens as they arrive and accumulate the full text.
            # Returning the raw iterator here would hand the caller an
            # already-exhausted stream.
            chunks = []
            for chunk in completion:
                delta = chunk.choices[0].delta.content
                if delta is not None:
                    print(delta, end="")
                    chunks.append(delta)
            return "".join(chunks)

        return completion
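

# The sketch below is illustrative rather than part of the wrapper above.
# generate_response only assumes that prompt_template exposes a
# format(**kwargs) method returning a list of chat messages; this
# PromptTemplate class is one hypothetical shape that contract could
# take, not an API from any particular library.
class PromptTemplate:
    """Minimal template: fills named variables into a user message."""

    def __init__(self, system: str, user: str):
        self.system = system
        self.user = user

    def format(self, **kwargs) -> list:
        # Substitute template variables into the user message via str.format.
        return [
            {"role": "system", "content": self.system},
            {"role": "user", "content": self.user.format(**kwargs)},
        ]


# Example usage (assumes OPENAI_API_KEY is set in the environment; the
# model name is a placeholder):
#
#     client = OpenAI()
#     assistant = AIAssistant(client=client, model="gpt-4o-mini")
#     template = PromptTemplate(
#         system="You are a concise technical explainer.",
#         user="Write three sentences about {topic}.",
#     )
#     text = assistant.generate_response(
#         prompt_template=template,
#         generation_params={"temperature": 0.7},
#         stream=True,
#         topic="AI safety",
#     )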