# helpers/ai_client.py
import os
import logging

from openai import OpenAI

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class AIClient:
    def __init__(self):
        # Load configuration from environment variables
        self.base_url = os.getenv("LLM_API_URL", "https://openrouter.ai/api/v1")
        self.api_key = os.getenv("OPENROUTER_API_KEY")
        self.site_url = os.getenv("SITE_URL", "")    # Your site URL
        self.site_name = os.getenv("SITE_NAME", "")  # Your site name

        # Initialize the OpenAI client pointed at the OpenRouter-compatible endpoint
        self.client = OpenAI(
            base_url=self.base_url,
            api_key=self.api_key
        )

    def chat(
        self,
        prompt: str,
        system_message: str = "",
        model_id: str = "openai/gpt-4o-mini",
        conversation_id: str = "",
        user_id: str = "string"
    ) -> str:
        """
        Sends a prompt to the OpenRouter API and returns the response as text.

        Args:
            prompt (str): The user's input prompt.
            system_message (str): Optional system message for the LLM.
            model_id (str): The model ID to use (default: "openai/gpt-4o-mini").
            conversation_id (str): Unique ID for the conversation (accepted but not sent with the request).
            user_id (str): Unique ID for the user (accepted but not sent with the request).

        Returns:
            str: The text response from the LLM API.

        Raises:
            Exception: If the API request fails.
        """
        try:
            messages = []

            # Add system message if provided
            if system_message:
                messages.append({"role": "system", "content": system_message})

            # Add the user message
            messages.append({"role": "user", "content": prompt})

            # Create the chat completion
            completion = self.client.chat.completions.create(
                extra_headers={
                    "HTTP-Referer": self.site_url,  # Optional
                    "X-Title": self.site_name,      # Optional
                },
                model=model_id,
                messages=messages
            )

            # Return the response text
            return completion.choices[0].message.content
        except Exception as e:
            logger.error(f"Error in chat completion: {str(e)}")
            raise Exception(f"Error from OpenRouter API: {str(e)}")