Spaces:
Sleeping
Sleeping
# routers/llm_chat.py
from fastapi import APIRouter, HTTPException, Header
from pydantic import BaseModel
from helpers.ai_client import AIClient
import logging

# Module-level logging configuration.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Every route registered on this router is served under /api/v1 and grouped
# under the "LLM Chat" tag in the generated OpenAPI documentation.
router = APIRouter(
    prefix="/api/v1",
    tags=["LLM Chat"],
)

# Single shared AI client instance reused by all handlers in this module.
ai_client = AIClient()
# Pydantic model for request validation
class LLMChatRequest(BaseModel):
    """Request payload accepted by the LLM chat endpoint."""

    prompt: str
    system_message: str = ""
    model_id: str = "openai/gpt-4o-mini"
    # NOTE(review): "string" looks like a Swagger example placeholder that
    # leaked into the defaults — confirm these fields shouldn't instead be
    # required or default to None.
    conversation_id: str = "string"
    user_id: str = "string"
# NOTE(review): the original had no route decorator, so this handler was never
# registered on `router`; path inferred from the module name — confirm.
@router.post("/llm-chat")
async def llm_chat(
    request: LLMChatRequest,
    x_api_key: str = Header(None, description="API Key for authentication"),
):
    """Forward a chat prompt to the configured AI backend and return its reply.

    Args:
        request: Validated chat payload (prompt, system message, model id,
            conversation id, user id).
        x_api_key: Caller-supplied API key. NOTE(review): accepted but never
            checked here — presumably validated inside AIClient or by
            middleware; confirm.

    Returns:
        Whatever ``ai_client.chat`` returns, passed through unmodified.

    Raises:
        HTTPException: re-raised as-is if the backend call raises one;
            otherwise a 500 wrapping any unexpected failure.
    """
    try:
        response = ai_client.chat(
            prompt=request.prompt,
            system_message=request.system_message,
            model_id=request.model_id,
            conversation_id=request.conversation_id,
            user_id=request.user_id,
        )
        return response
    except HTTPException:
        # Bug fix: the blanket `except Exception` below used to swallow
        # downstream HTTPExceptions (e.g. a deliberate 401/404) and re-report
        # them as a generic 500. Let them propagate with their status intact.
        raise
    except Exception as e:
        # logger.exception records the full traceback, which the original
        # logger.error(f"...") call discarded.
        logger.exception("Error in llm_chat")
        raise HTTPException(status_code=500, detail=str(e))