# routers/llm_chat.py
from fastapi import APIRouter, HTTPException, Header
from pydantic import BaseModel
from helpers.ai_client import AIClient
from typing import Optional
import logging

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

router = APIRouter(
    prefix="/api/v1",   # Prefix for all routes in this router
    tags=["LLM Chat"],  # Tag for OpenAPI documentation
)

# Initialize the AI client
ai_client = AIClient()

# Pydantic model for request validation
class LLMChatRequest(BaseModel):
    prompt: str
    system_message: str = ""
    model_id: str = "openai/gpt-4o-mini"
    conversation_id: str = "string"
    user_id: str = "string"
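
# A request body matching the model above might look like this
# (the values are illustrative, not from the original file):
#   {
#       "prompt": "Hello!",
#       "system_message": "You are a helpful assistant.",
#       "model_id": "openai/gpt-4o-mini",
#       "conversation_id": "demo-conversation",
#       "user_id": "demo-user"
#   }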

@router.post(
    "/llm-chat",
    summary="Send a prompt to the LLM",
    description="This endpoint sends a prompt to the LLM and returns the response.",
)
async def llm_chat(
    request: LLMChatRequest,
    # Declared so the key shows up in the OpenAPI docs; it is not
    # validated inside this router.
    x_api_key: Optional[str] = Header(None, description="API Key for authentication")
):
    try:
        # Use the AI client to send the prompt
        response = ai_client.chat(
            prompt=request.prompt,
            system_message=request.system_message,
            model_id=request.model_id,
            conversation_id=request.conversation_id,
            user_id=request.user_id
        )
        return response
    except Exception as e:
        logger.error(f"Error in llm_chat: {e}")
        raise HTTPException(status_code=500, detail=str(e))
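
# --- Usage sketch (an assumption, not part of this file) ---
# A minimal app that mounts this router could look like the following;
# the "main.py" filename and the uvicorn invocation are illustrative:
#
#   # main.py
#   from fastapi import FastAPI
#   from routers.llm_chat import router as llm_chat_router
#
#   app = FastAPI()
#   app.include_router(llm_chat_router)
#
#   # Run with: uvicorn main:app --reload
#   # Then POST to http://127.0.0.1:8000/api/v1/llm-chat with an
#   # X-API-Key header and a JSON body shaped like LLMChatRequest.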