File size: 1,667 Bytes
ed88192
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
# routers/llm_chat.py
from fastapi import APIRouter, HTTPException, Header
import requests
import os
from pydantic import BaseModel

# Router for LLM chat endpoints; every route here is served under /api/v1.
router = APIRouter(
    prefix="/api/v1",  # Prefix for all routes in this router
    tags=["LLM Chat"],  # Tag for OpenAPI documentation
)

# Load environment variables
# Upstream LLM service endpoint; overridable via the LLM_API_URL env var.
LLM_API_URL = os.getenv("LLM_API_URL", "https://pvanand-audio-chat.hf.space/llm-agent")
# Shared secret checked against the caller's X-API-Key header.
# NOTE(review): the hardcoded fallback "44d5c" means auth silently accepts a
# known key whenever the API_KEY env var is unset — confirm this is intended
# and not a leftover development default.
API_KEY = os.getenv("API_KEY", "44d5c")

# Pydantic model for request validation
class LLMChatRequest(BaseModel):
    """Request body for the /llm-chat endpoint.

    Field names mirror the payload forwarded verbatim to the upstream
    LLM service (see the llm_chat handler below in this file).
    """
    prompt: str  # User prompt sent to the LLM; the only required field.
    system_message: str = ""  # Optional system prompt; empty by default.
    model_id: str = "openai/gpt-4o-mini"  # Upstream model identifier.
    # NOTE(review): the literal "string" defaults look like Swagger UI
    # placeholders that were committed — confirm whether real IDs should
    # be required here instead.
    conversation_id: str = "string"
    user_id: str = "string"

@router.post("/llm-chat", summary="Send a prompt to the LLM", description="This endpoint sends a prompt to the LLM and returns the response.")
def llm_chat(
    request: LLMChatRequest,
    x_api_key: str = Header(None, description="API Key for authentication")
):
    """Proxy a chat request to the upstream LLM service.

    Validates the caller's X-API-Key against API_KEY, forwards the request
    body to LLM_API_URL, and returns the upstream JSON response unchanged.

    Args:
        request: Validated chat payload (see LLMChatRequest).
        x_api_key: API key taken from the X-API-Key request header.

    Raises:
        HTTPException: 403 on a bad API key; 502 if the upstream service
            is unreachable or times out; otherwise the upstream's own
            non-200 status code.
    """
    if x_api_key != API_KEY:
        raise HTTPException(status_code=403, detail="Invalid API Key")

    payload = {
        "prompt": request.prompt,
        "system_message": request.system_message,
        "model_id": request.model_id,
        "conversation_id": request.conversation_id,
        "user_id": request.user_id,
    }

    headers = {
        "accept": "application/json",
        "X-API-Key": x_api_key,  # upstream expects the same shared key
        "Content-Type": "application/json",
    }

    # Plain `def` (not `async def`): requests is a blocking client, so FastAPI
    # runs this handler in its threadpool instead of stalling the event loop.
    try:
        # timeout prevents a hung upstream from tying up a worker forever
        response = requests.post(LLM_API_URL, json=payload, headers=headers, timeout=60)
    except requests.RequestException as exc:
        # Connection errors/timeouts previously surfaced as unhandled 500s.
        raise HTTPException(status_code=502, detail="Error contacting LLM API") from exc

    if response.status_code != 200:
        raise HTTPException(status_code=response.status_code, detail="Error from LLM API")

    return response.json()