Spaces:
Sleeping
Sleeping
File size: 1,748 Bytes
ade0ea8 e0a400b ade0ea8 e0a400b ade0ea8 e0a400b ade0ea8 e0a400b ade0ea8 e0a400b ade0ea8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 |
# main.py
from fastapi import FastAPI, HTTPException, Header
from fastapi.middleware.cors import CORSMiddleware
import requests
import os
from pydantic import BaseModel
# FastAPI application instance; the routes defined below attach to this object.
app = FastAPI()

# Enable CORS for all origins
# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers for credentialed requests under the CORS spec —
# confirm whether credentials are actually needed here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allows all origins
    allow_credentials=True,
    allow_methods=["*"],  # Allows all methods
    allow_headers=["*"],  # Allows all headers
)

# Load environment variables
# Upstream LLM endpoint; falls back to the public HF Space URL when unset.
LLM_API_URL = os.getenv("LLM_API_URL", "https://pvanand-audio-chat.hf.space/llm-agent")
# Shared secret expected in the X-API-Key request header; None if the env var is unset.
API_KEY = os.getenv("X_API_KEY")
# Pydantic model for request validation
class LLMChatRequest(BaseModel):
    """Request body for POST /llm-chat; forwarded field-for-field to the upstream LLM API."""

    # User prompt to send to the model (required, no default).
    prompt: str
    # Optional system message; empty string when the caller supplies none.
    system_message: str = ""
    # Upstream model identifier.
    model_id: str = "openai/gpt-4o-mini"
    # Conversation identifier; "string" is a placeholder default — TODO confirm
    # whether the upstream service treats it specially.
    conversation_id: str = "string"
    # Caller identifier; "string" is likewise a placeholder default.
    user_id: str = "string"
@app.post("/llm-chat")
def llm_chat(
    request: LLMChatRequest,
    x_api_key: str = Header(None),
):
    """Proxy a chat request to the upstream LLM API.

    Validates the caller's ``X-API-Key`` header against the configured
    ``API_KEY``, forwards the request body as JSON to ``LLM_API_URL``, and
    relays the upstream JSON response.

    Raises:
        HTTPException(403): missing/invalid API key, or server key unconfigured.
        HTTPException(502): upstream unreachable, timed out, or returned non-JSON.
        HTTPException(<status>): upstream responded with a non-200 status.
    """
    # Also reject when the server-side key is unconfigured: previously an
    # unset API_KEY allowed any request that omitted the header (None == None).
    if API_KEY is None or x_api_key != API_KEY:
        raise HTTPException(status_code=403, detail="Invalid API Key")

    payload = {
        "prompt": request.prompt,
        "system_message": request.system_message,
        "model_id": request.model_id,
        "conversation_id": request.conversation_id,
        "user_id": request.user_id,
    }
    headers = {
        "accept": "application/json",
        "X-API-Key": x_api_key,
        "Content-Type": "application/json",
    }

    # Plain `def` (not `async def`): requests.post blocks, and FastAPI runs
    # sync endpoints in a threadpool so the event loop is not starved.
    try:
        # Timeout so a hung upstream cannot tie up a worker indefinitely.
        response = requests.post(LLM_API_URL, json=payload, headers=headers, timeout=30)
    except requests.RequestException as exc:
        raise HTTPException(status_code=502, detail="Error from LLM API") from exc

    if response.status_code != 200:
        raise HTTPException(status_code=response.status_code, detail="Error from LLM API")

    try:
        return response.json()
    except ValueError as exc:  # upstream returned a non-JSON body
        raise HTTPException(status_code=502, detail="Error from LLM API") from exc
if __name__ == "__main__":
    # Local entry point: launch a uvicorn development server for this app.
    # Host/port are overridable via HOST/PORT env vars; defaults are the
    # original hard-coded 0.0.0.0:8000, so existing behavior is preserved.
    import uvicorn

    uvicorn.run(
        app,
        host=os.getenv("HOST", "0.0.0.0"),
        port=int(os.getenv("PORT", "8000")),
    )