Update api/routes.py
Browse files — api/routes.py (+11 −10)
api/routes.py
CHANGED
@@ -1,8 +1,10 @@
|
|
|
|
|
|
1 |
import json
|
2 |
from fastapi import APIRouter, Depends, HTTPException, Request, Response
|
3 |
from fastapi.responses import StreamingResponse
|
4 |
from api.auth import verify_app_secret
|
5 |
-
from api.config import
|
6 |
from api.models import ChatRequest
|
7 |
from api.utils import process_non_streaming_response, process_streaming_response
|
8 |
from api.logger import setup_logger
|
@@ -26,7 +28,7 @@ async def chat_completions_options():
|
|
26 |
@router.get("/v1/models")
|
27 |
@router.get("/api/v1/models")
|
28 |
async def list_models():
|
29 |
-
return {"object": "list", "data":
|
30 |
|
31 |
@router.post("/v1/chat/completions")
|
32 |
@router.post("/api/v1/chat/completions")
|
@@ -36,11 +38,10 @@ async def chat_completions(
|
|
36 |
logger.info("Entering chat_completions route")
|
37 |
logger.info(f"Processing chat completion request for model: {request.model}")
|
38 |
|
39 |
-
if request.model not in
|
40 |
-
allowed = ', '.join(model['id'] for model in ALLOWED_MODELS)
|
41 |
raise HTTPException(
|
42 |
status_code=400,
|
43 |
-
detail=f"Model
|
44 |
)
|
45 |
|
46 |
if request.stream:
|
@@ -50,11 +51,11 @@ async def chat_completions(
|
|
50 |
logger.info("Non-streaming response")
|
51 |
return await process_non_streaming_response(request)
|
52 |
|
53 |
-
|
|
|
|
|
|
|
|
|
54 |
@router.get("/health")
|
55 |
-
@router.get("/healthz")
|
56 |
-
@router.get("/ready")
|
57 |
-
@router.get("/alive")
|
58 |
-
@router.get("/status")
|
59 |
def health_check(request: Request):
|
60 |
return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
|
|
|
1 |
+
# api/routes.py
|
2 |
+
|
3 |
import json
|
4 |
from fastapi import APIRouter, Depends, HTTPException, Request, Response
|
5 |
from fastapi.responses import StreamingResponse
|
6 |
from api.auth import verify_app_secret
|
7 |
+
from api.config import MODELS
|
8 |
from api.models import ChatRequest
|
9 |
from api.utils import process_non_streaming_response, process_streaming_response
|
10 |
from api.logger import setup_logger
|
|
|
28 |
@router.get("/v1/models")
@router.get("/api/v1/models")
async def list_models():
    """Return the model catalogue as an OpenAI-style list object.

    Exposed on both the bare and `/api`-prefixed paths so either client
    convention resolves to the same handler.
    """
    # NOTE(review): assumes MODELS (from api.config) is already a list of
    # model entries in the shape the client expects — confirm at the source.
    payload = {"object": "list", "data": MODELS}
    return payload
|
32 |
|
33 |
@router.post("/v1/chat/completions")
|
34 |
@router.post("/api/v1/chat/completions")
|
|
|
38 |
logger.info("Entering chat_completions route")
|
39 |
logger.info(f"Processing chat completion request for model: {request.model}")
|
40 |
|
41 |
+
if request.model not in MODELS:
|
|
|
42 |
raise HTTPException(
|
43 |
status_code=400,
|
44 |
+
detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(MODELS)}",
|
45 |
)
|
46 |
|
47 |
if request.stream:
|
|
|
51 |
logger.info("Non-streaming response")
|
52 |
return await process_non_streaming_response(request)
|
53 |
|
54 |
+
# Fix: the original mixed deprecated Starlette-style `@router.route(...)`
# decorators with FastAPI's `@router.get(...)`. `Router.route` is deprecated
# in Starlette (emits a DeprecationWarning) and routes registered through it
# bypass FastAPI's dependency injection and OpenAPI schema. All six probe
# paths are registered uniformly with `@router.get`, which matches the
# old behavior (`.route` defaults to GET-only methods).
@router.get("/")
@router.get("/healthz")
@router.get("/ready")
@router.get("/alive")
@router.get("/status")
@router.get("/health")
def health_check(request: Request):
    """Liveness/readiness probe.

    Always responds 200 with a JSON body of ``{"status": "ok"}``; the
    incoming request is accepted but not inspected.
    """
    return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
|