Delete api
- api/__init__.py +0 -0
- api/__pycache__/dummy.txt +0 -1
- api/app.py +0 -40
- api/auth.py +0 -10
- api/config.py +0 -58
- api/logger.py +0 -20
- api/models.py +0 -14
- api/routes.py +0 -59
- api/utils.py +0 -250
api/__init__.py
DELETED
File without changes
api/__pycache__/dummy.txt
DELETED
@@ -1 +0,0 @@
-
api/app.py
DELETED
@@ -1,40 +0,0 @@
-from fastapi import FastAPI, Request
-from starlette.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
-from api.logger import setup_logger
-from api.routes import router
-
-logger = setup_logger(__name__)
-
-def create_app():
-    app = FastAPI(
-        title="NiansuhAI API Gateway",
-        docs_url=None,  # Disable Swagger UI
-        redoc_url=None,  # Disable ReDoc
-        openapi_url=None,  # Disable OpenAPI schema
-    )
-
-    # CORS settings
-    app.add_middleware(
-        CORSMiddleware,
-        allow_origins=["*"],  # Adjust as needed for security
-        allow_credentials=True,
-        allow_methods=["*"],
-        allow_headers=["*"],
-    )
-
-    # Include routes
-    app.include_router(router)
-
-    # Global exception handler for better error reporting
-    @app.exception_handler(Exception)
-    async def global_exception_handler(request: Request, exc: Exception):
-        logger.error(f"An error occurred: {str(exc)}")
-        return JSONResponse(
-            status_code=500,
-            content={"message": "An internal server error occurred."},
-        )
-
-    return app
-
-app = create_app()
api/auth.py
DELETED
@@ -1,10 +0,0 @@
-from fastapi import Depends, HTTPException
-from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
-from api.config import APP_SECRET
-
-security = HTTPBearer()
-
-def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
-    if credentials.credentials != APP_SECRET:
-        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
-    return credentials.credentials
api/config.py
DELETED
@@ -1,58 +0,0 @@
-import os
-from dotenv import load_dotenv
-
-load_dotenv()
-
-# DDG API Configurations
-DDG_API_ENDPOINT = "https://duckduckgo.com/duckchat/v1/chat"
-DDG_STATUS_URL = "https://duckduckgo.com/duckchat/v1/status"
-
-APP_SECRET = os.getenv("APP_SECRET")
-
-# Allowed Models for DDG
-ALLOWED_MODELS = [
-    {"id": "gpt-4o-mini", "name": "GPT-4o Mini"},
-    {"id": "claude-3-haiku-20240307", "name": "Claude 3 Haiku"},
-    {"id": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", "name": "Meta LLaMA 3.1-70B Instruct Turbo"},
-    {"id": "mistralai/Mixtral-8x7B-Instruct-v0.1", "name": "Mistral Mixtral 8x7B Instruct v0.1"},
-    # Add more DDG models as needed
-]
-
-MODEL_MAPPING = {
-    "gpt-4o-mini": "gpt-4o-mini",
-    "claude-3-haiku": "claude-3-haiku-20240307",
-    "llama-3.1-70b": "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-    "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",
-    # Add more mappings as needed
-}
-
-# AGENT_MODE, TRENDING_AGENT_MODE, MODEL_PREFIXES, MODEL_REFERERS for DDG models
-AGENT_MODE = {
-    'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "flux"},
-    'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},
-    # Add DDG-specific agent modes if required
-}
-
-TRENDING_AGENT_MODE = {
-    "gpt-4o-mini": {},
-    "claude-3-haiku-20240307": {},
-    "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": {},
-    "mistralai/Mixtral-8x7B-Instruct-v0.1": {},
-    # Add more DDG-specific trending agent modes if required
-}
-
-MODEL_PREFIXES = {
-    'gpt-4o-mini': '@GPT-4o Mini',
-    'claude-3-haiku-20240307': '@Claude-3-Haiku',
-    'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo': '@Meta-LLaMA-3.1-70B',
-    'mistralai/Mixtral-8x7B-Instruct-v0.1': '@Mistral-Mixtral-8x7B',
-    # Add more DDG-specific prefixes as needed
-}
-
-MODEL_REFERERS = {
-    "gpt-4o-mini": "/?model=gpt-4o-mini",
-    "claude-3-haiku-20240307": "/?model=claude-3-haiku-20240307",
-    "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo": "/?model=meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-    "mistralai/Mixtral-8x7B-Instruct-v0.1": "/?model=mistralai/Mixtral-8x7B-Instruct-v0.1",
-    # Add more DDG-specific referers as needed
-}
api/logger.py
DELETED
@@ -1,20 +0,0 @@
-import logging
-
-def setup_logger(name):
-    logger = logging.getLogger(name)
-    if not logger.handlers:
-        logger.setLevel(logging.INFO)
-        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
-
-        # Console handler
-        console_handler = logging.StreamHandler()
-        console_handler.setFormatter(formatter)
-        logger.addHandler(console_handler)
-
-        # File Handler - Error Level
-        # error_file_handler = logging.FileHandler('error.log')
-        # error_file_handler.setFormatter(formatter)
-        # error_file_handler.setLevel(logging.ERROR)
-        # logger.addHandler(error_file_handler)
-
-    return logger
api/models.py
DELETED
@@ -1,14 +0,0 @@
-from typing import List, Optional
-from pydantic import BaseModel
-
-class Message(BaseModel):
-    role: str
-    content: str | list
-
-class ChatRequest(BaseModel):
-    model: str
-    messages: List[Message]
-    stream: Optional[bool] = False
-    temperature: Optional[float] = 0.7
-    top_p: Optional[float] = 0.9
-    max_tokens: Optional[int] = 99999999
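For reference, a minimal sketch of a request body matching the ChatRequest schema removed above; the model id and message text are illustrative placeholders, and the remaining values mirror the defaults in api/models.py:

    # Hypothetical example payload for the deleted ChatRequest model.
    example_request = {
        "model": "gpt-4o-mini",  # must be an id listed in ALLOWED_MODELS (api/config.py)
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,         # defaults below mirror api/models.py
        "temperature": 0.7,
        "top_p": 0.9,
    }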
api/routes.py
DELETED
@@ -1,59 +0,0 @@
-import json
-from fastapi import APIRouter, Depends, HTTPException, Request, Response
-from fastapi.responses import StreamingResponse
-from api.auth import verify_app_secret
-from api.config import ALLOWED_MODELS
-from api.models import ChatRequest
-from api.utils import process_ddg_non_streaming_response, process_ddg_streaming_response
-from api.logger import setup_logger
-
-logger = setup_logger(__name__)
-
-router = APIRouter()
-
-@router.options("/v1/chat/completions")
-@router.options("/api/v1/chat/completions")
-async def chat_completions_options():
-    return Response(
-        status_code=200,
-        headers={
-            "Access-Control-Allow-Origin": "*",
-            "Access-Control-Allow-Methods": "POST, OPTIONS",
-            "Access-Control-Allow-Headers": "Content-Type, Authorization",
-        },
-    )
-
-@router.get("/v1/models")
-@router.get("/api/v1/models")
-async def list_models():
-    return {"object": "list", "data": ALLOWED_MODELS}
-
-@router.post("/v1/chat/completions")
-@router.post("/api/v1/chat/completions")
-async def chat_completions(
-    request: ChatRequest, app_secret: str = Depends(verify_app_secret)
-):
-    logger.info("Entering chat_completions route")
-    logger.info(f"Processing chat completion request for model: {request.model}")
-
-    if request.model not in [model["id"] for model in ALLOWED_MODELS]:
-        raise HTTPException(
-            status_code=400,
-            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
-        )
-
-    if request.stream:
-        logger.info("Streaming response")
-        return StreamingResponse(process_ddg_streaming_response(request), media_type="text/event-stream")
-    else:
-        logger.info("Non-streaming response")
-        return await process_ddg_non_streaming_response(request)
-
-@router.route('/')
-@router.route('/healthz')
-@router.route('/ready')
-@router.route('/alive')
-@router.route('/status')
-@router.get("/health")
-def health_check(request: Request):
-    return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
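For context, a minimal client-side sketch of how the deleted /v1/chat/completions route was invoked; the host, port, secret placeholder, and prompt are assumptions for illustration, not part of this commit:

    import httpx

    # Hypothetical call against the gateway defined in api/routes.py above.
    resp = httpx.post(
        "http://localhost:8000/v1/chat/completions",          # assumed local deployment
        headers={"Authorization": "Bearer <APP_SECRET>"},      # checked by verify_app_secret (api/auth.py)
        json={
            "model": "gpt-4o-mini",
            "messages": [{"role": "user", "content": "Hello"}],
            "stream": False,
        },
        timeout=30,
    )
    print(resp.json())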
api/utils.py
DELETED
@@ -1,250 +0,0 @@
-from datetime import datetime
-import json
-from typing import Any, Dict, Optional
-
-import httpx
-import aiohttp
-from api.config import (
-    DDG_API_ENDPOINT,
-    DDG_STATUS_URL,
-    MODEL_MAPPING,
-    ALLOWED_MODELS,
-    AGENT_MODE,
-    TRENDING_AGENT_MODE,
-    MODEL_PREFIXES,
-    MODEL_REFERERS
-)
-from fastapi import HTTPException
-from api.models import ChatRequest
-
-from api.logger import setup_logger
-
-import uuid
-
-logger = setup_logger(__name__)
-
-async def get_ddg_vqd():
-    status_url = DDG_STATUS_URL
-
-    headers = {
-        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
-        'Accept': 'text/event-stream',
-        'x-vqd-accept': '1'
-    }
-
-    async with aiohttp.ClientSession() as session:
-        try:
-            async with session.get(status_url, headers=headers) as response:
-                if response.status == 200:
-                    vqd = response.headers.get("x-vqd-4")
-                    if not vqd:
-                        logger.error("VQD token not found in response headers.")
-                    else:
-                        logger.debug(f"VQD token retrieved: {vqd}")
-                    return vqd
-                else:
-                    logger.error(f"Error: Status code {response.status} when fetching VQD")
-                    return None
-        except Exception as e:
-            logger.error(f"Error getting VQD: {e}")
-            return None
-
-def message_to_dict_ddg(message):
-    if isinstance(message.content, str):
-        return {"role": message.role, "content": message.content}
-    else:
-        # If DDG API does not support non-string content, raise an error
-        raise ValueError("Message content must be a string.")
-
-def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
-    """Remove the model prefix from the response content if present."""
-    if model_prefix and content.startswith(model_prefix):
-        logger.debug(f"Stripping prefix '{model_prefix}' from content.")
-        return content[len(model_prefix):].strip()
-    logger.debug("No prefix to strip from content.")
-    return content
-
-def create_chat_completion_data(
-    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
-) -> Dict[str, Any]:
-    return {
-        "id": f"chatcmpl-{uuid.uuid4()}",
-        "object": "chat.completion.chunk",
-        "created": timestamp,
-        "model": model,
-        "choices": [
-            {
-                "index": 0,
-                "delta": {"content": content, "role": "assistant"},
-                "finish_reason": finish_reason,
-            }
-        ],
-        "usage": None,
-    }
-
-async def process_ddg_streaming_response(request: ChatRequest):
-    agent_mode = AGENT_MODE.get(request.model, {})
-    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
-    model_prefix = MODEL_PREFIXES.get(request.model, "")
-    # referer_path and referer_url are not required unless specified by DDG API
-
-    # Obtain VQD token
-    vqd = await get_ddg_vqd()
-    if not vqd:
-        raise HTTPException(status_code=500, detail="Failed to obtain VQD token")
-
-    # Build headers
-    dynamic_headers = {
-        'accept': 'text/event-stream',
-        'content-type': 'application/json',
-        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
-        'x-vqd-4': vqd
-    }
-
-    # Construct message history
-    try:
-        message_history = [message_to_dict_ddg(msg) for msg in request.messages]
-    except ValueError as ve:
-        logger.error(f"Invalid message format: {ve}")
-        raise HTTPException(status_code=400, detail=str(ve))
-
-    json_data = {
-        "model": MODEL_MAPPING.get(request.model, request.model),
-        "messages": message_history,
-        "previewToken": None,
-        "userId": None,
-        "codeModelMode": True,
-        "agentMode": agent_mode,
-        "trendingAgentMode": trending_agent_mode,
-        "isMicMode": False,
-        "userSystemPrompt": None,
-        "maxTokens": request.max_tokens,
-        "playgroundTopP": request.top_p,
-        "playgroundTemperature": request.temperature,
-        "isChromeExt": False,
-        "githubToken": None,
-        "clickedAnswer2": False,
-        "clickedAnswer3": False,
-        "clickedForceWebSearch": False,
-        "visitFromDelta": False,
-        "mobileClient": False,
-        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
-    }
-
-    logger.debug(f"Sending JSON payload to DDG API: {json.dumps(json_data)}")
-
-    async with httpx.AsyncClient() as client:
-        try:
-            async with client.stream(
-                "POST",
-                DDG_API_ENDPOINT,
-                headers=dynamic_headers,
-                json=json_data,
-                timeout=100,
-            ) as response:
-                response.raise_for_status()
-                async for line in response.aiter_lines():
-                    timestamp = int(datetime.now().timestamp())
-                    if line:
-                        content = line
-                        if content.startswith("$@$v=undefined-rv1$@$"):
-                            content = content[21:]
-                        # Strip the model prefix from the response content
-                        cleaned_content = strip_model_prefix(content, model_prefix)
-                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
-
-                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
-                yield "data: [DONE]\n\n"
-        except httpx.HTTPStatusError as e:
-            logger.error(f"HTTP error occurred: {e}")
-            raise HTTPException(status_code=e.response.status_code, detail=str(e))
-        except httpx.RequestError as e:
-            logger.error(f"Error occurred during request: {e}")
-            raise HTTPException(status_code=500, detail=str(e))
-
-async def process_ddg_non_streaming_response(request: ChatRequest):
-    agent_mode = AGENT_MODE.get(request.model, {})
-    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
-    model_prefix = MODEL_PREFIXES.get(request.model, "")
-
-    # Obtain VQD token
-    vqd = await get_ddg_vqd()
-    if not vqd:
-        raise HTTPException(status_code=500, detail="Failed to obtain VQD token")
-
-    # Build headers
-    dynamic_headers = {
-        'accept': 'application/json',
-        'content-type': 'application/json',
-        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
-        'x-vqd-4': vqd
-    }
-
-    # Construct message history
-    try:
-        message_history = [message_to_dict_ddg(msg) for msg in request.messages]
-    except ValueError as ve:
-        logger.error(f"Invalid message format: {ve}")
-        raise HTTPException(status_code=400, detail=str(ve))
-
-    json_data = {
-        "model": MODEL_MAPPING.get(request.model, request.model),
-        "messages": message_history,
-        "previewToken": None,
-        "userId": None,
-        "codeModelMode": True,
-        "agentMode": agent_mode,
-        "trendingAgentMode": trending_agent_mode,
-        "isMicMode": False,
-        "userSystemPrompt": None,
-        "maxTokens": request.max_tokens,
-        "playgroundTopP": request.top_p,
-        "playgroundTemperature": request.temperature,
-        "isChromeExt": False,
-        "githubToken": None,
-        "clickedAnswer2": False,
-        "clickedAnswer3": False,
-        "clickedForceWebSearch": False,
-        "visitFromDelta": False,
-        "mobileClient": False,
-        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
-    }
-
-    logger.debug(f"Sending JSON payload to DDG API: {json.dumps(json_data)}")
-
-    async with httpx.AsyncClient() as client:
-        try:
-            response = await client.post(
-                DDG_API_ENDPOINT,
-                headers=dynamic_headers,
-                json=json_data,
-                timeout=100
-            )
-            response.raise_for_status()
-            full_response = response.text
-        except httpx.HTTPStatusError as e:
-            logger.error(f"HTTP error occurred: {e}")
-            raise HTTPException(status_code=e.response.status_code, detail=str(e))
-        except httpx.RequestError as e:
-            logger.error(f"Error occurred during request: {e}")
-            raise HTTPException(status_code=500, detail=str(e))
-
-    if full_response.startswith("$@$v=undefined-rv1$@$"):
-        full_response = full_response[21:]
-
-    cleaned_full_response = strip_model_prefix(full_response, model_prefix)
-
-    return {
-        "id": f"chatcmpl-{uuid.uuid4()}",
-        "object": "chat.completion",
-        "created": int(datetime.now().timestamp()),
-        "model": request.model,
-        "choices": [
-            {
-                "index": 0,
-                "message": {"role": "assistant", "content": cleaned_full_response},
-                "finish_reason": "stop",
-            }
-        ],
-        "usage": None,
-    }