# duckduckgoo / app.py
# Source: Hugging Face Space "yuoop/duckduckgoo" (revision e57df21, 6.17 kB).
# OpenAI-compatible proxy in front of DuckDuckGo Chat.
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse
import requests
import json
import os
import time
app = FastAPI()
# 环境变量配置
STATUS_URL = os.environ.get("STATUS_URL", "https://duckduckgo.com/duckchat/v1/status")
CHAT_URL = os.environ.get("CHAT_URL", "https://duckduckgo.com/duckchat/v1/chat")
REFERER = os.environ.get("REFERER", "https://duckduckgo.com/")
ORIGIN = os.environ.get("ORIGIN", "https://duckduckgo.com")
USER_AGENT = os.environ.get("USER_AGENT", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36")
COOKIE = os.environ.get("COOKIE", "dcm=3; s=l; bf=1") # 从环境变量获取 Cookie
DEFAULT_HEADERS = {
"User-Agent": USER_AGENT,
"Accept": "text/event-stream",
"Accept-Language": "en-US,en;q=0.5",
"Accept-Encoding": "gzip, deflate, br",
"Referer": REFERER,
"Content-Type": "application/json",
"Origin": ORIGIN,
"Connection": "keep-alive",
"Cookie": COOKIE,
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"Pragma": "no-cache",
"TE": "trailers",
}
SUPPORTED_MODELS = ["o3-mini", "gpt-4o-mini", "claude-3-haiku-20240307", "meta-llama/Llama-3.3-70B-Instruct-Turbo"]
async def get_vqd():
"""获取 DuckDuckGo Chat 的 VQD 值。"""
headers = {**DEFAULT_HEADERS, "x-vqd-accept": "1"}
try:
response = requests.get(STATUS_URL, headers=headers)
response.raise_for_status() # 抛出 HTTPError,如果状态码不是 200
vqd = response.headers.get("x-vqd-4")
if not vqd:
raise ValueError("x-vqd-4 header 在响应中未找到。")
return vqd
except requests.exceptions.RequestException as e:
raise HTTPException(status_code=500, detail=f"HTTP 请求失败: {e}")
except ValueError as e:
raise HTTPException(status_code=500, detail=str(e))
async def duckduckgo_chat(model, messages):
"""与 DuckDuckGo Chat 进行交互。"""
try:
x_vqd_4 = await get_vqd()
chat_headers = {
**DEFAULT_HEADERS,
"x-vqd-4": x_vqd_4,
}
body = json.dumps({
"model": model,
"messages": messages,
})
response = requests.post(CHAT_URL, headers=chat_headers, data=body, stream=True)
response.raise_for_status()
full_message = ""
for line in response.iter_lines():
if line:
decoded_line = line.decode('utf-8')
if decoded_line.startswith("data: "):
try:
json_data = json.loads(decoded_line[5:])
full_message += json_data.get("message", "")
except json.JSONDecodeError as e:
print(f"JSON 解析错误: {e}, 行: {decoded_line}")
pass # 忽略解析错误
return full_message
except requests.exceptions.RequestException as e:
raise HTTPException(status_code=500, detail=f"HTTP 请求失败: {e}")
except Exception as e:
raise HTTPException(status_code=500, detail=f"聊天过程中发生错误: {e}")
@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
try:
body = await request.json()
if not body:
raise HTTPException(status_code=400, detail="无效的请求体")
model = body.get("model", "o3-mini")
if model not in SUPPORTED_MODELS:
raise HTTPException(status_code=400, detail=f"模型 \"{model}\" 不支持。支持的模型有: {', '.join(SUPPORTED_MODELS)}.")
messages = body.get("messages")
if not messages:
raise HTTPException(status_code=400, detail="未提供任何消息内容")
# 处理 system 消息
system_message = next((msg for msg in messages if msg.get("role") == "system"), None)
system_prompt = f"你将扮演一个{system_message['content']}.\n" if system_message else ""
# 提取历史消息并格式化
history_messages = "\n".join(
f"{msg['role']}: {msg['content']}"
for msg in messages
if msg.get("role") != "system" and msg != messages[-1]
)
# 提取最后一条用户消息
last_user_message = messages[-1]
current_question = last_user_message["content"] if last_user_message.get("role") == "user" else ""
# 构建合并后的消息
combined_message_content = f"{system_prompt}以下是历史对话记录:\n{history_messages}\n用户当前提问:{current_question}"
combined_message = {"role": "user", "content": combined_message_content}
# 发送单条消息
response_text = await duckduckgo_chat(model, [combined_message])
# 构建 OpenAI 风格的响应
openai_response = {
"id": f"chatcmpl-{int(time.time() * 1000)}", # 生成唯一 ID
"object": "chat.completion",
"created": int(time.time()),
"model": model,
"choices": [
{
"message": {
"role": "assistant",
"content": response_text,
},
"finish_reason": "stop",
"index": 0,
},
],
"usage": {
"prompt_tokens": 0,
"completion_tokens": 0,
"total_tokens": 0
},
}
return JSONResponse(content=openai_response)
except HTTPException as e:
raise e # 重新抛出 HTTPException,以便 FastAPI 处理
except Exception as e:
print(f"API 错误: {e}")
raise HTTPException(status_code=500, detail=f"服务器内部错误: {e}")
@app.exception_handler(HTTPException)
async def http_exception_handler(request: Request, exc: HTTPException):
return JSONResponse(
status_code=exc.status_code,
content={"detail": exc.detail},
)
@app.get("/")
async def greet_json():
return {"Hello": "World!"}