from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi.middleware.cors import CORSMiddleware
import requests
import json
import os
import time
import asyncio

app = FastAPI()

# CORS settings
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allow all origins; restrict to specific domains in production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Configuration via environment variables
STATUS_URL = os.environ.get("STATUS_URL", "https://duckduckgo.com/duckchat/v1/status")
CHAT_URL = os.environ.get("CHAT_URL", "https://duckduckgo.com/duckchat/v1/chat")
REFERER = os.environ.get("REFERER", "https://duckduckgo.com/")
ORIGIN = os.environ.get("ORIGIN", "https://duckduckgo.com")
USER_AGENT = os.environ.get(
    "USER_AGENT",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
)
COOKIE = os.environ.get("COOKIE", "dcm=3; s=l; bf=1")  # Cookie read from the environment

DEFAULT_HEADERS = {
    "User-Agent": USER_AGENT,
    "Accept": "text/event-stream",
    "Accept-Language": "en-US,en;q=0.5",
    "Referer": REFERER,
    "Content-Type": "application/json",
    "Origin": ORIGIN,
    "Connection": "keep-alive",
    "Cookie": COOKIE,
    "Sec-Fetch-Dest": "empty",
    "Sec-Fetch-Mode": "cors",
    "Sec-Fetch-Site": "same-origin",
    "Pragma": "no-cache",
    "TE": "trailers",
}

SUPPORTED_MODELS = [
    "o3-mini",
    "gpt-4o-mini",
    "claude-3-haiku-20240307",
    "meta-llama/Llama-3.3-70B-Instruct-Turbo",
]


async def get_vqd():
    """Fetch the VQD token required by DuckDuckGo Chat."""
    headers = {**DEFAULT_HEADERS, "x-vqd-accept": "1"}
    try:
        response = requests.get(STATUS_URL, headers=headers)
        response.raise_for_status()  # Raise HTTPError for non-2xx status codes
        vqd = response.headers.get("x-vqd-4")
        if not vqd:
            raise ValueError("x-vqd-4 header not found in the response.")
        return vqd
    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=500, detail=f"HTTP request failed: {e}")
    except ValueError as e:
        raise HTTPException(status_code=500, detail=str(e))


async def duckduckgo_chat_stream(model, messages):
    """Talk to DuckDuckGo Chat and stream the reply as SSE."""
    try:
        x_vqd_4 = await get_vqd()

        chat_headers = {
            **DEFAULT_HEADERS,
            "x-vqd-4": x_vqd_4,
            "Accept": "text/event-stream",  # Make sure SSE is accepted
        }

        body = json.dumps({
            "model": model,
            "messages": messages,
        })

        response = requests.post(CHAT_URL, headers=chat_headers, data=body, stream=True)
        response.raise_for_status()

        async def event_stream():
            try:
                for line in response.iter_lines():
                    if not line:
                        continue
                    decoded_line = line.decode("utf-8")
                    if not decoded_line.startswith("data: "):
                        continue
                    payload = decoded_line[6:]
                    if payload.strip() == "[DONE]":  # Upstream end-of-stream marker, not an error
                        break
                    try:
                        json_data = json.loads(payload)
                        message_content = json_data.get("message", "")
                        if message_content:
                            # Build an OpenAI-style streaming chunk
                            openai_stream_response = {
                                "id": f"chatcmpl-{int(time.time() * 1000)}",
                                "object": "chat.completion.chunk",
                                "created": int(time.time()),
                                "model": model,
                                "choices": [
                                    {
                                        "delta": {"content": message_content},
                                        "index": 0,
                                        "finish_reason": None,
                                    }
                                ],
                            }
                            yield f"data: {json.dumps(openai_stream_response)}\n\n"
                            await asyncio.sleep(0.01)  # Yield control and avoid pegging the CPU
                    except json.JSONDecodeError as e:
                        print(f"JSON decode error: {e}, line: {decoded_line}")
                        yield f"data: {json.dumps({'error': 'JSON decode error'})}\n\n"  # Report the error
                        break  # Stop streaming
            except requests.exceptions.RequestException as e:
                print(f"Request error: {e}")
                yield f"data: {json.dumps({'error': 'request error'})}\n\n"
            except Exception as e:
                print(f"Unexpected error: {e}")
                yield f"data: {json.dumps({'error': 'unexpected error'})}\n\n"
            finally:
                yield "data: [DONE]\n\n"  # Terminate the SSE stream

        return StreamingResponse(event_stream(), media_type="text/event-stream")

    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=500, detail=f"HTTP request failed: {e}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error during chat: {e}")

async def duckduckgo_chat_non_stream(model, messages):
    """Talk to DuckDuckGo Chat and return the full reply (non-streaming)."""
    try:
        x_vqd_4 = await get_vqd()

        chat_headers = {
            **DEFAULT_HEADERS,
            "x-vqd-4": x_vqd_4,
        }

        body = json.dumps({
            "model": model,
            "messages": messages,
        })

        response = requests.post(CHAT_URL, headers=chat_headers, data=body)
        response.raise_for_status()

        full_message = ""
        for line in response.iter_lines():
            if not line:
                continue
            decoded_line = line.decode("utf-8")
            if not decoded_line.startswith("data: "):
                continue
            payload = decoded_line[6:]
            if payload.strip() == "[DONE]":  # End-of-stream marker
                break
            try:
                json_data = json.loads(payload)
                full_message += json_data.get("message", "")
            except json.JSONDecodeError as e:
                print(f"JSON decode error: {e}, line: {decoded_line}")
                # Ignore lines that cannot be parsed

        return full_message

    except requests.exceptions.RequestException as e:
        raise HTTPException(status_code=500, detail=f"HTTP request failed: {e}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error during chat: {e}")


@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
    try:
        body = await request.json()
        if not body:
            raise HTTPException(status_code=400, detail="Invalid request body")

        model = body.get("model", "o3-mini")
        if model not in SUPPORTED_MODELS:
            raise HTTPException(
                status_code=400,
                detail=f'Model "{model}" is not supported. Supported models are: {", ".join(SUPPORTED_MODELS)}.',
            )

        messages = body.get("messages")
        if not messages:
            raise HTTPException(status_code=400, detail="No messages provided")

        stream = body.get("stream", False)  # "stream" flag, defaults to False

        # Handle the system message
        system_message = next((msg for msg in messages if msg.get("role") == "system"), None)
        system_prompt = f"You will play the role of {system_message['content']}.\n" if system_message else ""

        # Extract and format the conversation history
        history_messages = "\n".join(
            f"{msg['role']}: {msg['content']}"
            for msg in messages
            if msg.get("role") != "system" and msg != messages[-1]
        )

        # Extract the last user message
        last_user_message = messages[-1]
        current_question = last_user_message["content"] if last_user_message.get("role") == "user" else ""

        # Build the combined message
        combined_message_content = (
            f"{system_prompt}Here is the conversation history:\n{history_messages}\n"
            f"The user's current question: {current_question}"
        )
        combined_message = {"role": "user", "content": combined_message_content}

        if stream:
            return await duckduckgo_chat_stream(model, [combined_message])
        else:
            response_text = await duckduckgo_chat_non_stream(model, [combined_message])

            # Build an OpenAI-style response
            openai_response = {
                "id": f"chatcmpl-{int(time.time() * 1000)}",  # Generate a unique ID
                "object": "chat.completion",
                "created": int(time.time()),
                "model": model,
                "choices": [
                    {
                        "message": {
                            "role": "assistant",
                            "content": response_text,
                        },
                        "finish_reason": "stop",
                        "index": 0,
                    },
                ],
                "usage": {
                    "prompt_tokens": 0,
                    "completion_tokens": 0,
                    "total_tokens": 0,
                },
            }
            return JSONResponse(content=openai_response)

    except HTTPException as e:
        raise e  # Re-raise HTTPException so FastAPI handles it
    except Exception as e:
        print(f"API error: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {e}")


@app.exception_handler(HTTPException)
async def http_exception_handler(request: Request, exc: HTTPException):
    return JSONResponse(
        status_code=exc.status_code,
        content={"detail": exc.detail},
    )


@app.get("/")
async def greet_json():
    return {"Hello": "World!"}
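

# Optional local-run entry point: a minimal sketch assuming uvicorn is installed
# and this module is saved as app.py; host, port, and the example request below
# are illustrative defaults, not part of the deployment configuration.
if __name__ == "__main__":
    import uvicorn

    # Example request once the server is running (hypothetical local URL):
    #   curl http://127.0.0.1:8000/v1/chat/completions \
    #     -H "Content-Type: application/json" \
    #     -d '{"model": "gpt-4o-mini", "messages": [{"role": "user", "content": "Hello"}], "stream": false}'
    uvicorn.run(app, host="0.0.0.0", port=8000)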