Upload 6 files
- Dockerfile +35 -0
- README.md +11 -11
- app.py +581 -0
- docker-compose.yml +20 -0
- requirements.txt +4 -0
- vercel.json +21 -0
Dockerfile
ADDED
@@ -0,0 +1,35 @@
+# Use Python 3.11 as the base image
+FROM python:3.11-slim
+
+# Switch to a Chinese mirror (for Bookworm/Debian 12)
+RUN rm -rf /etc/apt/sources.list.d/* && \
+    echo "deb http://mirrors.ustc.edu.cn/debian bookworm main" > /etc/apt/sources.list && \
+    echo "deb http://mirrors.ustc.edu.cn/debian bookworm-updates main" >> /etc/apt/sources.list && \
+    echo "deb http://mirrors.ustc.edu.cn/debian-security bookworm-security main" >> /etc/apt/sources.list
+
+# Set the working directory
+WORKDIR /app
+
+# Set environment variables
+ENV PYTHONUNBUFFERED=1 \
+    PYTHONDONTWRITEBYTECODE=1 \
+    POETRY_VERSION=1.6.1
+
+# Install system dependencies
+RUN apt-get update \
+    && apt-get install -y --no-install-recommends \
+    curl \
+    build-essential \
+    && rm -rf /var/lib/apt/lists/*
+
+# Copy project files
+COPY . .
+
+# Install Python dependencies
+RUN pip install --no-cache-dir -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
+
+# Expose the port
+EXPOSE 8000
+
+# Start command
+CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "8000"]
README.md
CHANGED
@@ -1,11 +1,11 @@
----
-title: DangbeiAI
-emoji:
-colorFrom:
-colorTo:
-sdk: docker
-pinned: false
-
----
-
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+---
+title: DangbeiAI
+emoji: 🔥
+colorFrom: pink
+colorTo: indigo
+sdk: docker
+pinned: false
+app_port: 3000
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,581 @@
+import secrets
+import time
+import uuid
+import hashlib
+import json
+import httpx
+import logging
+from typing import AsyncGenerator, List, Dict, Union
+from pydantic import BaseModel, Field
+from fastapi import FastAPI, HTTPException, Header
+from fastapi.responses import StreamingResponse
+from collections import OrderedDict
+from datetime import datetime
+import random, uvicorn
+
+# Set up logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+app = FastAPI()
+
+# Configuration
+class Config(BaseModel):
+    # API key
+    API_KEY: str = Field(
+        default="sk_gUXNcLwm0rnnEt55Mg8hq88",
+        description="API key for authentication"
+    )
+
+    # Maximum number of histories to keep
+    MAX_HISTORY: int = Field(
+        default=30,
+        description="Maximum number of conversation histories to keep"
+    )
+
+    # API domain
+    API_DOMAIN: str = Field(
+        default="https://ai-api.dangbei.net",
+        description="API Domain for requests"
+    )
+
+    # List of User Agents
+    USER_AGENTS: List[str] = Field(
+        default=[
+            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
+            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
+            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36",
+            "Mozilla/5.0 (iPhone; CPU iPhone OS 16_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.0 Mobile/15E148 Safari/604.1",
+            "Mozilla/5.0 (iPad; CPU OS 16_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.0 Mobile/15E148 Safari/604.1"
+        ],
+        description="List of User Agent strings for requests"
+    )
+
+    # Maximum number of conversations per device ID
+    DEVICE_CONVERSATIONS_LIMIT: int = Field(
+        default=10,
+        description="Number of conversations before generating new device ID"
+    )
+
+# Create the global configuration instance
+config = Config()
+
+# Helper function: verify the API key
+async def verify_api_key(authorization: str = Header(None)):
+    if not authorization:
+        raise HTTPException(status_code=401, detail="Missing API key")
+
+    api_key = authorization.replace("Bearer ", "").strip()
+    if api_key != config.API_KEY:  # use API_KEY from config
+        raise HTTPException(status_code=401, detail="Invalid API key")
+    return api_key
+
+class Message(BaseModel):
+    role: str
+    content: str
+
+    class Config:
+        # Allow extra fields
+        extra = "allow"
+
+class ChatRequest(BaseModel):
+    model: str
+    messages: List[Union[dict, Message]]  # allow dicts or Message objects
+    stream: bool = False
+
+    # Extra optional fields to accommodate more client requests
+    temperature: float | None = None
+    top_p: float | None = None
+    n: int | None = None
+    max_tokens: int | None = None
+    presence_penalty: float | None = None
+    frequency_penalty: float | None = None
+    user: str | None = None
+
+    class Config:
+        # Allow extra fields
+        extra = "allow"
+        # Allow construction directly from dicts
+        arbitrary_types_allowed = True
+
+    @property
+    def messages_as_dicts(self) -> List[dict]:
+        """Convert messages to dict form."""
+        return [
+            msg if isinstance(msg, dict) else msg.dict()
+            for msg in self.messages
+        ]
+
+class ChatHistory:
+    def __init__(self):
+        self.current_device_id = None
+        self.current_conversation_id = None
+        self.conversation_count = 0
+        self.total_conversations = 0  # total conversation counter
+
+    def get_or_create_ids(self, force_new=False) -> tuple[str, str]:
+        """
+        Get or create a device_id and conversation_id.
+
+        Args:
+            force_new (bool): force a new conversation, used to clear the context
+
+        Returns:
+            tuple[str, str]: (device_id, conversation_id)
+        """
+        # Check whether a new device ID is needed
+        if (not self.current_device_id or
+                self.total_conversations >= config.DEVICE_CONVERSATIONS_LIMIT):
+            self.current_device_id = self._generate_device_id()
+            self.current_conversation_id = None
+            self.conversation_count = 0
+            self.total_conversations = 0
+            logger.info(f"Generated new device ID: {self.current_device_id}")
+
+        # Force a new conversation (context cleared) or no current conversation ID
+        if force_new or not self.current_conversation_id:
+            self.current_conversation_id = None
+            self.conversation_count = 0
+            logger.info("Forcing new conversation")
+
+        return self.current_device_id, self.current_conversation_id
+
+    def add_conversation(self, conversation_id: str):
+        """
+        Record a new conversation.
+
+        Args:
+            conversation_id (str): the new conversation ID
+        """
+        if not self.current_device_id:
+            return
+
+        self.current_conversation_id = conversation_id
+        self.conversation_count += 1
+        self.total_conversations += 1
+        logger.info(f"Added conversation {conversation_id} (count: {self.conversation_count}, total: {self.total_conversations})")
+
+    def _generate_device_id(self) -> str:
+        """Generate a new device ID and randomly pick a new USER_AGENT."""
+        # Randomly pick a new USER_AGENT
+        user_agent = random.choice(config.USER_AGENTS)
+        logger.info(f"Selected new User-Agent: {user_agent}")
+
+        uuid_str = uuid.uuid4().hex
+        nanoid_str = ''.join(random.choices(
+            "useandom26T198340PX75pxJACKVERYMINDBUSHWOLF_GQZbfghjklqvwyzrict",
+            k=20
+        ))
+        return f"{uuid_str}_{nanoid_str}"
+
+class Pipe:
+    def __init__(self):
+        self.data_prefix = "data:"
+        self.user_agent = random.choice(config.USER_AGENTS)  # pick a random USER_AGENT at init
+        self.chat_history = ChatHistory()
+        # Mapping for search-enabled models; actual requests use the lowercase names
+        self.search_models = {
+            "DeepSeek-R1-Search": "deepseek",
+            "DeepSeek-V3-Search": "deepseek",
+            "Doubao-Search": "doubao",  # display name capitalized, mapping lowercase
+            "Qwen-Search": "qwen"  # display name capitalized, mapping lowercase
+        }
+
+    def _build_full_prompt(self, messages: List[Dict]) -> str:
+        """Build the full prompt: system prompt, chat history, and the current question."""
+        if not messages:
+            return ''
+
+        system_prompt = ''
+        history = []
+        last_user_message = ''
+
+        # Process messages using plain dict access
+        for msg in messages:
+            if msg['role'] == 'system' and not system_prompt:
+                system_prompt = msg['content']
+            elif msg['role'] == 'user':
+                history.append(f"user: {msg['content']}")
+                last_user_message = msg['content']
+            elif msg['role'] == 'assistant':
+                history.append(f"assistant: {msg['content']}")
+
+        # Build the final prompt
+        parts = []
+        if system_prompt:
+            parts.append(f"[System Prompt]\n{system_prompt}")
+        if len(history) > 1:  # there is previous conversation
+            parts.append(f"[Chat History]\n{chr(10).join(history[:-1])}")
+        parts.append(f"[Question]\n{last_user_message}")
+
+        return chr(10).join(parts)
+
+    async def pipe(self, body: dict) -> AsyncGenerator[Dict, None]:
+        thinking_state = {"thinking": -1}
+
+        try:
+            # Build the full prompt
+            full_prompt = self._build_full_prompt(body["messages"])
+
+            # Decide whether to force a new context
+            force_new_context = False
+            messages = body["messages"]
+            if len(messages) == 1:  # a single message means a new conversation
+                force_new_context = True
+            elif len(messages) >= 2:  # check whether the history was cleared
+                last_two = messages[-2:]
+                if last_two[0]["role"] == "user" and last_two[1]["role"] == "user":
+                    force_new_context = True
+
+            # Get or create the device ID and conversation ID
+            device_id, conversation_id = self.chat_history.get_or_create_ids(force_new_context)
+
+            # Log session info
+            logger.info(f"Current session - Device ID: {device_id}, Conversation ID: {conversation_id}, Force new: {force_new_context}, Messages count: {len(messages)}")
+
+            # Create a new conversation if there is no conversation ID
+            if not conversation_id:
+                conversation_id = await self._create_conversation(device_id)
+                if not conversation_id:
+                    yield {"error": "Failed to create conversation"}
+                    return
+                # Record the new conversation
+                self.chat_history.add_conversation(conversation_id)
+                logger.info(f"Created new conversation: {conversation_id}")
+
+            # Model name handling
+            model_name = None
+            is_search_model = body["model"].endswith("-Search")
+            if is_search_model:
+                # For search models, use the mapped base model name
+                base_model = body["model"].replace("-Search", "")
+                model_name = self.search_models.get(body["model"], base_model.lower())
+            else:
+                # Non-search models keep the original logic
+                is_deepseek_model = body["model"] in ["DeepSeek-R1", "DeepSeek-V3"]
+                model_name = "deepseek" if is_deepseek_model else body["model"].lower()  # make sure it is lowercase
+
+            # Determine the userAction parameter
+            user_action = ""
+            if "DeepSeek-R1" in body["model"]:
+                user_action = "deep"
+            if is_search_model:
+                # If there is already a value, separate with a comma
+                if user_action:
+                    user_action += ",online"
+                else:
+                    user_action = "online"  # set userAction to "online" for search models
+
+            payload = {
+                "stream": True,
+                "botCode": "AI_SEARCH",
+                "userAction": user_action,
+                "model": model_name,
+                "conversationId": conversation_id,
+                "question": full_prompt,
+            }
+
+            timestamp = str(int(time.time()))
+            nonce = self._nanoid(21)
+            sign = self._generate_sign(timestamp, payload, nonce)
+
+            headers = {
+                "Origin": "https://ai.dangbei.com",
+                "Referer": "https://ai.dangbei.com/",
+                "User-Agent": self.user_agent,
+                "deviceId": device_id,
+                "nonce": nonce,
+                "sign": sign,
+                "timestamp": timestamp,
+            }
+
+            api = f"{config.API_DOMAIN}/ai-search/chatApi/v1/chat"  # use API_DOMAIN from config
+
+            async with httpx.AsyncClient() as client:
+                async with client.stream("POST", api, json=payload, headers=headers, timeout=1200) as response:
+                    if response.status_code != 200:
+                        error = await response.aread()
+                        yield {"error": self._format_error(response.status_code, error)}
+                        return
+
+                    card_messages = []  # collect card messages
+
+                    async for line in response.aiter_lines():
+                        if not line.startswith(self.data_prefix):
+                            continue
+
+                        json_str = line[len(self.data_prefix):]
+
+                        try:
+                            data = json.loads(json_str)
+                        except json.JSONDecodeError as e:
+                            yield {"error": f"JSONDecodeError: {str(e)}", "data": json_str}
+                            return
+
+                        if data.get("type") == "answer":
+                            content = data.get("content")
+                            content_type = data.get("content_type")
+
+                            # Handle the thinking state
+                            if thinking_state["thinking"] == -1 and content_type == "thinking":
+                                thinking_state["thinking"] = 0
+                                yield {"choices": [{"delta": {"content": "<think>\n\n"}, "finish_reason": None}]}
+                            elif thinking_state["thinking"] == 0 and content_type == "text":
+                                thinking_state["thinking"] = 1
+                                yield {"choices": [{"delta": {"content": "\n"}, "finish_reason": None}]}
+                                yield {"choices": [{"delta": {"content": "</think>"}, "finish_reason": None}]}
+                                yield {"choices": [{"delta": {"content": "\n\n"}, "finish_reason": None}]}
+
+                            # Handle card content
+                            if content_type == "card":
+                                try:
+                                    card_content = json.loads(content)
+                                    card_items = card_content["cardInfo"]["cardItems"]
+                                    markdown_output = "\n\n---\n\n"
+
+                                    # Search keywords (type: 2001)
+                                    search_keywords = next((item for item in card_items if item["type"] == "2001"), None)
+                                    if search_keywords:
+                                        keywords = json.loads(search_keywords["content"])
+                                        markdown_output += f"Search keywords: {'; '.join(keywords)}\n"
+
+                                    # Search results (type: 2002)
+                                    search_results = next((item for item in card_items if item["type"] == "2002"), None)
+                                    if search_results:
+                                        results = json.loads(search_results["content"])
+                                        markdown_output += f"Found {len(results)} search results:\n"
+                                        for result in results:
+                                            markdown_output += f"[{result['idIndex']}] [{result['name']}]({result['url']}) source: {result['siteName']}\n"
+
+                                    card_messages.append(markdown_output)
+                                except Exception as e:
+                                    logger.error(f"Error processing card: {str(e)}")
+
+                            # Handle plain text content
+                            if content and content_type in ["text", "thinking"]:
+                                yield {"choices": [{"delta": {"content": content}, "finish_reason": None}]}
+
+                    # Emit all collected card messages at the end
+                    if card_messages:
+                        yield {"choices": [{"delta": {"content": "".join(card_messages)}, "finish_reason": None}]}
+
+                    # Append metadata at the end
+                    yield {"choices": [{"delta": {"meta": {
+                        "device_id": device_id,
+                        "conversation_id": conversation_id
+                    }}, "finish_reason": None}]}
+
+        except Exception as e:
+            logger.error(f"Error in pipe: {str(e)}")
+            yield {"error": self._format_exception(e)}
+
+    def _format_error(self, status_code: int, error: bytes) -> str:
+        error_str = error.decode(errors="ignore") if isinstance(error, bytes) else error
+        return json.dumps({"error": f"HTTP {status_code}: {error_str}"}, ensure_ascii=False)
+
+    def _format_exception(self, e: Exception) -> str:
+        return json.dumps({"error": f"{type(e).__name__}: {str(e)}"}, ensure_ascii=False)
+
+    def _nanoid(self, size=21) -> str:
+        url_alphabet = "useandom-26T198340PX75pxJACKVERYMINDBUSHWOLF_GQZbfghjklqvwyzrict"
+        random_bytes = secrets.token_bytes(size)
+        return "".join([url_alphabet[b & 63] for b in reversed(random_bytes)])
+
+    def _generate_sign(self, timestamp: str, payload: dict, nonce: str) -> str:
+        payload_str = json.dumps(payload, separators=(",", ":"), ensure_ascii=False)
+        sign_str = f"{timestamp}{payload_str}{nonce}"
+        return hashlib.md5(sign_str.encode("utf-8")).hexdigest().upper()
+
+    async def _create_conversation(self, device_id: str) -> str:
+        """Create a new conversation."""
+        payload = {"botCode": "AI_SEARCH"}
+        timestamp = str(int(time.time()))
+        nonce = self._nanoid(21)
+        sign = self._generate_sign(timestamp, payload, nonce)
+
+        headers = {
+            "Origin": "https://ai.dangbei.com",
+            "Referer": "https://ai.dangbei.com/",
+            "User-Agent": self.user_agent,
+            "deviceId": device_id,
+            "nonce": nonce,
+            "sign": sign,
+            "timestamp": timestamp,
+        }
+
+        api = f"{config.API_DOMAIN}/ai-search/conversationApi/v1/create"
+        try:
+            async with httpx.AsyncClient() as client:
+                response = await client.post(api, json=payload, headers=headers)
+                if response.status_code == 200:
+                    data = response.json()
+                    if data.get("success"):
+                        return data["data"]["conversationId"]
+        except Exception as e:
+            logger.error(f"Error creating conversation: {str(e)}")
+        return None
+
+# Create the instance
+pipe = Pipe()
+
+@app.post("/v1/chat/completions")
+async def chat(request: ChatRequest, authorization: str = Header(None)):
+    """
+    OpenAI API compatible chat endpoint.
+    """
+    # Log the incoming request
+    logger.info(f"Received chat request: {request.model_dump()}")
+
+    await verify_api_key(authorization)
+
+    # Use the messages_as_dicts property
+    request_data = request.model_dump()
+    request_data['messages'] = request.messages_as_dicts
+
+    async def response_generator():
+        """Streaming response generator."""
+        thinking_content = []
+        is_thinking = False
+
+        async for chunk in pipe.pipe(request_data):
+            if "choices" in chunk and chunk["choices"]:
+                delta = chunk["choices"][0]["delta"]
+                if "content" in delta:
+                    content = delta["content"]
+                    if content == "<think>\n":
+                        is_thinking = True
+                    elif content == "\n</think>\n\n":
+                        is_thinking = False
+                    if is_thinking and content != "<think>\n":
+                        thinking_content.append(content)
+
+            yield f"data: {json.dumps(chunk, ensure_ascii=False)}\n\n"
+        yield "data: [DONE]\n\n"
+
+    if request.stream:
+        return StreamingResponse(response_generator(), media_type="text/event-stream")
+
+    # Non-streaming response
+    content = ""
+    meta = None
+    try:
+        async for chunk in pipe.pipe(request_data):
+            if "choices" in chunk and chunk["choices"]:
+                delta = chunk["choices"][0]["delta"]
+                if "content" in delta:
+                    content += delta["content"]
+                if "meta" in delta:
+                    meta = delta["meta"]
+    except Exception as e:
+        logger.error(f"Error processing chat request: {str(e)}")
+        raise HTTPException(status_code=500, detail="Internal Server Error")
+
+    parts = content.split("\n\n\n", 1)
+    reasoning_content = parts[0] if len(parts) > 0 else ""
+    content = parts[1] if len(parts) > 1 else ""
+
+    # Handle nested think tags and special characters
+    if reasoning_content:
+        # First try to find the outermost think tags
+        start_idx = reasoning_content.find("<think>")
+        end_idx = reasoning_content.rfind("</think>")
+
+        if start_idx != -1 and end_idx != -1:
+            # If a complete outer tag pair was found, extract its inner content
+            inner_content = reasoning_content[start_idx + 7:end_idx].strip()
+            # Remove inner think tags
+            inner_content = inner_content.replace("<think>", "").replace("</think>", "").strip()
+            reasoning_content = f"<think>\n{inner_content}\n</think>"
+        else:
+            # Otherwise remove all think tags and re-wrap
+            reasoning_content = reasoning_content.replace("<think>", "").replace("</think>", "").strip()
+            reasoning_content = f"<think>\n{reasoning_content}\n</think>"
+
+    return {
+        "id": str(uuid.uuid4()),
+        "object": "chat.completion",
+        "created": int(time.time()),
+        "model": request.model,
+        "choices": [{
+            "message": {
+                "role": "assistant",
+                "reasoning_content": reasoning_content,
+                "content": content,
+                "meta": meta
+            },
+            "finish_reason": "stop"
+        }]
+    }
+
+@app.get("/v1/models")
+async def get_models(authorization: str = Header(None)):
+    # Verify the API key
+    await verify_api_key(authorization)
+
+    current_time = int(time.time())
+    return {
+        "object": "list",
+        "data": [
+            # Base models
+            {
+                "id": "DeepSeek-R1",
+                "object": "model",
+                "created": current_time,
+                "owned_by": "library"
+            },
+            {
+                "id": "DeepSeek-V3",
+                "object": "model",
+                "created": current_time,
+                "owned_by": "library"
+            },
+            {
+                "id": "Doubao",  # capitalized
+                "object": "model",
+                "created": current_time,
+                "owned_by": "library"
+            },
+            {
+                "id": "Qwen",  # capitalized
+                "object": "model",
+                "created": current_time,
+                "owned_by": "library"
+            },
+            # Search-enabled models
+            {
+                "id": "DeepSeek-R1-Search",
+                "object": "model",
+                "created": current_time,
+                "owned_by": "library",
+                "features": ["online_search"]
+            },
+            {
+                "id": "DeepSeek-V3-Search",
+                "object": "model",
+                "created": current_time,
+                "owned_by": "library",
+                "features": ["online_search"]
+            },
+            {
+                "id": "Doubao-Search",  # capitalized
+                "object": "model",
+                "created": current_time,
+                "owned_by": "library",
+                "features": ["online_search"]
+            },
+            {
+                "id": "Qwen-Search",  # capitalized
+                "object": "model",
+                "created": current_time,
+                "owned_by": "library",
+                "features": ["online_search"]
+            }
+        ]
+    }
+@app.get("/")
+def index():
+    return "it works!"
+
+
+if __name__ == "__main__":
+    uvicorn.run(app, host="0.0.0.0", port=8000)
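
As a quick sanity check of the endpoint added in app.py, a minimal client sketch (not part of the commit) might look like the following. It assumes the server is running locally via uvicorn on port 8000, that the default API_KEY from Config has not been overridden, and that the model ID is one of the entries returned by GET /v1/models.

# Minimal client sketch against the proxy added in app.py (assumptions: local
# uvicorn instance on port 8000, default API_KEY from Config still in place).
import httpx

BASE_URL = "http://localhost:8000"          # local server, an assumption
API_KEY = "sk_gUXNcLwm0rnnEt55Mg8hq88"      # default value of Config.API_KEY

response = httpx.post(
    f"{BASE_URL}/v1/chat/completions",
    headers={"Authorization": f"Bearer {API_KEY}"},
    json={
        "model": "DeepSeek-R1",             # any ID listed by GET /v1/models
        "messages": [{"role": "user", "content": "Hello"}],
        "stream": False,                    # non-streaming: a single JSON body
    },
    timeout=120,
)
response.raise_for_status()
data = response.json()
print(data["choices"][0]["message"]["content"])

With "stream": True the endpoint instead returns a text/event-stream body of "data: ..." chunks ending in "data: [DONE]", matching the response_generator above.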
docker-compose.yml
ADDED
@@ -0,0 +1,20 @@
+version: '3.8'
+
+services:
+  api:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    image: dangbei/api-proxy:latest  # image name
+    container_name: db-api-service  # container name
+    ports:
+      - "8000:8000"
+    environment:
+      - API_KEY=sk_gUXNcLwm0rnnEt55Mg8hq88
+      - API_DOMAIN=https://ai-api.dangbei.net
+    restart: unless-stopped
+    healthcheck:
+      test: ["CMD", "curl", "-f", "http://localhost:8000"]
+      interval: 30s
+      timeout: 10s
+      retries: 3
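
For reference, a rough Python equivalent of the compose healthcheck above (again not part of the commit), assuming the container publishes port 8000 on localhost as configured:

# Probe the root route served by app.py, mirroring the curl-based healthcheck
# (a sketch, assuming the container's port 8000 is reachable on localhost).
import httpx

resp = httpx.get("http://localhost:8000/", timeout=10)
print(resp.status_code, resp.text)   # expect 200 and the short "it works!" banner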
requirements.txt
ADDED
@@ -0,0 +1,4 @@
+fastapi>=0.104.1
+httpx[http2]>=0.25.2
+pydantic>=2.5.2
+uvicorn>=0.24.0
vercel.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "version": 2,
+  "builds": [
+    {
+      "src": "app.py",
+      "use": "@vercel/python"
+    }
+  ],
+  "routes": [
+    {
+      "src": "/(.*)",
+      "dest": "app.py"
+    }
+  ],
+  "env": {
+    "API_KEY": "sk_gUXNcLwm0rnnEt55Mg8hq88",
+    "API_DOMAIN": "https://ai-api.dangbei.net",
+    "MAX_HISTORY": "30",
+    "DEVICE_CONVERSATIONS_LIMIT": "10"
+  }
+}