Update app.py
app.py
CHANGED
@@ -9,22 +9,23 @@ from typing import Optional, Dict, List, Any
 import uvicorn
 from pydantic import BaseModel
 
-#
-API_CHALLENGE_URL = 'https://api.eqing.tech/api/altcaptcha/challenge'
-NEXTWAY_CHAT_URL = 'https://origin.eqing.tech/api/openai/v1/chat/completions'
-CREDENTIAL_EXPIRY_MARGIN = 60 * 1000
-PORT = 7860
-API_ENDPOINT = '/v1/chat/completions'
-MODEL_NAME = "gpt-4o-free"
-REQUEST_TIMEOUT = 480
-MAX_RETRIES = 3
-RETRY_DELAY = 1
-
-# Global variables
-current_credential = None
-credential_expiry = None
-is_refreshing_credential = False
+# Configuration
+API_CHALLENGE_URL = 'https://api.eqing.tech/api/altcaptcha/challenge'  # captcha challenge endpoint
+NEXTWAY_CHAT_URL = 'https://origin.eqing.tech/api/openai/v1/chat/completions'  # chat completions endpoint
+CREDENTIAL_EXPIRY_MARGIN = 60 * 1000  # credential expiry margin (60 seconds, in milliseconds)
+PORT = 7860  # server port
+API_ENDPOINT = '/v1/chat/completions'  # API endpoint path
+MODEL_NAME = "gpt-4o-free"  # default model name
+REQUEST_TIMEOUT = 480  # request timeout (seconds)
+MAX_RETRIES = 3  # maximum number of retries
+RETRY_DELAY = 1  # retry delay (seconds)
+
+# Global variables
+current_credential = None  # current credential
+credential_expiry = None  # credential expiry time
+is_refreshing_credential = False  # whether a credential refresh is in progress
 
+# Model mapping
 MODEL_MAPPING = {
     'gpt-4o-all-lite': 'gpt-4o-mini',
     'gpt-4o-image': 'gpt-4o-mini-image-free',
@@ -35,6 +36,7 @@ MODEL_MAPPING = {
 app = FastAPI()
 
 class ChatRequest(BaseModel):
+    """Data model for a chat request."""
     messages: List[Dict[str, str]]
     model: Optional[str]
     stream: Optional[bool] = True
@@ -44,59 +46,80 @@ class ChatRequest(BaseModel):
     top_p: Optional[float] = 1
     max_tokens: Optional[int] = 4000
 
-async def
-    async for chunk in response.content:
-        buffer += chunk.decode()
-        lines = buffer.split('\n')
-
-        # Process complete lines
-        for line in lines[:-1]:
-            line = line.strip()
-            if line.startswith('data: '):
-                content = process_data_line(line[5:].strip())
-                if content:
-                    complete_response += content
-
-        # Keep the last incomplete line in buffer
-        buffer = lines[-1] if lines else ""
+async def extract_content(text: str) -> str:
+    """
+    Extract content from the response text.
+
+    Args:
+        text: response text
+    Returns:
+        the extracted AI response content
+    """
+    lines = text.split('\n')
+    ai_response = ''
+    ignored_id = 'chatcmpl-nxaTnETiUyAVBjdRwlr38Yt3'
+    created = 1687070102
+
+    for line in lines:
+        line = line.strip()
+        if line.startswith('data:'):
+            data_str = line[5:].strip()
+            if not data_str or data_str in ['[ORIGIN]', '[DONE]']:
+                continue
+
+            try:
+                json_data = json.loads(data_str)
+                # Skip specific responses
+                if json_data.get('id') == ignored_id or json_data.get('created') == created:
+                    continue
+
+                # Extract the content
+                if (json_data.get('choices') and
+                    json_data['choices'][0].get('delta') and
+                    'content' in json_data['choices'][0]['delta']):
+                    content = json_data['choices'][0]['delta']['content']
+                    ai_response += content
+
+            except json.JSONDecodeError:
+                print('Skipping non-JSON data')
+
+    return ai_response
 
 async def solve_challenge(challenge: str, salt: str, algorithm: str = "SHA-512", max_number: int = 1000000):
+    """
+    Solve the captcha challenge.
+
+    Args:
+        challenge: challenge string
+        salt: salt value
+        algorithm: hash algorithm
+        max_number: maximum number of attempts
+    Returns:
+        a solution dict
+    """
     start_time = time.time()
 
     for number in range(max_number):
         hash_value = await verify_hash(salt, number, algorithm)
         if hash_value == challenge:
-            return {
+            return {
+                "number": number,
+                "took": int((time.time() - start_time) * 1000)
+            }
 
     return None
 
 async def verify_hash(salt: str, number: int, algorithm: str) -> str:
+    """
+    Compute the verification hash.
+
+    Args:
+        salt: salt value
+        number: candidate number
+        algorithm: hash algorithm
+    Returns:
+        the hex digest string
+    """
     input_str = f"{salt}{number}"
 
     if algorithm == "SHA-512":
@@ -106,16 +129,22 @@ async def verify_hash(salt: str, number: int, algorithm: str) -> str:
         hash_obj = hashlib.sha256(input_str.encode())
         return hash_obj.hexdigest()
     else:
-        raise ValueError(f"
+        raise ValueError(f"Unsupported algorithm: {algorithm}")
 
 async def generate_credential():
+    """
+    Generate a new credential.
+
+    Returns:
+        a credential info dict
+    """
     global current_credential, credential_expiry
 
     async with aiohttp.ClientSession() as session:
         try:
             async with session.get(API_CHALLENGE_URL) as response:
                 if response.status != 200:
-                    print(f"
+                    print(f"Captcha challenge request failed, status code: {response.status}")
                     return None
 
                 data = await response.json()
@@ -127,7 +156,7 @@ async def generate_credential():
             )
 
             if not solution:
-                print("
+                print("Failed to solve the captcha challenge")
                 return None
 
             credential_obj = {
@@ -145,10 +174,16 @@ async def generate_credential():
             return {"credential": credential, "expiry": expiry}
 
         except Exception as e:
-            print(f"
+            print(f"Error while generating credential: {e}")
             return None
 
 async def get_credential():
+    """
+    Get a valid credential.
+
+    Returns:
+        the currently valid credential
+    """
     global current_credential, credential_expiry, is_refreshing_credential
 
     if (not current_credential or
@@ -165,58 +200,27 @@ async def get_credential():
         finally:
             is_refreshing_credential = False
     else:
-        await asyncio.sleep(2)
+        await asyncio.sleep(2)  # wait for another task to finish refreshing the credential
 
     return current_credential
 
-async def handle_chat_request(model: str, messages: List[Dict[str, str]], retries=0):
-    captcha_token = await get_credential()
-    if not captcha_token:
-        return None
-
-    body = {
-        "messages": messages,
-        "stream": True,
-        "model": model,
-        "temperature": 0.5,
-        "presence_penalty": 0,
-        "frequency_penalty": 0,
-        "top_p": 1,
-        "max_tokens": 4000,
-        "captchaToken": captcha_token
-    }
-
-    async with aiohttp.ClientSession() as session:
-        try:
-            async with session.post(
-                NEXTWAY_CHAT_URL,
-                json=body,
-                timeout=aiohttp.ClientTimeout(total=REQUEST_TIMEOUT)
-            ) as response:
-                if response.status != 200:
-                    if retries < MAX_RETRIES:
-                        await asyncio.sleep(RETRY_DELAY)
-                        return await handle_chat_request(model, messages, retries + 1)
-                    return None
-
-                return await extract_streaming_content(response)
-
-        except (aiohttp.ClientError, asyncio.TimeoutError) as e:
-            print(f"Error in chat request: {e}")
-            if retries < MAX_RETRIES:
-                await asyncio.sleep(RETRY_DELAY)
-                return await handle_chat_request(model, messages, retries + 1)
-            return None
-
 @app.post(API_ENDPOINT)
 async def chat_endpoint(request: ChatRequest):
+    """
+    Chat API endpoint.
+
+    Args:
+        request: the chat request object
+    Returns:
+        the chat response
+    """
     try:
         model = MODEL_MAPPING.get(request.model, request.model or MODEL_NAME)
         response_content = await handle_chat_request(model, request.messages)
 
         if response_content is None:
             return Response(
-                content="
+                content="Failed to get a response from the API",
                 status_code=500
             )
 
@@ -234,8 +238,79 @@ async def chat_endpoint(request: ChatRequest):
         }
 
     except Exception as e:
-        print(f"
-        return Response(content="
+        print(f"Error while handling chat request: {e}")
+        return Response(content="Internal server error", status_code=500)
+
+async def handle_chat_request(model: str, messages: List[Dict[str, str]]):
+    """
+    Handle a chat request.
+
+    Args:
+        model: model name
+        messages: list of messages
+    Returns:
+        the chat response content
+    """
+    captcha_token = await get_credential()
+    if not captcha_token:
+        return None
+
+    body = {
+        "messages": messages,
+        "stream": True,
+        "model": model,
+        "temperature": 0.5,
+        "presence_penalty": 0,
+        "frequency_penalty": 0,
+        "top_p": 1,
+        "max_tokens": 4000,
+        "captchaToken": captcha_token
+    }
+
+    timeout = aiohttp.ClientTimeout(total=REQUEST_TIMEOUT)
+    async with aiohttp.ClientSession(timeout=timeout) as session:
+        try:
+            async with session.post(NEXTWAY_CHAT_URL, json=body) as response:
+                if response.status != 200:
+                    print(f"Request failed, status code: {response.status}")
+                    return None
+
+                complete_response = ""
+                buffer = ""  # holds incomplete data chunks
+
+                async for chunk in response.content.iter_chunks():
+                    if chunk[0]:  # chunk[0] is the data, chunk[1] is a boolean indicating whether it is the last piece
+                        try:
+                            chunk_text = (buffer + chunk[0].decode()).strip()
+                            buffer = ""  # clear the buffer
+
+                            # Handle possibly incomplete JSON
+                            if chunk_text.endswith('}'):
+                                content = await extract_content(chunk_text)
+                                if content:
+                                    complete_response += content
+                            else:
+                                buffer = chunk_text  # keep the incomplete data in the buffer
+
+                        except UnicodeDecodeError as e:
+                            print(f"Decode error: {e}")
+                            buffer = ""  # clear the buffer and skip the corrupted data
+                            continue
+
+                # Check the final response
+                if not complete_response:
+                    print("Warning: received an empty response")
+                    return None
+
+                return complete_response.strip()
+
+        except asyncio.TimeoutError:
+            print("Request timed out")
+            return None
+        except Exception as e:
+            print(f"Error while processing the request: {e}")
+            await get_credential()  # refresh the credential
+            return None
 
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=PORT)
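For reference, a minimal client-side sketch of how the resulting /v1/chat/completions endpoint could be exercised is shown below. It assumes the server above is running locally on PORT 7860; the payload fields mirror the ChatRequest model, while the variable names (payload, req, resp) are illustrative only and not part of the commit.

# Minimal client sketch (assumption: the server above is running on localhost:7860).
import json
import urllib.request

payload = {
    "model": "gpt-4o-all-lite",  # mapped to 'gpt-4o-mini' via MODEL_MAPPING
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": True,
}

req = urllib.request.Request(
    "http://localhost:7860/v1/chat/completions",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
    method="POST",
)

with urllib.request.urlopen(req, timeout=60) as resp:
    # Print the status and the raw body returned by chat_endpoint.
    print(resp.status)
    print(resp.read().decode("utf-8"))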