Update app/main.py
app/main.py  (+422, -378)
@@ -1,378 +1,422 @@
[Previous revision: 378 lines, removed in full; replaced by the updated file below.]
from fastapi import FastAPI, HTTPException, Request, Depends, status
from fastapi.responses import JSONResponse, StreamingResponse, HTMLResponse
from .models import ChatCompletionRequest, ChatCompletionResponse, ErrorResponse, ModelList
from .gemini import GeminiClient, ResponseWrapper
from .utils import handle_gemini_error, protect_from_abuse, APIKeyManager, test_api_key, format_log_message
import os
import json
import asyncio
from typing import Literal
import random
import requests
from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler
import sys
import logging

logging.getLogger("uvicorn").disabled = True
logging.getLogger("uvicorn.access").disabled = True

# 配置 logger
logger = logging.getLogger("my_logger")
logger.setLevel(logging.DEBUG)


def translate_error(message: str) -> str:
    if "quota exceeded" in message.lower():
        return "API 密钥配额已用尽"
    if "invalid argument" in message.lower():
        return "无效参数"
    if "internal server error" in message.lower():
        return "服务器内部错误"
    if "service unavailable" in message.lower():
        return "服务不可用"
    return message


def handle_exception(exc_type, exc_value, exc_traceback):
    if issubclass(exc_type, KeyboardInterrupt):
        # KeyboardInterrupt 交还给默认异常钩子处理
        sys.__excepthook__(exc_type, exc_value, exc_traceback)
        return
    error_message = translate_error(str(exc_value))
    log_msg = format_log_message('ERROR', f"未捕获的异常: {error_message}", extra={'status_code': 500, 'error_message': error_message})
    logger.error(log_msg)


sys.excepthook = handle_exception

app = FastAPI()

# 统计相关功能
STATS_FILE = "stats.json"

def load_stats():
    try:
        with open(STATS_FILE, "r") as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        return {
            "total_calls": 0,
            "today_calls": 0,
            "total_tokens": 0,
            "today_tokens": 0,
            "last_reset": datetime.now().isoformat()
        }

def save_stats(stats):
    with open(STATS_FILE, "w") as f:
        json.dump(stats, f, indent=2)

def update_stats(calls=0, tokens=0):
    stats = load_stats()
    stats["total_calls"] += calls
    stats["today_calls"] += calls
    stats["total_tokens"] += tokens
    stats["today_tokens"] += tokens
    save_stats(stats)

def reset_daily_stats():
    stats = load_stats()
    stats["today_calls"] = 0
    stats["today_tokens"] = 0
    stats["last_reset"] = datetime.now().isoformat()
    save_stats(stats)
    logger.info("每日统计数据已重置")

# 初始化定时任务
scheduler = BackgroundScheduler()
scheduler.add_job(reset_daily_stats, 'cron', hour=0, minute=0)
scheduler.start()

PASSWORD = os.environ.get("PASSWORD", "123")
MAX_REQUESTS_PER_MINUTE = int(os.environ.get("MAX_REQUESTS_PER_MINUTE", "30"))
MAX_REQUESTS_PER_DAY_PER_IP = int(
    os.environ.get("MAX_REQUESTS_PER_DAY_PER_IP", "600"))
# MAX_RETRIES = int(os.environ.get('MaxRetries', '3').strip() or '3')
RETRY_DELAY = 1
MAX_RETRY_DELAY = 16
safety_settings = [
    {
        "category": "HARM_CATEGORY_HARASSMENT",
        "threshold": "BLOCK_NONE"
    },
    {
        "category": "HARM_CATEGORY_HATE_SPEECH",
        "threshold": "BLOCK_NONE"
    },
    {
        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "threshold": "BLOCK_NONE"
    },
    {
        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
        "threshold": "BLOCK_NONE"
    },
    {
        "category": 'HARM_CATEGORY_CIVIC_INTEGRITY',
        "threshold": 'BLOCK_NONE'
    }
]
safety_settings_g2 = [
    {
        "category": "HARM_CATEGORY_HARASSMENT",
        "threshold": "OFF"
    },
    {
        "category": "HARM_CATEGORY_HATE_SPEECH",
        "threshold": "OFF"
    },
    {
        "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "threshold": "OFF"
    },
    {
        "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
        "threshold": "OFF"
    },
    {
        "category": 'HARM_CATEGORY_CIVIC_INTEGRITY',
        "threshold": 'OFF'
    }
]

key_manager = APIKeyManager()  # 实例化 APIKeyManager,栈会在 __init__ 中初始化
current_api_key = key_manager.get_available_key()


def switch_api_key():
    global current_api_key
    key = key_manager.get_available_key()  # get_available_key 会处理栈的逻辑
    if key:
        current_api_key = key
        log_msg = format_log_message('INFO', f"API key 替换为 → {current_api_key[:8]}...", extra={'key': current_api_key[:8], 'request_type': 'switch_key'})
        logger.info(log_msg)
    else:
        log_msg = format_log_message('ERROR', "API key 替换失败,所有API key都已尝试,请重新配置或稍后重试", extra={'key': 'N/A', 'request_type': 'switch_key', 'status_code': 'N/A'})
        logger.error(log_msg)


async def check_keys():
    available_keys = []
    for key in key_manager.api_keys:
        is_valid = await test_api_key(key)
        status_msg = "有效" if is_valid else "无效"
        log_msg = format_log_message('INFO', f"API Key {key[:10]}... {status_msg}.")
        logger.info(log_msg)
        if is_valid:
            available_keys.append(key)
    if not available_keys:
        log_msg = format_log_message('ERROR', "没有可用的 API 密钥!", extra={'key': 'N/A', 'request_type': 'startup', 'status_code': 'N/A'})
        logger.error(log_msg)
    return available_keys


@app.on_event("startup")
async def startup_event():
    log_msg = format_log_message('INFO', "Starting Gemini API proxy...")
    logger.info(log_msg)
    available_keys = await check_keys()
    if available_keys:
        key_manager.api_keys = available_keys
        key_manager._reset_key_stack()  # 启动时也确保创建随机栈
        key_manager.show_all_keys()
        log_msg = format_log_message('INFO', f"可用 API 密钥数量:{len(key_manager.api_keys)}")
        logger.info(log_msg)
        # MAX_RETRIES = len(key_manager.api_keys)
        log_msg = format_log_message('INFO', f"最大重试次数设置为:{len(key_manager.api_keys)}")  # 添加日志
        logger.info(log_msg)
        if key_manager.api_keys:
            all_models = await GeminiClient.list_available_models(key_manager.api_keys[0])
            GeminiClient.AVAILABLE_MODELS = [model.replace(
                "models/", "") for model in all_models]
            log_msg = format_log_message('INFO', "Available models loaded.")
            logger.info(log_msg)

@app.get("/v1/models", response_model=ModelList)
def list_models():
    log_msg = format_log_message('INFO', "Received request to list models", extra={'request_type': 'list_models', 'status_code': 200})
    logger.info(log_msg)
    return ModelList(data=[{"id": model, "object": "model", "created": 1678888888, "owned_by": "organization-owner"} for model in GeminiClient.AVAILABLE_MODELS])


async def verify_password(request: Request):
    if PASSWORD:
        auth_header = request.headers.get("Authorization")
        if not auth_header or not auth_header.startswith("Bearer "):
            raise HTTPException(
                status_code=401, detail="Unauthorized: Missing or invalid token")
        token = auth_header.split(" ")[1]
        if token != PASSWORD:
            raise HTTPException(
                status_code=401, detail="Unauthorized: Invalid token")

async def process_request(chat_request: ChatCompletionRequest, http_request: Request, request_type: Literal['stream', 'non-stream']):
    global current_api_key
    protect_from_abuse(
        http_request, MAX_REQUESTS_PER_MINUTE, MAX_REQUESTS_PER_DAY_PER_IP)
    if chat_request.model not in GeminiClient.AVAILABLE_MODELS:
        error_msg = "无效的模型"
        extra_log = {'request_type': request_type, 'model': chat_request.model, 'status_code': 400, 'error_message': error_msg}
        log_msg = format_log_message('ERROR', error_msg, extra=extra_log)
        logger.error(log_msg)
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST, detail=error_msg)

    key_manager.reset_tried_keys_for_request()  # 在每次请求处理开始时重置 tried_keys 集合

    contents, system_instruction = GeminiClient.convert_messages(
        GeminiClient, chat_request.messages)

    retry_attempts = len(key_manager.api_keys) if key_manager.api_keys else 1  # 重试次数等于密钥数量,至少尝试 1 次
    for attempt in range(1, retry_attempts + 1):
        if attempt == 1:
            current_api_key = key_manager.get_available_key()  # 每次循环开始都获取新的 key, 栈逻辑在 get_available_key 中处理

        if current_api_key is None:  # 检查是否获取到 API 密钥
            log_msg_no_key = format_log_message('WARNING', "没有可用的 API 密钥,跳过本次尝试", extra={'request_type': request_type, 'model': chat_request.model, 'status_code': 'N/A'})
            logger.warning(log_msg_no_key)
            break  # 如果没有可用密钥,跳出循环

        extra_log = {'key': current_api_key[:8], 'request_type': request_type, 'model': chat_request.model, 'status_code': 'N/A', 'error_message': ''}
        log_msg = format_log_message('INFO', f"第 {attempt}/{retry_attempts} 次尝试 ... 使用密钥: {current_api_key[:8]}...", extra=extra_log)
        logger.info(log_msg)

        gemini_client = GeminiClient(current_api_key)
        try:
            if chat_request.stream:
                async def stream_generator():
                    try:
                        async for chunk in gemini_client.stream_chat(chat_request, contents, safety_settings_g2 if 'gemini-2.0-flash-exp' in chat_request.model else safety_settings, system_instruction):
                            formatted_chunk = {"id": "chatcmpl-someid", "object": "chat.completion.chunk", "created": 1234567,
                                               "model": chat_request.model, "choices": [{"delta": {"role": "assistant", "content": chunk}, "index": 0, "finish_reason": None}]}
                            yield f"data: {json.dumps(formatted_chunk)}\n\n"
                        yield "data: [DONE]\n\n"

                    except asyncio.CancelledError:
                        extra_log_cancel = {'key': current_api_key[:8], 'request_type': request_type, 'model': chat_request.model, 'error_message': '客户端已断开连接'}
                        log_msg = format_log_message('INFO', "客户端连接已中断", extra=extra_log_cancel)
                        logger.info(log_msg)
                    except Exception as e:
                        error_detail = handle_gemini_error(
                            e, current_api_key, key_manager)
                        yield f"data: {json.dumps({'error': {'message': error_detail, 'type': 'gemini_error'}})}\n\n"
                return StreamingResponse(stream_generator(), media_type="text/event-stream")
            else:
                async def run_gemini_completion():
                    try:
                        response_content = await asyncio.to_thread(gemini_client.complete_chat, chat_request, contents, safety_settings_g2 if 'gemini-2.0-flash-exp' in chat_request.model else safety_settings, system_instruction)
                        return response_content
                    except asyncio.CancelledError:
                        extra_log_gemini_cancel = {'key': current_api_key[:8], 'request_type': request_type, 'model': chat_request.model, 'error_message': '客户端断开导致API调用取消'}
                        log_msg = format_log_message('INFO', "API调用因客户端断开而取消", extra=extra_log_gemini_cancel)
                        logger.info(log_msg)
                        raise

                async def check_client_disconnect():
                    while True:
                        if await http_request.is_disconnected():
                            extra_log_client_disconnect = {'key': current_api_key[:8], 'request_type': request_type, 'model': chat_request.model, 'error_message': '检测到客户端断开连接'}
                            log_msg = format_log_message('INFO', "客户端连接已中断,正在取消API请求", extra=extra_log_client_disconnect)
                            logger.info(log_msg)
                            return True
                        await asyncio.sleep(0.5)

                gemini_task = asyncio.create_task(run_gemini_completion())
                disconnect_task = asyncio.create_task(check_client_disconnect())

                try:
                    done, pending = await asyncio.wait(
                        [gemini_task, disconnect_task],
                        return_when=asyncio.FIRST_COMPLETED
                    )

                    if disconnect_task in done:
                        gemini_task.cancel()
                        try:
                            await gemini_task
                        except asyncio.CancelledError:
                            extra_log_gemini_task_cancel = {'key': current_api_key[:8], 'request_type': request_type, 'model': chat_request.model, 'error_message': 'API任务已终止'}
                            log_msg = format_log_message('INFO', "API任务已成功取消", extra=extra_log_gemini_task_cancel)
                            logger.info(log_msg)
                        # 直接抛出异常中断循环
                        raise HTTPException(status_code=status.HTTP_408_REQUEST_TIMEOUT, detail="客户端连接已中断")

                    if gemini_task in done:
                        disconnect_task.cancel()
                        try:
                            await disconnect_task
                        except asyncio.CancelledError:
                            pass
                        response_content = gemini_task.result()
                        if response_content.text == "":
                            extra_log_empty_response = {'key': current_api_key[:8], 'request_type': request_type, 'model': chat_request.model, 'status_code': 204}
                            log_msg = format_log_message('INFO', "Gemini API 返回空响应", extra=extra_log_empty_response)
                            logger.info(log_msg)
                            # 继续循环
                            continue
                        response = ChatCompletionResponse(id="chatcmpl-someid", object="chat.completion", created=1234567890, model=chat_request.model,
                                                          choices=[{"index": 0, "message": {"role": "assistant", "content": response_content.text}, "finish_reason": "stop"}])
                        extra_log_success = {'key': current_api_key[:8], 'request_type': request_type, 'model': chat_request.model, 'status_code': 200}
                        log_msg = format_log_message('INFO', "请求处理成功", extra=extra_log_success)
                        logger.info(log_msg)
                        # 更新统计
                        tokens = response_content.total_token_count or 0
                        update_stats(calls=1, tokens=tokens)
                        return response

                except asyncio.CancelledError:
                    extra_log_request_cancel = {'key': current_api_key[:8], 'request_type': request_type, 'model': chat_request.model, 'error_message': "请求被取消"}
                    log_msg = format_log_message('INFO', "请求取消", extra=extra_log_request_cancel)
                    logger.info(log_msg)
                    raise

        except HTTPException as e:
            if e.status_code == status.HTTP_408_REQUEST_TIMEOUT:
                extra_log = {'key': current_api_key[:8], 'request_type': request_type, 'model': chat_request.model,
                             'status_code': 408, 'error_message': '客户端连接中断'}
                log_msg = format_log_message('ERROR', "客户端连接中断,终止后续重试", extra=extra_log)
                logger.error(log_msg)
                raise
            else:
                raise
        except Exception as e:
            handle_gemini_error(e, current_api_key, key_manager)
            if attempt < retry_attempts:
                switch_api_key()
                continue

    msg = "所有API密钥均失败,请稍后重试"
    extra_log_all_fail = {'key': "ALL", 'request_type': request_type, 'model': chat_request.model, 'status_code': 500, 'error_message': msg}
    log_msg = format_log_message('ERROR', msg, extra=extra_log_all_fail)
    logger.error(log_msg)
    raise HTTPException(
        status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=msg)

@app.post("/v1/chat/completions", response_model=ChatCompletionResponse)
async def chat_completions(request: ChatCompletionRequest, http_request: Request, _: None = Depends(verify_password)):
    return await process_request(request, http_request, "stream" if request.stream else "non-stream")


@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    error_message = translate_error(str(exc))
    extra_log_unhandled_exception = {'status_code': 500, 'error_message': error_message}
    log_msg = format_log_message('ERROR', f"Unhandled exception: {error_message}", extra=extra_log_unhandled_exception)
    logger.error(log_msg)
    return JSONResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, content=ErrorResponse(message=str(exc), type="internal_error").dict())


@app.get("/", response_class=HTMLResponse)
async def root():
    html_content = f"""
    <!DOCTYPE html>
    <html>
    <head>
        <title>Gemini API 代理服务</title>
        <style>
            body {{
                font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
                max-width: 800px;
                margin: 0 auto;
                padding: 20px;
                line-height: 1.6;
            }}
            h1 {{
                color: #333;
                text-align: center;
                margin-bottom: 30px;
            }}
            .info-box {{
                background-color: #f8f9fa;
                border: 1px solid #dee2e6;
                border-radius: 4px;
                padding: 20px;
                margin-bottom: 20px;
            }}
            .status {{
                color: #28a745;
                font-weight: bold;
            }}
        </style>
    </head>
    <body>
        <h1>🤖 Gemini API 代理服务</h1>

        <div class="info-box">
            <h2>🟢 运行状态</h2>
            <p class="status">服务运行中</p>
            <p>可用API密钥数量: {len(key_manager.api_keys)}</p>
            <p>可用模型数量: {len(GeminiClient.AVAILABLE_MODELS)}</p>
        </div>

        <div class="info-box">
            <h2>⚙️ 环境配置</h2>
            <p>每分钟请求限制: {MAX_REQUESTS_PER_MINUTE}</p>
            <p>每IP每日请求限制: {MAX_REQUESTS_PER_DAY_PER_IP}</p>
            <p>最大重试次数: {len(key_manager.api_keys)}</p>
        </div>
    </body>
    </html>
    """
    return html_content
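
For reference, a minimal client-side sketch of calling this proxy's OpenAI-compatible chat endpoint. The host/port, the bearer token (shown as the server's default PASSWORD of "123"), and the model name are assumptions for illustration; adjust them to the actual deployment and to a model reported by /v1/models.

# Hypothetical client call: host, token, and model name are placeholders, not part of the commit.
import requests

resp = requests.post(
    "http://localhost:8000/v1/chat/completions",
    headers={"Authorization": "Bearer 123"},
    json={
        "model": "gemini-2.0-flash-exp",
        "messages": [{"role": "user", "content": "你好"}],
        "stream": False,
    },
    timeout=60,
)
resp.raise_for_status()
# The proxy returns an OpenAI-style body, so the reply text sits in choices[0].message.content.
print(resp.json()["choices"][0]["message"]["content"])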