Delete more_core.py
Browse files- more_core.py +0 -353
more_core.py
DELETED
@@ -1,353 +0,0 @@
|
|
1 |
-
import json
|
2 |
-
import multiprocessing
|
3 |
-
import os
|
4 |
-
import random
|
5 |
-
import string
|
6 |
-
import time
|
7 |
-
from typing import Dict, Any, List
|
8 |
-
|
9 |
-
import tiktoken
|
10 |
-
import uvicorn
|
11 |
-
from apscheduler.schedulers.background import BackgroundScheduler
|
12 |
-
from fastapi import FastAPI, Request, HTTPException
|
13 |
-
from fastapi.responses import JSONResponse
|
14 |
-
from starlette.responses import HTMLResponse
|
15 |
-
# 禁用 SSL 警告
|
16 |
-
import urllib3
|
17 |
-
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
18 |
-
|
19 |
-
import degpt as dg
|
20 |
-
|
21 |
-
# Debug logging flag. CONSISTENCY FIX: _reload_routes_if_needed refreshes
# this from the DEBUG env var every 30s, but startup ignored the env var
# (hard-coded False); honor DEBUG from the very first request as well.
debug = os.getenv("DEBUG", "False").lower() in ["true", "1", "t"]
|
23 |
-
|
24 |
-
# Module-level FastAPI application; routes are attached by APIServer and
# the app is served via uvicorn (see APIServer.run).
app = FastAPI(
    title="ones",
    description="High-performance API service",
    version="1.1.5|2025.1.12"
)
|
29 |
-
|
30 |
-
|
31 |
-
class APIServer:
    """High-performance API server implementation"""

    def __init__(self, app: FastAPI):
        # Wire everything up at construction: routes first, then the
        # background scheduler that keeps them in sync with env config.
        self.app = app
        # Tokenizer used for prompt/completion token accounting.
        self.encoding = tiktoken.get_encoding("cl100k_base")
        self._setup_routes()
        self._setup_scheduler()
|
39 |
-
|
40 |
-
def _setup_scheduler(self):
    """Start the background jobs that keep routes and model data fresh."""
    scheduler = BackgroundScheduler()
    # Job 1 - every 30 seconds: re-read the env config and re-register
    # the dynamic routes if they changed.
    scheduler.add_job(self._reload_routes_if_needed, 'interval', seconds=30)
    # Job 2 - every 30 minutes: let the backend refresh its model data.
    scheduler.add_job(self._reload_check, 'interval', seconds=60 * 30)
    scheduler.start()
    self.scheduler = scheduler
|
49 |
-
def _setup_routes(self) -> None:
    """Initialize API routes.

    Registers the static pages (/, /web, /health), the model listing
    endpoint (/v1/models), and the dynamic chat-completion routes read
    from the environment via _get_routes().
    """
    # Static routes with names for filtering (names are used by
    # _reload_routes to know which routes to preserve).

    @self.app.get("/", name="root", include_in_schema=False)
    def root():
        return HTMLResponse(content="<h1>hello. It's home page.</h1>")

    @self.app.get("/web", name="web")
    def web():
        return HTMLResponse(content="<h1>hello. It's web page.</h1>")

    @self.app.get("/health", name="health")
    def health():
        return JSONResponse(content={"status": "working"})

    @self.app.get("/v1/models", name="models")
    def models():
        if debug:
            print("Fetching models...")
        models_str = dg.get_models()
        try:
            models_json = json.loads(models_str)
            return JSONResponse(content=models_json)
        except json.JSONDecodeError as e:
            raise HTTPException(status_code=500,
                                detail=f"Invalid models data: {str(e)}")

    # Register dynamic chat completion routes.
    # BUG FIX: this previously assigned a stray docstring string to
    # self.routes; keep the actual configured route list instead.
    self.routes = self._get_routes()
    if debug:
        print(f"Registering routes: {self.routes}")
    for path in self.routes:
        self._register_route(path)
    existing_routes = [route.path for route in self.app.routes if hasattr(route, 'path')]
    if debug:
        print(f"All routes now: {existing_routes}")
|
88 |
-
|
89 |
-
def _get_routes(self) -> List[str]:
|
90 |
-
"""Get configured API routes"""
|
91 |
-
default_path = "/api/v1/chat/completions"
|
92 |
-
replace_chat = os.getenv("REPLACE_CHAT", "")
|
93 |
-
prefix_chat = os.getenv("PREFIX_CHAT", "")
|
94 |
-
append_chat = os.getenv("APPEND_CHAT", "")
|
95 |
-
|
96 |
-
if replace_chat:
|
97 |
-
return [path.strip() for path in replace_chat.split(",") if path.strip()]
|
98 |
-
|
99 |
-
routes = []
|
100 |
-
if prefix_chat:
|
101 |
-
routes.extend(f"{prefix.rstrip('/')}{default_path}"
|
102 |
-
for prefix in prefix_chat.split(","))
|
103 |
-
return routes
|
104 |
-
|
105 |
-
if append_chat:
|
106 |
-
append_paths = [path.strip() for path in append_chat.split(",") if path.strip()]
|
107 |
-
routes = [default_path] + append_paths
|
108 |
-
return routes
|
109 |
-
|
110 |
-
return [default_path]
|
111 |
-
|
112 |
-
def _register_route(self, path: str) -> None:
    """Register a single API route"""
    global debug

    async def chat_endpoint(request: Request) -> Dict[str, Any]:
        # POST handler: parse headers and JSON body, then delegate to
        # _generate_response for validation and the backend call.
        try:
            if debug:
                print(f"Request chat_endpoint...")
            headers = dict(request.headers)
            data = await request.json()
            if debug:
                print(f"Request received...\r\n\tHeaders: {headers},\r\n\tData: {data}")
            return self._generate_response(headers, data)
        except Exception as e:
            if debug:
                print(f"Request processing error: {e}")
            # NOTE(review): this also converts HTTPExceptions (401/400)
            # raised downstream into generic 500s - confirm intended.
            raise HTTPException(status_code=500, detail="Internal server error") from e

    # Attach the handler to the app at the configured path.
    self.app.post(path)(chat_endpoint)
|
131 |
-
|
132 |
-
def _calculate_tokens(self, text: str) -> int:
|
133 |
-
"""Calculate token count for text"""
|
134 |
-
return len(self.encoding.encode(text))
|
135 |
-
|
136 |
-
def _generate_id(self, letters: int = 4, numbers: int = 6) -> str:
|
137 |
-
"""Generate unique chat completion ID"""
|
138 |
-
letters_str = ''.join(random.choices(string.ascii_lowercase, k=letters))
|
139 |
-
numbers_str = ''.join(random.choices(string.digits, k=numbers))
|
140 |
-
return f"chatcmpl-{letters_str}{numbers_str}"
|
141 |
-
|
142 |
-
def is_chatgpt_format(self, data):
    """Check if the data is in the expected ChatGPT format"""
    try:
        # Strings must parse as JSON before the structural check.
        if isinstance(data, str):
            try:
                data = json.loads(data)
            except json.JSONDecodeError:
                return False

        # A ChatGPT-style payload is a dict with a non-empty "choices"
        # list whose first entry carries a "message" field.
        if isinstance(data, dict):
            choices = data["choices"] if "choices" in data else None
            if isinstance(choices, list) and len(choices) > 0:
                if "message" in choices[0]:
                    return True
    except Exception as e:
        print(f"Error checking ChatGPT format: {e}")
    return False
|
161 |
-
|
162 |
-
def process_result(self, result, model):
    """Normalize a backend result into chat-completion shape.

    JSON strings are parsed first; dict results get a fresh id, the
    standard object tag, and the model name stamped on. Anything that
    is not valid JSON is returned untouched.
    """
    if isinstance(result, str):
        try:
            result = json.loads(result)
        except json.JSONDecodeError:
            # Not JSON - hand the raw string back unchanged.
            return result

    if isinstance(result, dict):
        result['id'] = self._generate_id()
        result['object'] = "chat.completion"
        result['model'] = model
    return result
|
179 |
-
|
180 |
-
def _generate_response(self, headers: Dict[str, str], data: Dict[str, Any]) -> Dict[str, Any]:
    """Generate API response.

    Validates the optional bearer token and the messages list, calls the
    backend, and returns a ChatGPT-compatible completion payload.

    Raises:
        HTTPException: 401 on bad token, 400 on empty messages,
            500 on any backend failure.
    """
    global debug
    if debug:
        print("inside _generate_response")
    # BUG FIX: bind model before the try block so the except handler's
    # dg.record_call(model, ...) can never hit an unbound local.
    model = None
    try:
        # check model; "auto" lets the backend pick a concrete model
        model = data.get("model")
        if "auto" == model:
            model = dg.get_auto_model()
        # Optional token check, driven by the TOKEN env var.
        # BUG FIX: a missing Authorization header used to make
        # `token not in None` raise TypeError and surface as a 500.
        authorization = headers.get('Authorization') or ""
        token = os.getenv("TOKEN", "")
        if token and token not in authorization:
            raise HTTPException(status_code=401, detail="无效的Token")

        # call ai
        msgs = data.get("messages")
        if not msgs:
            raise HTTPException(status_code=400, detail="消息不能为空")

        if debug:
            print(f"request model: {model}")
            if token:
                print(f"request token: {token}")
            print(f"request messages: {msgs}")

        result = dg.chat_completion_messages(
            messages=msgs,
            model=model
        )
        if debug:
            print(f"result: {result}---- {self.is_chatgpt_format(result)}")

        if self.is_chatgpt_format(result):
            # Backend already speaks ChatGPT format - just re-stamp ids.
            response_data = self.process_result(result, model)
        else:
            # Wrap the plain result in a ChatGPT-style envelope.
            # BUG FIX: "created" is a Unix timestamp in *seconds* per the
            # OpenAI chat-completion object spec; it was milliseconds.
            current_timestamp = int(time.time())
            prompt_tokens = self._calculate_tokens(str(data))
            completion_tokens = self._calculate_tokens(result)
            total_tokens = prompt_tokens + completion_tokens

            response_data = {
                "id": self._generate_id(),
                "object": "chat.completion",
                "created": current_timestamp,
                "model": data.get("model", "gpt-4o"),
                "usage": {
                    "prompt_tokens": prompt_tokens,
                    "completion_tokens": completion_tokens,
                    "total_tokens": total_tokens
                },
                "choices": [{
                    "message": {
                        "role": "assistant",
                        "content": result
                    },
                    "finish_reason": "stop",
                    "index": 0
                }]
            }

        if debug:
            print(f"Response Data: {response_data}")

        return response_data
    except HTTPException:
        # BUG FIX: deliberate 401/400 responses used to be swallowed by
        # the generic handler below and re-raised as 500s.
        raise
    except Exception as e:
        # Record the failed call so the backend can down-rank the model.
        dg.record_call(model, False)
        if debug:
            print(f"Response generation error: {e}")
        raise HTTPException(status_code=500, detail=str(e)) from e
|
263 |
-
|
264 |
-
def _get_workers_count(self) -> int:
|
265 |
-
"""Calculate optimal worker count"""
|
266 |
-
try:
|
267 |
-
cpu_cores = multiprocessing.cpu_count()
|
268 |
-
recommended_workers = (2 * cpu_cores) + 1
|
269 |
-
return min(max(4, recommended_workers), 8)
|
270 |
-
except Exception as e:
|
271 |
-
if debug:
|
272 |
-
print(f"Worker count calculation failed: {e}, using default 4")
|
273 |
-
return 4
|
274 |
-
|
275 |
-
def get_server_config(self, host: str = "0.0.0.0", port: int = 7860) -> uvicorn.Config:
    """Get server configuration"""
    workers = self._get_workers_count()
    if debug:
        print(f"Configuring server with {workers} workers")

    # NOTE(review): uvicorn ignores `workers` when given an app *object*
    # rather than an import string, and `uvloop`/`httptools` must be
    # installed for these loop/http settings - confirm both are intended.
    return uvicorn.Config(
        app=self.app,
        host=host,
        port=port,
        workers=workers,
        loop="uvloop",
        limit_concurrency=1000,
        timeout_keep_alive=30,
        access_log=True,
        log_level="info",
        http="httptools"
    )
|
293 |
-
|
294 |
-
def run(self, host: str = "0.0.0.0", port: int = 7860) -> None:
    """Run the API server"""
    # Build the uvicorn config and block serving requests.
    server = uvicorn.Server(self.get_server_config(host, port))
    server.run()
|
299 |
-
|
300 |
-
def _reload_check(self) -> None:
    # Periodic hook: ask the degpt backend to refresh its model data.
    dg.reload_check()
|
302 |
-
|
303 |
-
|
304 |
-
def _reload_routes_if_needed(self) -> None:
|
305 |
-
"""Check if routes need to be reloaded based on environment variables"""
|
306 |
-
# reload Debug
|
307 |
-
global debug
|
308 |
-
debug = os.getenv("DEBUG", "False").lower() in ["true", "1", "t"]
|
309 |
-
# relaod routes
|
310 |
-
new_routes = self._get_routes()
|
311 |
-
current_routes = [route for route in self.app.routes if hasattr(route, 'path')]
|
312 |
-
|
313 |
-
# Check if the current routes are different from the new routes
|
314 |
-
if [route.path for route in current_routes] != new_routes:
|
315 |
-
if debug:
|
316 |
-
print("Routes changed, reloading...")
|
317 |
-
self._reload_routes(new_routes)
|
318 |
-
|
319 |
-
# def _reload_routes(self, new_routes: List[str]) -> None:
|
320 |
-
# """Reload the routes based on the updated configuration"""
|
321 |
-
# # Clear existing routes
|
322 |
-
# self.app.routes.clear()
|
323 |
-
# # Register new routes
|
324 |
-
# for path in new_routes:
|
325 |
-
# self._register_route(path)
|
326 |
-
|
327 |
-
def _reload_routes(self, new_routes: List[str]) -> None:
|
328 |
-
"""Reload only dynamic routes while preserving static ones"""
|
329 |
-
# Define static route names
|
330 |
-
static_routes = {"root", "web", "health", "models"}
|
331 |
-
|
332 |
-
# Remove only dynamic routes
|
333 |
-
self.app.routes[:] = [
|
334 |
-
route for route in self.app.routes
|
335 |
-
if not hasattr(route, 'name') or route.name in static_routes
|
336 |
-
]
|
337 |
-
|
338 |
-
# Register new dynamic routes
|
339 |
-
for path in new_routes:
|
340 |
-
self._register_route(path)
|
341 |
-
|
342 |
-
|
343 |
-
|
344 |
-
|
345 |
-
def create_server() -> APIServer:
    """Factory function to create server instance bound to the module-level app."""
    return APIServer(app)
|
348 |
-
|
349 |
-
|
350 |
-
if __name__ == "__main__":
    # Port is configurable via the PORT env var (default 7860).
    server = create_server()
    server.run(port=int(os.getenv("PORT", "7860")))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|