Update api/utils.py
api/utils.py · CHANGED · +80 −97
Deletions (old side of the diff; … marks lines truncated in the capture):

@@ -1,27 +1,23 @@
 from datetime import datetime
-import …
-import …
-import random
 from typing import Any, Dict, Optional

 import httpx
-from …
-from api import …
-from api.config import (
-    …
-    BASE_URL,
-    AGENT_MODE,
-    TRENDING_AGENT_MODE,
-    MODEL_PREFIXES,
-)
 from api.models import ChatRequest
 from api.logger import setup_logger

 logger = setup_logger(__name__)

-
 def create_chat_completion_data(
     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
 ) -> Dict[str, Any]:
@@ -40,61 +36,55 @@ def create_chat_completion_data(
         "usage": None,
     }

-
-def message_to_dict(…):
-    …
         return {
             "role": message.role,
-            "content": content,
             "data": {
                 "imageBase64": message.content[1]["image_url"]["url"],
                 "fileText": "",
                 "title": "snapshot",
             },
         }
-

-# Process streaming response with headers from config.py
-async def process_streaming_response(request: ChatRequest):
-    logger.info(f"Processing streaming response for Model: {request.model}")

     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
-    model_prefix = MODEL_PREFIXES.get(request.model, "")
-
-    headers_api_chat = get_headers_api_chat(BASE_URL)
-    validated_token = validate.getHid()
-    logger.info(f"Retrieved validated token: {validated_token}")
-
-    if request.model == 'o1-preview':
-        delay_seconds = random.randint(1, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
-        await asyncio.sleep(delay_seconds)

     json_data = {
-        "…
-        "clickedForceWebSearch": False,
         "codeModelMode": True,
-        "…
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
-        "mobileClient": False,
-        "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
-        "…
         "visitFromDelta": False,
     }

     async with httpx.AsyncClient() as client:
@@ -102,7 +92,7 @@ async def process_streaming_response(request: ChatRequest):
             async with client.stream(
                 "POST",
                 f"{BASE_URL}/api/chat",
-                headers=headers_api_chat,
                 json=json_data,
                 timeout=100,
             ) as response:
@@ -110,9 +100,18 @@ async def process_streaming_response(request: ChatRequest):
                 async for line in response.aiter_lines():
                     timestamp = int(datetime.now().timestamp())
                     if line:
-                        …
                 yield "data: [DONE]\n\n"
         except httpx.HTTPStatusError as e:
             logger.error(f"HTTP error occurred: {e}")
@@ -121,62 +120,46 @@ async def process_streaming_response(request: ChatRequest):
             logger.error(f"Error occurred during request: {e}")
             raise HTTPException(status_code=500, detail=str(e))

-# Process non-streaming response with headers from config.py
-async def process_non_streaming_response(request: ChatRequest):
-    logger.info(f"Processing non-streaming response for Model: {request.model}")

     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
-    model_prefix = MODEL_PREFIXES.get(request.model, "")
-
-    headers_api_chat = get_headers_api_chat(BASE_URL)
-    headers_chat = get_headers_chat(BASE_URL, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
-    validated_token = validate.getHid()
-
-    if request.model == 'o1-preview':
-        delay_seconds = random.randint(20, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
-        await asyncio.sleep(delay_seconds)

     json_data = {
-        "…
-        "clickedForceWebSearch": False,
         "codeModelMode": True,
-        "…
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
-        "mobileClient": False,
-        "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
-        "…
         "visitFromDelta": False,
     }

     full_response = ""
     async with httpx.AsyncClient() as client:
-        …
-        except httpx.RequestError as e:
-            logger.error(f"Error occurred during request: {e}")
-            raise HTTPException(status_code=500, detail=str(e))
-
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion",

Additions (new side of the diff; … marks unchanged lines omitted between hunks):

 from datetime import datetime
+from http.client import HTTPException
+import json
 from typing import Any, Dict, Optional
+import uuid

 import httpx
+from api import validate
+from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE
+from fastapi import Depends, security
+from fastapi.security import HTTPAuthorizationCredentials
+
+from api.config import APP_SECRET, BASE_URL
 from api.models import ChatRequest
+
 from api.logger import setup_logger

 logger = setup_logger(__name__)

+
 def create_chat_completion_data(
     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
 ) -> Dict[str, Any]:
…
         "usage": None,
     }

+
+def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
+    if credentials.credentials != APP_SECRET:
+        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
+    return credentials.credentials
+
+
+def message_to_dict(message):
+    if isinstance(message.content, str):
+        return {"role": message.role, "content": message.content}
+    elif isinstance(message.content, list) and len(message.content) == 2:
         return {
             "role": message.role,
+            "content": message.content[0]["text"],
             "data": {
                 "imageBase64": message.content[1]["image_url"]["url"],
                 "fileText": "",
                 "title": "snapshot",
             },
         }
+    else:
+        return {"role": message.role, "content": message.content}
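
The new message_to_dict flattens a two-part multimodal message (text plus image_url) into the text content and a data.imageBase64 field; anything else passes through unchanged. A minimal sketch of the mapping, using types.SimpleNamespace as a stand-in for the message model defined in api.models (an assumption, since that model is not part of this diff):

    # Sketch only: how message_to_dict maps an OpenAI-style vision message.
    # SimpleNamespace stands in for the real message object from api.models.
    from types import SimpleNamespace

    msg = SimpleNamespace(
        role="user",
        content=[
            {"type": "text", "text": "What is in this image?"},
            {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBOR..."}},
        ],
    )

    message_to_dict(msg)
    # -> {"role": "user",
    #     "content": "What is in this image?",
    #     "data": {"imageBase64": "data:image/png;base64,iVBOR...", "fileText": "", "title": "snapshot"}}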

+async def process_streaming_response(request: ChatRequest):
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})

     json_data = {
+        "messages": [message_to_dict(msg) for msg in request.messages],
+        "previewToken": None,
+        "userId": None,
         "codeModelMode": True,
+        "agentMode": agent_mode,  # Populate agentMode
+        "trendingAgentMode": trending_agent_mode,  # Populate trendingAgentMode
         "isMicMode": False,
+        "userSystemPrompt": None,
         "maxTokens": request.max_tokens,
         "playgroundTopP": request.top_p,
+        "playgroundTemperature": request.temperature,
+        "isChromeExt": False,
+        "githubToken": None,
+        "clickedAnswer2": False,
+        "clickedAnswer3": False,
+        "clickedForceWebSearch": False,
         "visitFromDelta": False,
+        "mobileClient": False,
+        "userSelectedModel": MODEL_MAPPING.get(request.model),
+        "validated": validate.getHid()
     }

     async with httpx.AsyncClient() as client:
…
             async with client.stream(
                 "POST",
                 f"{BASE_URL}/api/chat",
+                headers=headers,
                 json=json_data,
                 timeout=100,
             ) as response:
…
                 async for line in response.aiter_lines():
                     timestamp = int(datetime.now().timestamp())
                     if line:
+                        content = line + "\n"
+                        if "https://www.blackbox.ai" in content:
+                            validate.getHid(True)
+                            content = "hid已刷新,重新对话即可\n"
+                            yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
+                            break
+                        if content.startswith("$@$v=undefined-rv1$@$"):
+                            yield f"data: {json.dumps(create_chat_completion_data(content[21:], request.model, timestamp))}\n\n"
+                        else:
+                            yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
+
+                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                 yield "data: [DONE]\n\n"
         except httpx.HTTPStatusError as e:
             logger.error(f"HTTP error occurred: {e}")
…
             logger.error(f"Error occurred during request: {e}")
             raise HTTPException(status_code=500, detail=str(e))

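
process_streaming_response is now an async generator that forwards each upstream line as an OpenAI-style SSE chunk (data: {...}\n\n, closed by a stop chunk and data: [DONE]). When the upstream body contains the blackbox.ai marker, it refreshes the hid via validate.getHid(True), emits a notice string ("hid已刷新,重新对话即可", i.e. "hid refreshed, start a new conversation") and stops. A minimal sketch of how the generator could be exposed through a route, assuming a FastAPI app, a /v1/chat/completions path, and a stream flag on ChatRequest (none of which are part of this diff):

    # Sketch only: the app, route path, and `stream` flag are assumptions.
    from fastapi import Depends, FastAPI
    from fastapi.responses import StreamingResponse

    from api.models import ChatRequest
    from api.utils import (
        process_non_streaming_response,
        process_streaming_response,
        verify_app_secret,
    )

    app = FastAPI()

    @app.post("/v1/chat/completions")
    async def chat_completions(request: ChatRequest, _token: str = Depends(verify_app_secret)):
        if getattr(request, "stream", False):
            # Stream the generator's SSE lines straight to the client.
            return StreamingResponse(
                process_streaming_response(request),
                media_type="text/event-stream",
            )
        return await process_non_streaming_response(request)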

+async def process_non_streaming_response(request: ChatRequest):
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})

     json_data = {
+        "messages": [message_to_dict(msg) for msg in request.messages],
+        "previewToken": None,
+        "userId": None,
         "codeModelMode": True,
+        "agentMode": agent_mode,  # Populate agentMode
+        "trendingAgentMode": trending_agent_mode,  # Populate trendingAgentMode
         "isMicMode": False,
+        "userSystemPrompt": None,
         "maxTokens": request.max_tokens,
         "playgroundTopP": request.top_p,
+        "playgroundTemperature": request.temperature,
+        "isChromeExt": False,
+        "githubToken": None,
+        "clickedAnswer2": False,
+        "clickedAnswer3": False,
+        "clickedForceWebSearch": False,
         "visitFromDelta": False,
+        "mobileClient": False,
+        "userSelectedModel": MODEL_MAPPING.get(request.model),
+        "validated": validate.getHid()
     }

     full_response = ""
     async with httpx.AsyncClient() as client:
+        async with client.stream(
+            method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
+        ) as response:
+            async for chunk in response.aiter_text():
+                full_response += chunk
+    if "https://www.blackbox.ai" in full_response:
+        validate.getHid(True)
+        full_response = "hid已刷新,重新对话即可"
+    if full_response.startswith("$@$v=undefined-rv1$@$"):
+        full_response = full_response[21:]
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion",