Update api/utils.py

api/utils.py · CHANGED · +50 −25
```diff
@@ -6,29 +6,40 @@ import uuid
 
 import httpx
 from api import validate
-from api.config import …
-…
-…
+from api.config import (
+    MODEL_MAPPING,
+    headers,
+    AGENT_MODE,
+    TRENDING_AGENT_MODE,
+    MODEL_PREFIXES,
+    APP_SECRET,
+    BASE_URL,
+)
+from fastapi import Depends, HTTPException
+from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
 
-from api.config import APP_SECRET, BASE_URL
 from api.models import ChatRequest
-
 from api.logger import setup_logger
 
 logger = setup_logger(__name__)
 
+# Initialize HTTPBearer for security
+security = HTTPBearer()
+
 def create_chat_completion_data(
     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
 ) -> Dict[str, Any]:
+    prefix = MODEL_PREFIXES.get(model, "")
+    full_content = f"{prefix} {content}" if prefix else content
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion.chunk",
         "created": timestamp,
-        "model": model,
+        "model": f"{prefix}{model}" if prefix else model,
         "choices": [
             {
                 "index": 0,
-                "delta": {"content": content, "role": "assistant"},
+                "delta": {"content": full_content, "role": "assistant"},
                 "finish_reason": finish_reason,
             }
         ],
```
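The new `security = HTTPBearer()` instance feeds the `verify_app_secret` dependency referenced in the next hunk. Only that dependency's signature and its final `return` appear in this diff; below is a plausible sketch of how the pieces fit together in FastAPI. The comparison against `APP_SECRET` and the route are assumptions, not code from this PR:

```python
from fastapi import Depends, FastAPI, HTTPException
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer

app = FastAPI()
security = HTTPBearer()
APP_SECRET = "change-me"  # stands in for api.config.APP_SECRET

def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
    # Assumed check: reject bearer tokens that do not match the app secret.
    if credentials.credentials != APP_SECRET:
        raise HTTPException(status_code=401, detail="Invalid APP_SECRET")
    return credentials.credentials

@app.post("/v1/chat/completions")
async def chat(token: str = Depends(verify_app_secret)):
    # Real handler would dispatch to the streaming/non-streaming processors.
    return {"ok": True}
```

`create_chat_completion_data` now threads a per-model prefix through both the delta content and the reported model name. A runnable sketch of the new chunk format, assuming a hypothetical `MODEL_PREFIXES` entry (the real mapping lives in `api.config`):

```python
import uuid
from typing import Any, Dict, Optional

MODEL_PREFIXES: Dict[str, str] = {"gpt-4o": "@GPT-4o:"}  # hypothetical example entry

def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    prefix = MODEL_PREFIXES.get(model, "")
    full_content = f"{prefix} {content}" if prefix else content
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        # The prefix is applied to both the content and the model name.
        "model": f"{prefix}{model}" if prefix else model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": full_content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
    }

chunk = create_chat_completion_data("Hello!", "gpt-4o", 1700000000)
assert chunk["choices"][0]["delta"]["content"] == "@GPT-4o: Hello!"
assert chunk["model"] == "@GPT-4o:gpt-4o"
```

Note that prefixed models now report a modified name in the `model` field, which strict OpenAI-compatible clients may not expect.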
```diff
@@ -41,28 +52,34 @@ def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
     return credentials.credentials
 
 def message_to_dict(message):
+    prefix = MODEL_PREFIXES.get(message.model, "") if hasattr(message, 'model') else ""
+
     if isinstance(message.content, str):
-        return {"role": message.role, "content": message.content}
+        content = f"{prefix} {message.content}" if prefix else message.content
     elif isinstance(message.content, list) and len(message.content) == 2:
-        return {
-            "role": message.role,
-            "content": message.content[0]["text"],
-            "data": {
-                "imageBase64": message.content[1]["image_url"]["url"],
-                "fileText": "",
-                "title": "snapshot",
-            },
-        }
+        content = f"{prefix} {message.content[0]['text']}" if prefix else message.content[0]['text']
     else:
-        return {"role": message.role, "content": message.content}
+        content = message.content
+
+    message_dict = {"role": message.role, "content": content}
+
+    if isinstance(message.content, list) and len(message.content) == 2:
+        message_dict["data"] = {
+            "imageBase64": message.content[1]["image_url"]["url"],
+            "fileText": "",
+            "title": "snapshot",
+        }
+
+    return message_dict
 
 async def process_streaming_response(request: ChatRequest):
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
+    model_prefix = MODEL_PREFIXES.get(request.model, "")
 
-    # Log reduced information
+    # Log reduced information with prefix
     logger.info(
-        f"Streaming request for model: '{request.model}', "
+        f"Streaming request for model: '{model_prefix}{request.model}', "
         f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
     )
 
```
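A usage sketch for the reworked `message_to_dict`. The function is inlined so the example runs standalone; the `SimpleNamespace` stands in for the real pydantic message type from `api.models`, and `MODEL_PREFIXES` is left empty here:

```python
from types import SimpleNamespace

MODEL_PREFIXES = {}  # the real mapping lives in api.config

def message_to_dict(message):
    # Mirrors the new implementation in the hunk above.
    prefix = MODEL_PREFIXES.get(message.model, "") if hasattr(message, "model") else ""

    if isinstance(message.content, str):
        content = f"{prefix} {message.content}" if prefix else message.content
    elif isinstance(message.content, list) and len(message.content) == 2:
        content = f"{prefix} {message.content[0]['text']}" if prefix else message.content[0]["text"]
    else:
        content = message.content

    message_dict = {"role": message.role, "content": content}

    # Vision-style messages arrive as [text_part, image_part].
    if isinstance(message.content, list) and len(message.content) == 2:
        message_dict["data"] = {
            "imageBase64": message.content[1]["image_url"]["url"],
            "fileText": "",
            "title": "snapshot",
        }

    return message_dict

msg = SimpleNamespace(
    role="user",
    content=[
        {"type": "text", "text": "Describe this image"},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBOR..."}},
    ],
)
result = message_to_dict(msg)
assert result["content"] == "Describe this image"
assert result["data"] == {
    "imageBase64": "data:image/png;base64,iVBOR...",
    "fileText": "",
    "title": "snapshot",
}
```

The rework replaces the old per-branch returns with a single exit path: `content` is computed first, the image payload is attached afterward, and one `message_dict` is returned for all three branches.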
```diff
@@ -86,7 +103,7 @@ async def process_streaming_response(request: ChatRequest):
         "visitFromDelta": False,
         "mobileClient": False,
         "userSelectedModel": MODEL_MAPPING.get(request.model),
-        "validated": validate.getHid()
+        "validated": validate.getHid(),
     }
 
     async with httpx.AsyncClient() as client:
```
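The `validated` field is populated from `validate.getHid()`, whose implementation is outside this diff. From its call sites (a bare `getHid()` here, and `getHid(True)` after a failure in the last hunk), it behaves like a cached-token getter with a force-refresh flag. A hedged sketch of that inferred contract, with the actual fetch left as a hypothetical stub:

```python
from typing import Optional

_cached_hid: Optional[str] = None

def getHid(force_refresh: bool = False) -> Optional[str]:
    """Return the cached "hid" validation token, refetching when forced or unset."""
    global _cached_hid
    if force_refresh or _cached_hid is None:
        _cached_hid = _fetch_hid_from_upstream()
    return _cached_hid

def _fetch_hid_from_upstream() -> str:
    # Hypothetical placeholder; the real api.validate module obtains the
    # token from the upstream service.
    return "00000000-0000-0000-0000-000000000000"
```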
```diff
@@ -125,10 +142,11 @@ async def process_streaming_response(request: ChatRequest):
 async def process_non_streaming_response(request: ChatRequest):
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
+    model_prefix = MODEL_PREFIXES.get(request.model, "")
 
-    # Log reduced information
+    # Log reduced information with prefix
    logger.info(
-        f"Non-streaming request for model: '{request.model}', "
+        f"Non-streaming request for model: '{model_prefix}{request.model}', "
         f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
     )
 
```
```diff
@@ -152,7 +170,7 @@ async def process_non_streaming_response(request: ChatRequest):
         "visitFromDelta": False,
         "mobileClient": False,
         "userSelectedModel": MODEL_MAPPING.get(request.model),
-        "validated": validate.getHid()
+        "validated": validate.getHid(),
     }
 
     full_response = ""
```
```diff
@@ -162,16 +180,23 @@ async def process_non_streaming_response(request: ChatRequest):
     ) as response:
         async for chunk in response.aiter_text():
             full_response += chunk
+
     if "https://www.blackbox.ai" in full_response:
         validate.getHid(True)
         full_response = "hid已刷新,重新对话即可"
+
     if full_response.startswith("$@$v=undefined-rv1$@$"):
         full_response = full_response[21:]
+
+    # Prepend model prefix to the final response content
+    if model_prefix:
+        full_response = f"{model_prefix} {full_response}"
+
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion",
         "created": int(datetime.now().timestamp()),
-        "model": request.model,
+        "model": f"{model_prefix}{request.model}" if model_prefix else request.model,
         "choices": [
             {
                 "index": 0,
```
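Two notes on the non-streaming post-processing above: the Chinese notice `"hid已刷新,重新对话即可"` means "hid refreshed, just start a new conversation", and `full_response[21:]` strips exactly the 21-character `"$@$v=undefined-rv1$@$"` banner. A sketch of the same cleanup as a pure helper, with the magic number derived from the banner itself (a possible refactor, not code from this PR):

```python
VERSION_BANNER = "$@$v=undefined-rv1$@$"  # len(VERSION_BANNER) == 21

def clean_full_response(text: str, model_prefix: str) -> str:
    """Strip the upstream version banner, then apply the model prefix."""
    if text.startswith(VERSION_BANNER):
        text = text[len(VERSION_BANNER):]
    if model_prefix:
        text = f"{model_prefix} {text}"
    return text

assert clean_full_response("$@$v=undefined-rv1$@$Hi", "@GPT-4o:") == "@GPT-4o: Hi"
```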