Update api/utils.py

api/utils.py · +44 −104 · CHANGED
```diff
@@ -5,34 +5,21 @@ import uuid
 import re
 
 import httpx
-from api.config import
+from api.config import (
+    MODEL_MAPPING,
+    MODEL_ALIASES,
+    headers,
+    AGENT_MODE,
+    TRENDING_AGENT_MODE,
+    BASE_URL
+)
 from fastapi import HTTPException
-from api.models import ChatRequest
+from api.models import ChatRequest
 
 from api.logger import setup_logger
 
 logger = setup_logger(__name__)
 
-model_prefixes = {
-    'gpt-4o': '@GPT-4o',
-    'gemini-pro': '@Gemini-PRO',
-    'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
-    'PythonAgent': '@Python Agent',
-    'JavaAgent': '@Java Agent',
-    'JavaScriptAgent': '@JavaScript Agent',
-    'HTMLAgent': '@HTML Agent',
-    'GoogleCloudAgent': '@Google Cloud Agent',
-    'AndroidDeveloper': '@Android Developer',
-    'SwiftDeveloper': '@Swift Developer',
-    'Next.jsAgent': '@Next.js Agent',
-    'MongoDBAgent': '@MongoDB Agent',
-    'PyTorchAgent': '@PyTorch Agent',
-    'ReactAgent': '@React Agent',
-    'XcodeAgent': '@Xcode Agent',
-    'AngularJSAgent': '@AngularJS Agent',
-    'blackboxai-pro': '@BLACKBOXAI-PRO',
-    'ImageGeneration': '@Image Generation',
-}
 
 def create_chat_completion_data(
     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
```
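With the hard-coded `model_prefixes` table gone, model routing is driven entirely by `api/config.py`. That file is not part of this diff, so the sketch below only illustrates the shapes the code consumes; every value in it is an assumption.

```python
# Hypothetical shapes -- the real definitions live in api/config.py,
# which is not part of this diff.
MODEL_MAPPING = {"gpt-4o": "gpt-4o"}           # request id -> upstream model id
MODEL_ALIASES = {"gpt-4o-mini": "gpt-4o"}      # alternate names -> canonical ids
AGENT_MODE = {}                                 # per-model agent payloads
TRENDING_AGENT_MODE = {}                        # per-model trending-agent payloads
BASE_URL = "https://example.upstream.api"       # placeholder; real value in config
headers = {"Content-Type": "application/json"}  # request headers for the upstream API
```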
```diff
@@ -59,9 +46,9 @@ def message_to_dict(message):
     elif isinstance(message.content, list) and len(message.content) == 2:
         return {
             "role": message.role,
-            "content": message.content[0],
+            "content": message.content[0]["text"],
             "data": {
-                "imageBase64": message.content[1],
+                "imageBase64": message.content[1]["image_url"]["url"],
                 "fileText": "",
                 "title": "snapshot",
             },
```
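The new indexing assumes two-part messages arrive in the OpenAI vision style: a text part followed by an image part. A sketch of the input this branch now expects, with invented payload values (the real function receives a message object whose `.content` attribute is this list):

```python
# Assumed input shape (OpenAI vision-style content list); values are invented.
message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this picture?"},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBORw0..."}},
    ],
}

# message_to_dict would flatten it to:
expected = {
    "role": "user",
    "content": "What is in this picture?",
    "data": {
        "imageBase64": "data:image/png;base64,iVBORw0...",
        "fileText": "",
        "title": "snapshot",
    },
}
```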
```diff
@@ -71,29 +58,16 @@ def message_to_dict(message):
 
 
 async def process_streaming_response(request: ChatRequest):
-
-
-
-
-
-
-
-    # Format messages with prefix
-    formatted_messages = []
-    for msg in request.messages:
-        formatted_content = msg.content
-        if prefix and msg.role.lower() == "user":
-            formatted_content = f"{prefix} {formatted_content}"
-        # Ensure 'data' is included if present
-        data = msg.content.get('data') if isinstance(msg.content, dict) else None
-        formatted_messages.append({
-            "role": msg.role,
-            "content": formatted_content,
-            "data": data
-        })
+    # Map the requested model to the actual model used by the API
+    model = MODEL_MAPPING.get(request.model, MODEL_ALIASES.get(request.model, "blackboxai"))
+
+    logger.info(f"Using model: {model}")
+
+    agent_mode = AGENT_MODE.get(model, {})
+    trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})
 
     json_data = {
-        "messages": formatted_messages,
+        "messages": [message_to_dict(msg) for msg in request.messages],
         "previewToken": None,
         "userId": None,
         "codeModelMode": True,
```
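Both handlers resolve the model with the same chain: exact match in `MODEL_MAPPING`, then `MODEL_ALIASES`, then the `"blackboxai"` fallback. Spelled out as a standalone helper (`resolve_model` and the dict contents are hypothetical):

```python
# The lookup chain from the diff, as a self-contained helper.
MODEL_MAPPING = {"claude-sonnet-3.5": "claude-sonnet-3.5"}   # hypothetical contents
MODEL_ALIASES = {"claude-3.5-sonnet": "claude-sonnet-3.5"}   # hypothetical contents

def resolve_model(requested: str) -> str:
    # Exact id wins; otherwise try the alias table; otherwise fall back.
    return MODEL_MAPPING.get(requested, MODEL_ALIASES.get(requested, "blackboxai"))

assert resolve_model("claude-sonnet-3.5") == "claude-sonnet-3.5"   # direct hit
assert resolve_model("claude-3.5-sonnet") == "claude-sonnet-3.5"   # alias hit
assert resolve_model("no-such-model") == "blackboxai"              # default
```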
```diff
@@ -111,10 +85,11 @@ async def process_streaming_response(request: ChatRequest):
         "clickedForceWebSearch": False,
         "visitFromDelta": False,
         "mobileClient": False,
-        "webSearchMode": False,  # Set to True if web search is needed
         "userSelectedModel": model,
     }
 
+    logger.debug(f"Payload for streaming request: {json.dumps(json_data)}")
+
     async with httpx.AsyncClient() as client:
         try:
             async with client.stream(
```
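Only part of `json_data` is visible in this diff; the fields between `"codeModelMode"` and `"clickedForceWebSearch"` lie outside the hunk context. The visible lines assemble to roughly the following, with illustrative message entries:

```python
# Partial view of the request payload: only the keys visible in this
# diff's context lines; unchanged fields in between are elided.
json_data = {
    "messages": [{"role": "user", "content": "Hi"}],  # really: message_to_dict per message
    "previewToken": None,
    "userId": None,
    "codeModelMode": True,
    # ... fields outside the hunk context ...
    "clickedForceWebSearch": False,
    "visitFromDelta": False,
    "mobileClient": False,
    "userSelectedModel": "blackboxai",  # the resolved model id
}
```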
```diff
@@ -131,17 +106,11 @@ async def process_streaming_response(request: ChatRequest):
                         content = line
                         if content.startswith("$@$v=undefined-rv1$@$"):
                             content = content[21:]
-
-
-
-
-
-                            image_response = ImageResponse(images=image_data, alt="Generated Image")
-                            yield image_response
-                        else:
-                            yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
-
-                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
+                        yield f"data: {json.dumps(create_chat_completion_data(content, model, timestamp))}\n\n"
+
+                # Indicate the end of the stream
+                timestamp = int(datetime.now().timestamp())
+                yield f"data: {json.dumps(create_chat_completion_data('', model, timestamp, 'stop'))}\n\n"
                 yield "data: [DONE]\n\n"
             except httpx.HTTPStatusError as e:
                 logger.error(f"HTTP error occurred: {e}")
```
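The streaming handler frames each upstream line as an OpenAI-style server-sent event, then an empty `stop` chunk, then the `[DONE]` sentinel. Assuming `create_chat_completion_data` builds a standard `chat.completion.chunk` object (its body is not shown in this diff), a client would read something like:

```python
# Roughly what a client reads from the streaming endpoint.
# The JSON bodies are illustrative; the real schema comes from
# create_chat_completion_data, which is outside this diff.
sse_transcript = (
    'data: {"object": "chat.completion.chunk", "model": "blackboxai", '
    '"choices": [{"index": 0, "delta": {"content": "Hello"}, "finish_reason": null}]}\n\n'
    'data: {"object": "chat.completion.chunk", "model": "blackboxai", '
    '"choices": [{"index": 0, "delta": {"content": ""}, "finish_reason": "stop"}]}\n\n'
    "data: [DONE]\n\n"
)
```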
```diff
@@ -152,29 +121,16 @@ async def process_streaming_response(request: ChatRequest):
 
 
 async def process_non_streaming_response(request: ChatRequest):
-
-
-
-
-
-
-
-    # Format messages with prefix
-    formatted_messages = []
-    for msg in request.messages:
-        formatted_content = msg.content
-        if prefix and msg.role.lower() == "user":
-            formatted_content = f"{prefix} {formatted_content}"
-        # Ensure 'data' is included if present
-        data = msg.content.get('data') if isinstance(msg.content, dict) else None
-        formatted_messages.append({
-            "role": msg.role,
-            "content": formatted_content,
-            "data": data
-        })
+    # Map the requested model to the actual model used by the API
+    model = MODEL_MAPPING.get(request.model, MODEL_ALIASES.get(request.model, "blackboxai"))
+
+    logger.info(f"Using model: {model}")
+
+    agent_mode = AGENT_MODE.get(model, {})
+    trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})
 
     json_data = {
-        "messages": formatted_messages,
+        "messages": [message_to_dict(msg) for msg in request.messages],
         "previewToken": None,
         "userId": None,
         "codeModelMode": True,
```
```diff
@@ -192,20 +148,23 @@ async def process_non_streaming_response(request: ChatRequest):
         "clickedForceWebSearch": False,
         "visitFromDelta": False,
         "mobileClient": False,
-        "webSearchMode": False,  # Set to True if web search is needed
         "userSelectedModel": model,
     }
+
+    logger.debug(f"Payload for non-streaming request: {json.dumps(json_data)}")
+
     full_response = ""
     async with httpx.AsyncClient() as client:
         try:
-            async with client.
-
+            async with client.stream(
+                method="POST",
+                url=f"{BASE_URL}/api/chat",
                 headers=headers,
                 json=json_data,
-                timeout=100,
             ) as response:
                 response.raise_for_status()
-
+                async for chunk in response.aiter_text():
+                    full_response += chunk
         except httpx.HTTPStatusError as e:
             logger.error(f"HTTP error occurred: {e}")
             raise HTTPException(status_code=e.response.status_code, detail=str(e))
```
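Even the non-streaming path now opens the request with `client.stream(...)` and buffers the chunks, replacing the old truncated `client.` call and its `timeout=100`. A self-contained sketch of that buffer-the-stream pattern, with placeholder URL and payload:

```python
import httpx

async def fetch_full_text(url: str, payload: dict) -> str:
    # Stream the response but buffer it into one string -- the same
    # pattern process_non_streaming_response uses against BASE_URL.
    full = ""
    async with httpx.AsyncClient() as client:
        async with client.stream("POST", url, json=payload) as response:
            response.raise_for_status()
            async for chunk in response.aiter_text():
                full += chunk
    return full
```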
```diff
@@ -216,32 +175,13 @@ async def process_non_streaming_response(request: ChatRequest):
     if full_response.startswith("$@$v=undefined-rv1$@$"):
         full_response = full_response[21:]
 
-
-    match_data_uri = re.search(r'!\[.*?\]\((data:image/[^;]+;base64,[^\)]+)\)', full_response)
-    if match_data_uri:
-        image_data = match_data_uri.group(1)
-        # Optionally, you can convert data URI to a more usable format or pass it directly
-        return {
-            "id": f"chatcmpl-{uuid.uuid4()}",
-            "object": "chat.completion",
-            "created": int(datetime.now().timestamp()),
-            "model": request.model,
-            "choices": [
-                {
-                    "index": 0,
-                    "message": {"role": "assistant", "content": full_response},
-                    "finish_reason": "stop",
-                }
-            ],
-            "usage": None,
-            "imageData": image_data  # Add image data to the response if needed
-        }
+    logger.debug(f"Full non-streaming response: {full_response}")
 
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion",
         "created": int(datetime.now().timestamp()),
-        "model": request.model,
+        "model": model,
         "choices": [
             {
                 "index": 0,
```
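After stripping the `$@$v=undefined-rv1$@$` marker, the handler returns a plain OpenAI-style completion object. The fields past `"index": 0` fall outside the hunk context, but judging from the deleted duplicate block above, the shape is roughly:

```python
# Illustrative response body (id, timestamp, and content are invented).
response_body = {
    "id": "chatcmpl-5f0b5c7e-2d3a-4d8e-9f0a-1b2c3d4e5f6a",
    "object": "chat.completion",
    "created": 1730000000,
    "model": "blackboxai",
    "choices": [
        {
            "index": 0,
            "message": {"role": "assistant", "content": "Hello! How can I help?"},
            "finish_reason": "stop",
        }
    ],
    "usage": None,
}
```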