Update api/utils.py
api/utils.py  +25 -7
@@ -1,7 +1,8 @@
+# utils.py
+
 from datetime import datetime
 import json
 import uuid
-import asyncio
 import random
 from typing import Any, Dict, Optional
 
@@ -14,6 +15,7 @@ from api.config import (
     TRENDING_AGENT_MODE,
     BASE_URL,
     generate_id,
+    USER_SELECTED_MODELS,
 )
 from api.models import ChatRequest
 from api.logger import setup_logger
@@ -50,10 +52,17 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
 
 # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
 def message_to_dict(message, model_prefix: Optional[str] = None):
-    content =
+    content = (
+        message.content if isinstance(message.content, str)
+        else message.content[0]["text"]
+    )
     if model_prefix:
         content = f"{model_prefix} {content}"
-    if
+    if (
+        isinstance(message.content, list)
+        and len(message.content) == 2
+        and "image_url" in message.content[1]
+    ):
         # Ensure base64 images are always included for all models
         return {
             "role": message.role,
@@ -105,7 +114,9 @@ async def process_streaming_response(request: ChatRequest):
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [
+        "messages": [
+            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
+        ],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -134,7 +145,9 @@ async def process_streaming_response(request: ChatRequest):
                     content = line
                     if "https://www.blackbox.ai" in content:
                         getHid(True)
-                        content =
+                        content = (
+                            "HID has been refreshed, please start a new conversation.\n"
+                        )
                         yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
                         break
                     if content.startswith("$@$v=undefined-rv1$@$"):
@@ -190,7 +203,9 @@ async def process_non_streaming_response(request: ChatRequest):
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [
+        "messages": [
+            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
+        ],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -207,7 +222,10 @@ async def process_non_streaming_response(request: ChatRequest):
     async with httpx.AsyncClient() as client:
         try:
             async with client.stream(
-                method="POST",
+                method="POST",
+                url=f"{BASE_URL}/api/chat",
+                headers=headers_api_chat,
+                json=json_data,
             ) as response:
                 response.raise_for_status()
                 async for chunk in response.aiter_text():
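
The main behavioural change in this diff is in message_to_dict: a message's content may now be either a plain string or an OpenAI-style list of parts (a text part plus an image_url part), and a two-part list message keeps its base64 image. The following is a minimal, self-contained sketch of that branching under stated assumptions; the Message dataclass and the "data"/"imageBase64" keys in the returned dict are illustrative placeholders, since the diff does not show the full return value used by api/utils.py.

# Hypothetical sketch of the new string-vs-list content handling.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union


@dataclass
class Message:
    # Stand-in for the request message type: a role plus string or list-of-parts content.
    role: str
    content: Union[str, List[Dict[str, Any]]]


def message_to_dict_sketch(message: Message, model_prefix: Optional[str] = None) -> Dict[str, Any]:
    # String content is used as-is; list content takes the text of the first part.
    content = (
        message.content if isinstance(message.content, str)
        else message.content[0]["text"]
    )
    if model_prefix:
        content = f"{model_prefix} {content}"

    # A two-part list whose second part carries an "image_url" key is treated
    # as text plus a base64 image that should travel with the message.
    if (
        isinstance(message.content, list)
        and len(message.content) == 2
        and "image_url" in message.content[1]
    ):
        return {
            "role": message.role,
            "content": content,
            # Placeholder shape: attach the base64 payload alongside the text.
            "data": {"imageBase64": message.content[1]["image_url"]["url"]},
        }
    return {"role": message.role, "content": content}


# Example: an OpenAI-style vision message with a text part and a base64 image part.
vision_message = Message(
    role="user",
    content=[
        {"type": "text", "text": "What is in this picture?"},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBOR..."}},
    ],
)
print(message_to_dict_sketch(vision_message, model_prefix="[beta]"))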