Update api/utils.py
api/utils.py  CHANGED  +6 -6
@@ -71,7 +71,7 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
     """Remove the model prefix from the response content if present."""
     if model_prefix and content.startswith(model_prefix):
         logger.debug(f"Stripping prefix '{model_prefix}' from content.")
-        return content[len(model_prefix):].
+        return content[len(model_prefix):].lstrip()
     return content
 
 async def process_streaming_response(request: ChatRequest):
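After this change, strip_model_prefix trims only the leading whitespace left behind once the prefix is cut off, leaving the rest of the chunk untouched. A minimal runnable sketch of the updated helper, copied from the hunk above with a couple of illustrative checks added (the "GPT-4o:" prefix value is made up for illustration):

import logging
from typing import Optional

logger = logging.getLogger(__name__)

def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    if model_prefix and content.startswith(model_prefix):
        logger.debug(f"Stripping prefix '{model_prefix}' from content.")
        # lstrip() removes only the whitespace that follows the prefix;
        # trailing whitespace in the chunk is preserved.
        return content[len(model_prefix):].lstrip()
    return content

# Illustrative behavior (prefix value is hypothetical):
assert strip_model_prefix("GPT-4o: hello \n", "GPT-4o:") == "hello \n"
assert strip_model_prefix("hello", "GPT-4o:") == "hello"
assert strip_model_prefix("hello", None) == "hello"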
@@ -118,15 +118,15 @@ async def process_streaming_response(request: ChatRequest):
         async for line in response.aiter_lines():
             timestamp = int(datetime.now().timestamp())
             if line:
-                content = line
+                content = line.strip()  # Remove leading/trailing whitespace
                 if "https://www.blackbox.ai" in content:
                     validate.getHid(True)
-                    content = "The HID has been refreshed; please try again
+                    content = "The HID has been refreshed; please try again."
                     logger.info(f"hid refreshed due to content: {content}")
                     yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
                     break
                 if content.startswith("$@$v=undefined-rv1$@$"):
-                    content = content[21:]
+                    content = content[21:].lstrip()
                 # Strip model prefix from content
                 cleaned_content = strip_model_prefix(content, model_prefix)
                 yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
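Each streamed line now gets the same cleanup in a fixed order: whitespace is trimmed from the raw line, the 21-character $@$v=undefined-rv1$@$ marker is dropped when it leads the chunk, and the result goes through strip_model_prefix before being wrapped in an SSE data: frame. A standalone sketch of just that cleanup, reusing the strip_model_prefix helper shown above; the clean_stream_line name, sample line, and prefix are invented for illustration and are not part of the file:

MARKER = "$@$v=undefined-rv1$@$"  # 21 characters long, which is why the diff slices content[21:]

def clean_stream_line(line: str, model_prefix=None) -> str:
    # Hypothetical helper mirroring the per-line cleanup added in this hunk.
    content = line.strip()  # remove leading/trailing whitespace from the raw line
    if content.startswith(MARKER):
        # Equivalent to content[21:].lstrip() in the diff.
        content = content[len(MARKER):].lstrip()
    return strip_model_prefix(content, model_prefix)

# " $@$v=undefined-rv1$@$  GPT-4o: hi there \n"  ->  "hi there"
print(clean_stream_line(" $@$v=undefined-rv1$@$  GPT-4o: hi there \n", "GPT-4o:"))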
@@ -192,9 +192,9 @@ async def process_non_streaming_response(request: ChatRequest):
         logger.info("HID refreshed due to the content of the response.")
 
     if full_response.startswith("$@$v=undefined-rv1$@$"):
-        full_response = full_response[21:]
+        full_response = full_response[21:].lstrip()
     # Strip model prefix from full_response
-    cleaned_full_response = strip_model_prefix(full_response, model_prefix)
+    cleaned_full_response = strip_model_prefix(full_response.strip(), model_prefix)
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
         "object": "chat.completion",
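The non-streaming path applies the same marker and prefix cleanup once to the aggregated reply, trimming the whole string on both sides before it goes through strip_model_prefix. A small illustration with made-up values, again reusing the helper sketched after the first hunk (the "GPT-4o:" prefix remains hypothetical):

full_response = "$@$v=undefined-rv1$@$  GPT-4o: final answer \n"

if full_response.startswith("$@$v=undefined-rv1$@$"):
    full_response = full_response[21:].lstrip()  # drop the marker -> "GPT-4o: final answer \n"

# Strip model prefix from full_response; .strip() trims both ends of the aggregated reply.
cleaned_full_response = strip_model_prefix(full_response.strip(), model_prefix="GPT-4o:")
print(repr(cleaned_full_response))  # 'final answer'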