Update api/utils.py
Browse files- api/utils.py +35 -15
api/utils.py
CHANGED
@@ -6,10 +6,10 @@ import uuid
|
|
6 |
import httpx
|
7 |
from api.config import (
|
8 |
MODEL_MAPPING,
|
|
|
9 |
AGENT_MODE,
|
10 |
TRENDING_AGENT_MODE,
|
11 |
-
|
12 |
-
BASE_URL, # Import BASE_URL
|
13 |
)
|
14 |
from fastapi import HTTPException
|
15 |
from api.models import ChatRequest
|
@@ -17,10 +17,29 @@ from api.logger import setup_logger
|
|
17 |
|
18 |
logger = setup_logger(__name__)
|
19 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
20 |
def create_chat_completion_data(
|
21 |
content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
|
22 |
) -> Dict[str, Any]:
|
23 |
-
#
|
|
|
24 |
return {
|
25 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
26 |
"object": "chat.completion.chunk",
|
@@ -29,7 +48,7 @@ def create_chat_completion_data(
|
|
29 |
"choices": [
|
30 |
{
|
31 |
"index": 0,
|
32 |
-
"delta": {"content":
|
33 |
"finish_reason": finish_reason,
|
34 |
}
|
35 |
],
|
@@ -49,7 +68,7 @@ def message_to_dict(message):
|
|
49 |
"data": {
|
50 |
"imageBase64": message.content[1]["image_url"]["url"],
|
51 |
"fileText": "",
|
52 |
-
"title": "
|
53 |
},
|
54 |
}
|
55 |
else:
|
@@ -93,20 +112,18 @@ async def process_streaming_response(request: ChatRequest):
|
|
93 |
async for line in response.aiter_lines():
|
94 |
timestamp = int(datetime.now().timestamp())
|
95 |
if line:
|
96 |
-
content = line
|
97 |
if content.startswith("$@$v=undefined-rv1$@$"):
|
98 |
-
|
99 |
-
|
100 |
-
yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
|
101 |
-
|
102 |
yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
|
103 |
yield "data: [DONE]\n\n"
|
104 |
except httpx.HTTPStatusError as e:
|
105 |
logger.error(f"HTTP error occurred: {e}")
|
106 |
-
raise HTTPException(status_code=e.response.status_code, detail=
|
107 |
except httpx.RequestError as e:
|
108 |
logger.error(f"Error occurred during request: {e}")
|
109 |
-
raise HTTPException(status_code=500, detail=
|
110 |
|
111 |
async def process_non_streaming_response(request: ChatRequest):
|
112 |
agent_mode = AGENT_MODE.get(request.model, {})
|
@@ -143,13 +160,16 @@ async def process_non_streaming_response(request: ChatRequest):
|
|
143 |
full_response += chunk
|
144 |
except httpx.HTTPStatusError as e:
|
145 |
logger.error(f"HTTP error occurred: {e}")
|
146 |
-
raise HTTPException(status_code=e.response.status_code, detail=
|
147 |
except httpx.RequestError as e:
|
148 |
logger.error(f"Error occurred during request: {e}")
|
149 |
-
raise HTTPException(status_code=500, detail=
|
150 |
if full_response.startswith("$@$v=undefined-rv1$@$"):
|
151 |
full_response = full_response[21:]
|
152 |
|
|
|
|
|
|
|
153 |
return {
|
154 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
155 |
"object": "chat.completion",
|
@@ -158,7 +178,7 @@ async def process_non_streaming_response(request: ChatRequest):
|
|
158 |
"choices": [
|
159 |
{
|
160 |
"index": 0,
|
161 |
-
"message": {"role": "assistant", "content":
|
162 |
"finish_reason": "stop",
|
163 |
}
|
164 |
],
|
|
|
6 |
import httpx
|
7 |
from api.config import (
|
8 |
MODEL_MAPPING,
|
9 |
+
headers,
|
10 |
AGENT_MODE,
|
11 |
TRENDING_AGENT_MODE,
|
12 |
+
BASE_URL,
|
|
|
13 |
)
|
14 |
from fastapi import HTTPException
|
15 |
from api.models import ChatRequest
|
|
|
17 |
|
18 |
logger = setup_logger(__name__)
|
19 |
|
def clean_content(content: str) -> str:
    """Collapse runs of blank lines into a single blank line.

    Non-blank lines are kept with their trailing whitespace removed;
    leading whitespace (indentation) is left untouched so code
    formatting in the content survives. Whitespace-only lines count
    as blank.
    """
    out = []
    last_was_blank = False
    for raw_line in content.splitlines():
        blank = not raw_line.strip()
        if blank:
            # Emit at most one empty line per run of blanks.
            if not last_was_blank:
                out.append('')
        else:
            out.append(raw_line.rstrip())
        last_was_blank = blank
    return '\n'.join(out)
37 |
+
|
38 |
def create_chat_completion_data(
|
39 |
content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
|
40 |
) -> Dict[str, Any]:
|
41 |
+
# Clean the content to remove extra blank lines but preserve formatting
|
42 |
+
cleaned_content = clean_content(content)
|
43 |
return {
|
44 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
45 |
"object": "chat.completion.chunk",
|
|
|
48 |
"choices": [
|
49 |
{
|
50 |
"index": 0,
|
51 |
+
"delta": {"content": cleaned_content, "role": "assistant"},
|
52 |
"finish_reason": finish_reason,
|
53 |
}
|
54 |
],
|
|
|
68 |
"data": {
|
69 |
"imageBase64": message.content[1]["image_url"]["url"],
|
70 |
"fileText": "",
|
71 |
+
"title": "snapshot",
|
72 |
},
|
73 |
}
|
74 |
else:
|
|
|
112 |
async for line in response.aiter_lines():
|
113 |
timestamp = int(datetime.now().timestamp())
|
114 |
if line:
|
115 |
+
content = line
|
116 |
if content.startswith("$@$v=undefined-rv1$@$"):
|
117 |
+
content = content[21:]
|
118 |
+
yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
|
|
|
|
|
119 |
yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
|
120 |
yield "data: [DONE]\n\n"
|
121 |
except httpx.HTTPStatusError as e:
|
122 |
logger.error(f"HTTP error occurred: {e}")
|
123 |
+
raise HTTPException(status_code=e.response.status_code, detail=str(e))
|
124 |
except httpx.RequestError as e:
|
125 |
logger.error(f"Error occurred during request: {e}")
|
126 |
+
raise HTTPException(status_code=500, detail=str(e))
|
127 |
|
128 |
async def process_non_streaming_response(request: ChatRequest):
|
129 |
agent_mode = AGENT_MODE.get(request.model, {})
|
|
|
160 |
full_response += chunk
|
161 |
except httpx.HTTPStatusError as e:
|
162 |
logger.error(f"HTTP error occurred: {e}")
|
163 |
+
raise HTTPException(status_code=e.response.status_code, detail=str(e))
|
164 |
except httpx.RequestError as e:
|
165 |
logger.error(f"Error occurred during request: {e}")
|
166 |
+
raise HTTPException(status_code=500, detail=str(e))
|
167 |
if full_response.startswith("$@$v=undefined-rv1$@$"):
|
168 |
full_response = full_response[21:]
|
169 |
|
170 |
+
# Clean the content to remove extra blank lines but preserve formatting
|
171 |
+
cleaned_response = clean_content(full_response)
|
172 |
+
|
173 |
return {
|
174 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
175 |
"object": "chat.completion",
|
|
|
178 |
"choices": [
|
179 |
{
|
180 |
"index": 0,
|
181 |
+
"message": {"role": "assistant", "content": cleaned_response},
|
182 |
"finish_reason": "stop",
|
183 |
}
|
184 |
],
|