Update api/utils.py
Browse files- api/utils.py +9 -35
api/utils.py
CHANGED
@@ -4,42 +4,18 @@ from typing import Any, Dict, Optional
|
|
4 |
import uuid
|
5 |
|
6 |
import httpx
|
7 |
-
from api.config import
|
8 |
-
MODEL_MAPPING,
|
9 |
-
headers,
|
10 |
-
AGENT_MODE,
|
11 |
-
TRENDING_AGENT_MODE,
|
12 |
-
BASE_URL,
|
13 |
-
)
|
14 |
from fastapi import HTTPException
|
15 |
from api.models import ChatRequest
|
|
|
16 |
from api.logger import setup_logger
|
17 |
|
18 |
logger = setup_logger(__name__)
|
19 |
|
20 |
-
def clean_content(content: str) -> str:
    """Collapse runs of blank lines into a single blank line.

    Non-blank lines are kept with their leading indentation intact but
    stripped of trailing whitespace, so code formatting survives the
    cleanup. Consecutive blank (or whitespace-only) lines are reduced to
    one empty line. Lines are rejoined with ``\n``; any trailing newline
    in the input is not reinstated (``splitlines``/``join`` round trip).
    """
    kept = []
    last_was_blank = False
    for raw in content.splitlines():
        if raw.strip():
            kept.append(raw.rstrip())
            last_was_blank = False
        elif not last_was_blank:
            # First blank line of a run survives as an empty string;
            # the rest of the run is dropped below.
            kept.append('')
            last_was_blank = True
    return '\n'.join(kept)
|
37 |
|
38 |
def create_chat_completion_data(
|
39 |
content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
|
40 |
) -> Dict[str, Any]:
|
41 |
-
# Clean the content to remove extra blank lines but preserve formatting
|
42 |
-
cleaned_content = clean_content(content)
|
43 |
return {
|
44 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
45 |
"object": "chat.completion.chunk",
|
@@ -48,19 +24,17 @@ def create_chat_completion_data(
|
|
48 |
"choices": [
|
49 |
{
|
50 |
"index": 0,
|
51 |
-
"delta": {"content":
|
52 |
"finish_reason": finish_reason,
|
53 |
}
|
54 |
],
|
55 |
"usage": None,
|
56 |
}
|
57 |
|
|
|
58 |
def message_to_dict(message):
|
59 |
if isinstance(message.content, str):
|
60 |
-
|
61 |
-
if hasattr(message, 'data'):
|
62 |
-
result['data'] = message.data
|
63 |
-
return result
|
64 |
elif isinstance(message.content, list) and len(message.content) == 2:
|
65 |
return {
|
66 |
"role": message.role,
|
@@ -74,6 +48,7 @@ def message_to_dict(message):
|
|
74 |
else:
|
75 |
return {"role": message.role, "content": message.content}
|
76 |
|
|
|
77 |
async def process_streaming_response(request: ChatRequest):
|
78 |
agent_mode = AGENT_MODE.get(request.model, {})
|
79 |
trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
|
@@ -116,6 +91,7 @@ async def process_streaming_response(request: ChatRequest):
|
|
116 |
if content.startswith("$@$v=undefined-rv1$@$"):
|
117 |
content = content[21:]
|
118 |
yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
|
|
|
119 |
yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
|
120 |
yield "data: [DONE]\n\n"
|
121 |
except httpx.HTTPStatusError as e:
|
@@ -125,6 +101,7 @@ async def process_streaming_response(request: ChatRequest):
|
|
125 |
logger.error(f"Error occurred during request: {e}")
|
126 |
raise HTTPException(status_code=500, detail=str(e))
|
127 |
|
|
|
128 |
async def process_non_streaming_response(request: ChatRequest):
|
129 |
agent_mode = AGENT_MODE.get(request.model, {})
|
130 |
trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
|
@@ -167,9 +144,6 @@ async def process_non_streaming_response(request: ChatRequest):
|
|
167 |
if full_response.startswith("$@$v=undefined-rv1$@$"):
|
168 |
full_response = full_response[21:]
|
169 |
|
170 |
-
# Clean the content to remove extra blank lines but preserve formatting
|
171 |
-
cleaned_response = clean_content(full_response)
|
172 |
-
|
173 |
return {
|
174 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
175 |
"object": "chat.completion",
|
@@ -178,7 +152,7 @@ async def process_non_streaming_response(request: ChatRequest):
|
|
178 |
"choices": [
|
179 |
{
|
180 |
"index": 0,
|
181 |
-
"message": {"role": "assistant", "content":
|
182 |
"finish_reason": "stop",
|
183 |
}
|
184 |
],
|
|
|
4 |
import uuid
|
5 |
|
6 |
import httpx
|
7 |
+
from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE, BASE_URL
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
from fastapi import HTTPException
|
9 |
from api.models import ChatRequest
|
10 |
+
|
11 |
from api.logger import setup_logger
|
12 |
|
13 |
logger = setup_logger(__name__)
|
14 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
15 |
|
16 |
def create_chat_completion_data(
|
17 |
content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
|
18 |
) -> Dict[str, Any]:
|
|
|
|
|
19 |
return {
|
20 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
21 |
"object": "chat.completion.chunk",
|
|
|
24 |
"choices": [
|
25 |
{
|
26 |
"index": 0,
|
27 |
+
"delta": {"content": content, "role": "assistant"},
|
28 |
"finish_reason": finish_reason,
|
29 |
}
|
30 |
],
|
31 |
"usage": None,
|
32 |
}
|
33 |
|
34 |
+
|
35 |
def message_to_dict(message):
|
36 |
if isinstance(message.content, str):
|
37 |
+
return {"role": message.role, "content": message.content}
|
|
|
|
|
|
|
38 |
elif isinstance(message.content, list) and len(message.content) == 2:
|
39 |
return {
|
40 |
"role": message.role,
|
|
|
48 |
else:
|
49 |
return {"role": message.role, "content": message.content}
|
50 |
|
51 |
+
|
52 |
async def process_streaming_response(request: ChatRequest):
|
53 |
agent_mode = AGENT_MODE.get(request.model, {})
|
54 |
trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
|
|
|
91 |
if content.startswith("$@$v=undefined-rv1$@$"):
|
92 |
content = content[21:]
|
93 |
yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
|
94 |
+
|
95 |
yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
|
96 |
yield "data: [DONE]\n\n"
|
97 |
except httpx.HTTPStatusError as e:
|
|
|
101 |
logger.error(f"Error occurred during request: {e}")
|
102 |
raise HTTPException(status_code=500, detail=str(e))
|
103 |
|
104 |
+
|
105 |
async def process_non_streaming_response(request: ChatRequest):
|
106 |
agent_mode = AGENT_MODE.get(request.model, {})
|
107 |
trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
|
|
|
144 |
if full_response.startswith("$@$v=undefined-rv1$@$"):
|
145 |
full_response = full_response[21:]
|
146 |
|
|
|
|
|
|
|
147 |
return {
|
148 |
"id": f"chatcmpl-{uuid.uuid4()}",
|
149 |
"object": "chat.completion",
|
|
|
152 |
"choices": [
|
153 |
{
|
154 |
"index": 0,
|
155 |
+
"message": {"role": "assistant", "content": full_response},
|
156 |
"finish_reason": "stop",
|
157 |
}
|
158 |
],
|