Niansuh committed on
Commit
4d2cc4f
·
verified ·
1 Parent(s): 7875e72

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +49 -18
api/utils.py CHANGED
@@ -8,7 +8,15 @@ from fastapi import Depends, HTTPException
8
  from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
9
 
10
  from api import validate
11
- from api.config import APP_SECRET, BASE_URL, MODEL_MAPPING, headers
 
 
 
 
 
 
 
 
12
  from api.models import ChatRequest
13
  from api.logger import setup_logger
14
 
@@ -38,30 +46,46 @@ def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(securi
38
  raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
39
  return credentials.credentials
40
 
41
- def message_to_dict(message):
42
- if isinstance(message.content, str):
43
- return {"role": message.role, "content": message.content}
44
- elif isinstance(message.content, list) and len(message.content) == 2:
 
 
 
45
  return {
46
  "role": message.role,
47
- "content": message.content[0]["text"],
48
  "data": {
49
- "imageBase64": message.content[1]["image_url"]["url"],
50
  "fileText": "",
51
  "title": "snapshot",
52
  },
53
  }
54
  else:
55
- return {"role": message.role, "content": message.content}
 
 
 
 
 
 
 
 
 
56
 
57
  async def process_streaming_response(request: ChatRequest):
 
 
 
 
58
  json_data = {
59
- "messages": [message_to_dict(msg) for msg in request.messages],
60
  "previewToken": None,
61
  "userId": None,
62
  "codeModelMode": True,
63
- "agentMode": {},
64
- "trendingAgentMode": {},
65
  "isMicMode": False,
66
  "userSystemPrompt": None,
67
  "maxTokens": request.max_tokens,
@@ -98,9 +122,10 @@ async def process_streaming_response(request: ChatRequest):
98
  yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
99
  break
100
  if content.startswith("$@$v=undefined-rv1$@$"):
101
- yield f"data: {json.dumps(create_chat_completion_data(content[21:], request.model, timestamp))}\n\n"
102
- else:
103
- yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
 
104
 
105
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
106
  yield "data: [DONE]\n\n"
@@ -112,13 +137,17 @@ async def process_streaming_response(request: ChatRequest):
112
  raise HTTPException(status_code=500, detail=str(e))
113
 
114
  async def process_non_streaming_response(request: ChatRequest):
 
 
 
 
115
  json_data = {
116
- "messages": [message_to_dict(msg) for msg in request.messages],
117
  "previewToken": None,
118
  "userId": None,
119
  "codeModelMode": True,
120
- "agentMode": {},
121
- "trendingAgentMode": {},
122
  "isMicMode": False,
123
  "userSystemPrompt": None,
124
  "maxTokens": request.max_tokens,
@@ -155,6 +184,8 @@ async def process_non_streaming_response(request: ChatRequest):
155
  full_response = "hid已刷新,重新对话即可"
156
  if full_response.startswith("$@$v=undefined-rv1$@$"):
157
  full_response = full_response[21:]
 
 
158
  return {
159
  "id": f"chatcmpl-{uuid.uuid4()}",
160
  "object": "chat.completion",
@@ -163,7 +194,7 @@ async def process_non_streaming_response(request: ChatRequest):
163
  "choices": [
164
  {
165
  "index": 0,
166
- "message": {"role": "assistant", "content": full_response},
167
  "finish_reason": "stop",
168
  }
169
  ],
 
8
  from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
9
 
10
  from api import validate
11
+ from api.config import (
12
+ APP_SECRET,
13
+ BASE_URL,
14
+ MODEL_MAPPING,
15
+ AGENT_MODE,
16
+ TRENDING_AGENT_MODE,
17
+ MODEL_PREFIXES,
18
+ headers,
19
+ )
20
  from api.models import ChatRequest
21
  from api.logger import setup_logger
22
 
 
46
  raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
47
  return credentials.credentials
48
 
49
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a chat message into the upstream request payload shape.

    A two-element list content whose second item carries an ``image_url``
    is flattened into its text plus an image-data attachment; any other
    content is passed through unchanged. When *model_prefix* is truthy it
    is prepended (space-separated) to the textual content.
    """
    role = message.role
    body = message.content
    is_image_pair = (
        isinstance(body, list)
        and len(body) == 2
        and "image_url" in body[1]
    )
    if not is_image_pair:
        # Plain (or unrecognized) content: optionally prefix, then pass through.
        if model_prefix:
            body = f"{model_prefix} {body}"
        return {"role": role, "content": body}

    # Image message: first element holds the text, second the base64 image URL.
    text = body[0]["text"]
    if model_prefix:
        text = f"{model_prefix} {text}"
    return {
        "role": role,
        "content": text,
        "data": {
            "imageBase64": body[1]["image_url"]["url"],
            "fileText": "",
            "title": "snapshot",
        },
    }
69
+
70
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Strip *model_prefix* from the start of *content*, trimming whitespace.

    Returns *content* unchanged when no prefix is given or it does not match.
    """
    if not model_prefix or not content.startswith(model_prefix):
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
76
 
77
  async def process_streaming_response(request: ChatRequest):
78
+ agent_mode = AGENT_MODE.get(request.model, {})
79
+ trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
80
+ model_prefix = MODEL_PREFIXES.get(request.model, "")
81
+
82
  json_data = {
83
+ "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
84
  "previewToken": None,
85
  "userId": None,
86
  "codeModelMode": True,
87
+ "agentMode": agent_mode,
88
+ "trendingAgentMode": trending_agent_mode,
89
  "isMicMode": False,
90
  "userSystemPrompt": None,
91
  "maxTokens": request.max_tokens,
 
122
  yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
123
  break
124
  if content.startswith("$@$v=undefined-rv1$@$"):
125
+ content = content[21:]
126
+ # Strip model prefix from content
127
+ cleaned_content = strip_model_prefix(content, model_prefix)
128
+ yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
129
 
130
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
131
  yield "data: [DONE]\n\n"
 
137
  raise HTTPException(status_code=500, detail=str(e))
138
 
139
  async def process_non_streaming_response(request: ChatRequest):
140
+ agent_mode = AGENT_MODE.get(request.model, {})
141
+ trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
142
+ model_prefix = MODEL_PREFIXES.get(request.model, "")
143
+
144
  json_data = {
145
+ "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
146
  "previewToken": None,
147
  "userId": None,
148
  "codeModelMode": True,
149
+ "agentMode": agent_mode,
150
+ "trendingAgentMode": trending_agent_mode,
151
  "isMicMode": False,
152
  "userSystemPrompt": None,
153
  "maxTokens": request.max_tokens,
 
184
  full_response = "hid已刷新,重新对话即可"
185
  if full_response.startswith("$@$v=undefined-rv1$@$"):
186
  full_response = full_response[21:]
187
+ # Strip model prefix from full_response
188
+ cleaned_full_response = strip_model_prefix(full_response, model_prefix)
189
  return {
190
  "id": f"chatcmpl-{uuid.uuid4()}",
191
  "object": "chat.completion",
 
194
  "choices": [
195
  {
196
  "index": 0,
197
+ "message": {"role": "assistant", "content": cleaned_full_response},
198
  "finish_reason": "stop",
199
  }
200
  ],