Niansuh committed on
Commit
276ccce
·
verified ·
1 Parent(s): 6aaa586

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +10 -27
api/utils.py CHANGED
@@ -14,9 +14,7 @@ from api.config import (
14
  get_headers_chat,
15
  BASE_URL,
16
  AGENT_MODE,
17
- TRENDING_AGENT_MODE,
18
- MODEL_PREFIXES,
19
- MODEL_REFERERS
20
  )
21
  from api.models import ChatRequest
22
  from api.logger import setup_logger
@@ -48,10 +46,8 @@ def create_chat_completion_data(
48
  }
49
 
50
  # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
51
- def message_to_dict(message, model_prefix: Optional[str] = None):
52
  content = message.content if isinstance(message.content, str) else message.content[0]["text"]
53
- if model_prefix:
54
- content = f"{model_prefix} {content}"
55
  if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
56
  # Ensure base64 images are always included for all models
57
  return {
@@ -66,29 +62,17 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
66
  return {"role": message.role, "content": content}
67
 
68
  # Function to strip model prefix from content if present
69
- def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
70
- """Remove the model prefix from the response content if present."""
71
- if model_prefix and content.startswith(model_prefix):
72
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
73
- return content[len(model_prefix):].strip()
74
- return content
75
-
76
- # Function to get the correct referer URL for logging
77
- def get_referer_url(chat_id: str, model: str) -> str:
78
- """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
79
- if model in MODEL_REFERERS:
80
- return f"{BASE_URL}/chat/{chat_id}?model={model}"
81
- return BASE_URL
82
 
83
  # Process streaming response with headers from config.py
84
  async def process_streaming_response(request: ChatRequest):
85
  chat_id = generate_chat_id()
86
- referer_url = get_referer_url(chat_id, request.model)
87
  logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
88
 
89
  agent_mode = AGENT_MODE.get(request.model, {})
90
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
91
- model_prefix = MODEL_PREFIXES.get(request.model, "")
92
 
93
  headers_api_chat = get_headers_api_chat(referer_url)
94
 
@@ -108,7 +92,7 @@ async def process_streaming_response(request: ChatRequest):
108
  "isChromeExt": False,
109
  "isMicMode": False,
110
  "maxTokens": request.max_tokens,
111
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
112
  "mobileClient": False,
113
  "playgroundTemperature": request.temperature,
114
  "playgroundTopP": request.top_p,
@@ -137,7 +121,7 @@ async def process_streaming_response(request: ChatRequest):
137
  content = line
138
  if content.startswith("$@$v=undefined-rv1$@$"):
139
  content = content[21:]
140
- cleaned_content = strip_model_prefix(content, model_prefix)
141
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
142
 
143
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
@@ -152,12 +136,11 @@ async def process_streaming_response(request: ChatRequest):
152
  # Process non-streaming response with headers from config.py
153
  async def process_non_streaming_response(request: ChatRequest):
154
  chat_id = generate_chat_id()
155
- referer_url = get_referer_url(chat_id, request.model)
156
  logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
157
 
158
  agent_mode = AGENT_MODE.get(request.model, {})
159
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
160
- model_prefix = MODEL_PREFIXES.get(request.model, "")
161
 
162
  headers_api_chat = get_headers_api_chat(referer_url)
163
  headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
@@ -178,7 +161,7 @@ async def process_non_streaming_response(request: ChatRequest):
178
  "isChromeExt": False,
179
  "isMicMode": False,
180
  "maxTokens": request.max_tokens,
181
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
182
  "mobileClient": False,
183
  "playgroundTemperature": request.temperature,
184
  "playgroundTopP": request.top_p,
@@ -209,7 +192,7 @@ async def process_non_streaming_response(request: ChatRequest):
209
  if full_response.startswith("$@$v=undefined-rv1$@$"):
210
  full_response = full_response[21:]
211
 
212
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
213
 
214
  return {
215
  "id": f"chatcmpl-{uuid.uuid4()}",
 
14
  get_headers_chat,
15
  BASE_URL,
16
  AGENT_MODE,
17
+ TRENDING_AGENT_MODE
 
 
18
  )
19
  from api.models import ChatRequest
20
  from api.logger import setup_logger
 
46
  }
47
 
48
  # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
49
+ def message_to_dict(message):
50
  content = message.content if isinstance(message.content, str) else message.content[0]["text"]
 
 
51
  if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
52
  # Ensure base64 images are always included for all models
53
  return {
 
62
  return {"role": message.role, "content": content}
63
 
64
  # Function to strip model prefix from content if present
65
+ def strip_model_prefix(content: str) -> str:
66
+ return content.strip()
 
 
 
 
 
 
 
 
 
 
 
67
 
68
  # Process streaming response with headers from config.py
69
  async def process_streaming_response(request: ChatRequest):
70
  chat_id = generate_chat_id()
71
+ referer_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}"
72
  logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
73
 
74
  agent_mode = AGENT_MODE.get(request.model, {})
75
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
 
76
 
77
  headers_api_chat = get_headers_api_chat(referer_url)
78
 
 
92
  "isChromeExt": False,
93
  "isMicMode": False,
94
  "maxTokens": request.max_tokens,
95
+ "messages": [message_to_dict(msg) for msg in request.messages],
96
  "mobileClient": False,
97
  "playgroundTemperature": request.temperature,
98
  "playgroundTopP": request.top_p,
 
121
  content = line
122
  if content.startswith("$@$v=undefined-rv1$@$"):
123
  content = content[21:]
124
+ cleaned_content = strip_model_prefix(content)
125
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
126
 
127
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
 
136
  # Process non-streaming response with headers from config.py
137
  async def process_non_streaming_response(request: ChatRequest):
138
  chat_id = generate_chat_id()
139
+ referer_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}"
140
  logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
141
 
142
  agent_mode = AGENT_MODE.get(request.model, {})
143
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
 
144
 
145
  headers_api_chat = get_headers_api_chat(referer_url)
146
  headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
 
161
  "isChromeExt": False,
162
  "isMicMode": False,
163
  "maxTokens": request.max_tokens,
164
+ "messages": [message_to_dict(msg) for msg in request.messages],
165
  "mobileClient": False,
166
  "playgroundTemperature": request.temperature,
167
  "playgroundTopP": request.top_p,
 
192
  if full_response.startswith("$@$v=undefined-rv1$@$"):
193
  full_response = full_response[21:]
194
 
195
+ cleaned_full_response = strip_model_prefix(full_response)
196
 
197
  return {
198
  "id": f"chatcmpl-{uuid.uuid4()}",