Niansuh committed
Commit 9e790d4 · verified · 1 parent: de93143

Update api/utils.py

Files changed (1)
  1. api/utils.py +54 -56
api/utils.py CHANGED
@@ -2,10 +2,6 @@
 
 from datetime import datetime
 import json
-import uuid
-import asyncio
-import random
-import string
 from typing import Any, Dict, Optional
 
 import httpx
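
One caution on this hunk: uuid and string are genuinely dead once generate_chat_id is deleted below, but random and asyncio are still exercised by the 'o1-preview' delay that this same diff keeps (random.randint(...), await asyncio.sleep(...)). Assuming api/utils.py has no other import of those two modules, an import block consistent with the retained code would look like this sketch, not what this commit ships:

# Imports consistent with the code kept by this commit; uuid/string stay gone,
# but asyncio/random are still required by the 'o1-preview' delay logic.
from datetime import datetime
import json
import asyncio  # await asyncio.sleep(delay_seconds)
import random   # random.randint(1, 60) and random.randint(20, 60)
from typing import Any, Dict, Optional

import httpx
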
@@ -31,12 +27,6 @@ logger = setup_logger(__name__)
 bearer_scheme = HTTPBearer()
 
 
-def generate_chat_id(length: int = 7) -> str:
-    """Generate a random alphanumeric chat ID."""
-    characters = string.ascii_letters + string.digits
-    return ''.join(random.choices(characters, k=length))
-
-
 def create_chat_completion_data(
     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
 ) -> Dict[str, Any]:
@@ -66,33 +56,39 @@ def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(bearer_scheme)):
     return credentials.credentials
 
 
-def message_to_dict(message, model_prefix: Optional[str] = None):
+def message_to_dict(message):
     """
     Convert a message object to a dictionary suitable for the API request.
-    Ensures base64 data and optional model prefix are handled.
+    Handles different content types gracefully.
     """
+    message_dict = {"role": message.role}
+
     if isinstance(message.content, str):
-        content = message.content
+        message_dict["content"] = message.content
     elif isinstance(message.content, list):
-        content = message.content[0].get("text", "")
+        # Handle list content more robustly
+        try:
+            if len(message.content) >= 2:
+                # Assuming the first element has 'text' and the second has 'image_url'
+                text_content = message.content[0].get("text", "")
+                image_url = message.content[1].get("image_url", {}).get("url", "")
+                message_dict["content"] = text_content
+                message_dict["data"] = {
+                    "imageBase64": image_url,
+                    "fileText": "",
+                    "title": "snapshot",
+                }
+            else:
+                # Fallback if the list doesn't have expected structure
+                message_dict["content"] = json.dumps(message.content)
+        except (AttributeError, KeyError, TypeError) as e:
+            logger.error(f"Error parsing message content: {e}")
+            message_dict["content"] = "Invalid message format."
     else:
-        content = str(message.content)
-
-    if model_prefix:
-        content = f"{model_prefix} {content}"
+        # Fallback for unexpected content types
+        message_dict["content"] = str(message.content)
 
-    if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
-        return {
-            "role": message.role,
-            "content": content,
-            "data": {
-                "imageBase64": message.content[1]["image_url"]["url"],
-                "fileText": "",
-                "title": "snapshot",
-            },
-        }
-
-    return {"role": message.role, "content": content}
+    return message_dict
 
 
 def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
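
To make the rewritten converter concrete, here is a hedged usage sketch; the SimpleNamespace stand-ins and the api.utils import path are assumptions for illustration, since the real messages are presumably Pydantic models carried by ChatRequest:

# Hypothetical demo of the rewritten message_to_dict.
from types import SimpleNamespace

from api.utils import message_to_dict  # assumption: package layout api/utils.py

plain = SimpleNamespace(role="user", content="Hello")
print(message_to_dict(plain))
# {'role': 'user', 'content': 'Hello'}

vision = SimpleNamespace(
    role="user",
    content=[
        {"type": "text", "text": "Describe this image"},
        {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBORw0..."}},
    ],
)
print(message_to_dict(vision))
# {'role': 'user', 'content': 'Describe this image',
#  'data': {'imageBase64': 'data:image/png;base64,iVBORw0...', 'fileText': '', 'title': 'snapshot'}}

Note that a one-element list (text only, no image) does not take the text branch: len(message.content) >= 2 fails and the content is serialized with json.dumps instead, which callers may want to be aware of.
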
@@ -103,8 +99,6 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
     return content
 
 
-
-
 def get_agent_mode_config(model: str) -> Dict[str, Any]:
     """
     Retrieve the agent mode configuration.
@@ -129,19 +123,22 @@ def get_trending_agent_mode_config(model: str) -> Dict[str, Any]:
 
 async def process_streaming_response(request: ChatRequest):
     """Process a streaming response for a chat completion request."""
-    chat_id = generate_chat_id()
-    referer_url = get_referer_url(chat_id, request.model)
-    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
+    # No chat_id generation
+    # referer_url is not used without MODEL_REFERERS
+
+    logger.info(f"Processing streaming request for model: '{request.model}'")
 
     agent_mode = get_agent_mode_config(request.model)
     trending_agent_mode = get_trending_agent_mode_config(request.model)
     model_prefix = MODEL_PREFIXES.get(request.model, "")
 
-    headers_api_chat = headers  # Assuming 'headers' from config.py is suitable
+    # Use headers from config.py
+    headers_api_chat = headers
 
+    # Introduce delay for specific models if needed
     if request.model == 'o1-preview':
         delay_seconds = random.randint(1, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
         await asyncio.sleep(delay_seconds)
 
     json_data = {
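
The randomized pause above is the only model-specific throttle in this file; isolated from the handler, the pattern is just the following minimal, self-contained sketch (not the project's code):

# Minimal sketch of the jittered pre-request delay used for 'o1-preview'.
import asyncio
import random

async def jittered_delay(lo: int, hi: int) -> int:
    """Sleep a random whole number of seconds in [lo, hi]; return the delay."""
    delay_seconds = random.randint(lo, hi)
    await asyncio.sleep(delay_seconds)
    return delay_seconds

# The streaming path uses randint(1, 60); the non-streaming path uses randint(20, 60).
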
@@ -151,11 +148,10 @@ async def process_streaming_response(request: ChatRequest):
         "clickedForceWebSearch": False,
         "codeModelMode": True,
         "githubToken": None,
-        "id": chat_id,
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
+        "messages": [message_to_dict(msg) for msg in request.messages],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -196,31 +192,31 @@ async def process_streaming_response(request: ChatRequest):
                 yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                 yield "data: [DONE]\n\n"
         except httpx.HTTPStatusError as e:
-            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
-            raise HTTPException(status_code=e.response.status_code, detail=str(e))
+            logger.error(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
+            raise HTTPException(status_code=e.response.status_code, detail="Error from upstream service.")
         except httpx.RequestError as e:
-            logger.error(f"Request error occurred for Chat ID {chat_id}: {e}")
-            raise HTTPException(status_code=500, detail=str(e))
+            logger.error(f"Request error occurred: {e}")
+            raise HTTPException(status_code=500, detail="Internal server error.")
         except Exception as e:
-            logger.error(f"Unexpected error for Chat ID {chat_id}: {e}")
-            raise HTTPException(status_code=500, detail=str(e))
+            logger.error(f"Unexpected error: {e}")
+            raise HTTPException(status_code=500, detail="Internal server error.")
 
 
 async def process_non_streaming_response(request: ChatRequest):
     """Process a non-streaming response for a chat completion request."""
-    chat_id = generate_chat_id()
-    referer_url = get_referer_url(chat_id, request.model)
-    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
+    logger.info(f"Processing non-streaming request for model: '{request.model}'")
 
     agent_mode = get_agent_mode_config(request.model)
     trending_agent_mode = get_trending_agent_mode_config(request.model)
     model_prefix = MODEL_PREFIXES.get(request.model, "")
 
-    headers_api_chat = headers  # Assuming 'headers' from config.py is suitable
+    # Use headers from config.py
+    headers_api_chat = headers
 
+    # Introduce delay for specific models if needed
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
         await asyncio.sleep(delay_seconds)
 
     json_data = {
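
The except ordering above is load-bearing: in httpx, response.raise_for_status() raises httpx.HTTPStatusError for 4xx/5xx replies, while transport failures (DNS errors, timeouts, connection resets) raise subclasses of httpx.RequestError, so the two branches are disjoint. A minimal, self-contained sketch of the same pattern (probe and the example URL are illustrative, not from this repo):

# Standalone sketch of the httpx error-handling pattern this commit tightens.
import httpx

async def probe(url: str) -> str:
    async with httpx.AsyncClient() as client:
        try:
            response = await client.get(url)
            response.raise_for_status()  # 4xx/5xx -> httpx.HTTPStatusError
            return response.text
        except httpx.HTTPStatusError as e:
            # Upstream answered, but with an error status; e.response is available.
            return f"upstream error {e.response.status_code}"
        except httpx.RequestError as e:
            # The request never completed (connect, DNS, timeout, ...).
            return f"transport error: {e!r}"

# e.g. asyncio.run(probe("https://example.com"))

Logging e.response.text while returning a generic detail string, as the commit does, keeps upstream response bodies out of what API clients see.
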
@@ -230,11 +226,10 @@ async def process_non_streaming_response(request: ChatRequest):
         "clickedForceWebSearch": False,
         "codeModelMode": True,
         "githubToken": None,
-        "id": chat_id,
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
+        "messages": [message_to_dict(msg) for msg in request.messages],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -257,11 +252,14 @@ async def process_non_streaming_response(request: ChatRequest):
             async for chunk in response.aiter_text():
                 full_response += chunk
     except httpx.HTTPStatusError as e:
-        logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
-        raise HTTPException(status_code=e.response.status_code, detail=str(e))
+        logger.error(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
+        raise HTTPException(status_code=e.response.status_code, detail="Error from upstream service.")
     except httpx.RequestError as e:
-        logger.error(f"Request error occurred for Chat ID {chat_id}: {e}")
-        raise HTTPException(status_code=500, detail=str(e))
+        logger.error(f"Request error occurred: {e}")
+        raise HTTPException(status_code=500, detail="Internal server error.")
+    except Exception as e:
+        logger.error(f"Unexpected error: {e}")
+        raise HTTPException(status_code=500, detail="Internal server error.")
 
     if full_response.startswith("$@$v=undefined-rv1$@$"):
         full_response = full_response[21:]
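
On the magic-prefix cleanup kept at the end of this hunk: the slice offset matches the marker length exactly, which a quick self-contained check makes obvious; str.removeprefix would express the same intent without the hard-coded 21 (a sketch, not the project's code):

# The upstream marker is exactly 21 characters long, hence full_response[21:].
MARKER = "$@$v=undefined-rv1$@$"
assert len(MARKER) == 21

def strip_marker(full_response: str) -> str:
    # Equivalent to the startswith/[21:] pair, minus the magic number (Python 3.9+).
    return full_response.removeprefix(MARKER)
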
 