Niansuh committed on
Commit a897190 · verified · 1 Parent(s): a99b7f4

Update api/utils.py

Files changed (1)
  1. api/utils.py +51 -13
api/utils.py CHANGED
@@ -26,14 +26,19 @@ from api import validate
 
 logger = setup_logger(__name__)
 
+
 # Helper function to create a random alphanumeric chat ID
 def generate_chat_id(length: int = 7) -> str:
     characters = string.ascii_letters + string.digits
     return ''.join(random.choices(characters, k=length))
 
+
 # Helper function to create chat completion data
 def create_chat_completion_data(
-    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
+    content: str,
+    model: str,
+    timestamp: int,
+    finish_reason: Optional[str] = None
 ) -> Dict[str, Any]:
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
@@ -50,12 +55,21 @@ def create_chat_completion_data(
         "usage": None,
     }
 
+
 # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
 def message_to_dict(message, model_prefix: Optional[str] = None):
-    content = message.content if isinstance(message.content, str) else message.content[0]["text"]
+    content = (
+        message.content
+        if isinstance(message.content, str)
+        else message.content[0]["text"]
+    )
     if model_prefix:
         content = f"{model_prefix} {content}"
-    if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
+    if (
+        isinstance(message.content, list)
+        and len(message.content) == 2
+        and "image_url" in message.content[1]
+    ):
         # Ensure base64 images are always included for all models
         return {
             "role": message.role,
@@ -68,6 +82,7 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
         }
     return {"role": message.role, "content": content}
 
+
 # Function to strip model prefix from content if present
 def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
     """Remove the model prefix from the response content if present."""
@@ -76,6 +91,7 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
         return content[len(model_prefix):].strip()
     return content
 
+
 # Function to get the correct referer URL for logging
 def get_referer_url(chat_id: str, model: str) -> str:
     """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
@@ -83,11 +99,14 @@ def get_referer_url(chat_id: str, model: str) -> str:
         return f"{BASE_URL}/chat/{chat_id}?model={model}"
     return BASE_URL
 
+
 # Process streaming response with headers from config.py
 async def process_streaming_response(request: ChatRequest):
     chat_id = generate_chat_id()
     referer_url = get_referer_url(chat_id, request.model)
-    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
+    logger.info(
+        f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}"
+    )
 
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
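The referer URL shape, assuming the elided guard checks membership in MODEL_REFERERS as the docstring suggests (BASE_URL and MODEL_REFERERS live in config.py and are not part of this diff):

    get_referer_url("aB3kZ9q", "o1-preview")
    # -> f"{BASE_URL}/chat/aB3kZ9q?model=o1-preview" if the model is in MODEL_REFERERS
    get_referer_url("aB3kZ9q", "some-other-model")
    # -> BASE_URL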
@@ -97,7 +116,9 @@ async def process_streaming_response(request: ChatRequest):
 
     if request.model == 'o1-preview':
         delay_seconds = random.randint(1, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
+        logger.info(
+            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})"
+        )
         await asyncio.sleep(delay_seconds)
 
     json_data = {
@@ -111,7 +132,9 @@ async def process_streaming_response(request: ChatRequest):
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
+        "messages": [
+            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
+        ],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
@@ -143,14 +166,20 @@ async def process_streaming_response(request: ChatRequest):
                     # Refresh hid and inform the user
                     validate.getHid(True)
                     content = "The HID has been refreshed; please try again.\n"
-                    yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
+                    yield (
+                        f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
+                    )
                     break
                 if content.startswith("$@$v=undefined-rv1$@$"):
                     content = content[21:]
                 cleaned_content = strip_model_prefix(content, model_prefix)
-                yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
+                yield (
+                    f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
+                )
 
-        yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
+        yield (
+            f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
+        )
         yield "data: [DONE]\n\n"
     except httpx.HTTPStatusError as e:
         logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
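What a client consuming this generator receives, sketched with illustrative payloads (field values are placeholders, not captured output):

    # data: {"id": "chatcmpl-...", ..., "usage": null}   <- one line per content chunk
    # data: {"id": "chatcmpl-...", ..., "finish_reason": "stop", ...}
    # data: [DONE]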
@@ -159,11 +188,14 @@ async def process_streaming_response(request: ChatRequest):
         logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
         raise HTTPException(status_code=500, detail=str(e))
 
+
 # Process non-streaming response with headers from config.py
 async def process_non_streaming_response(request: ChatRequest):
     chat_id = generate_chat_id()
     referer_url = get_referer_url(chat_id, request.model)
-    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
+    logger.info(
+        f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}"
+    )
 
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
@@ -171,12 +203,16 @@ async def process_non_streaming_response(request: ChatRequest):
 
     headers_api_chat = get_headers_api_chat(referer_url)
     headers_chat = get_headers_chat(
-        referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""])
+        referer_url,
+        next_action=str(uuid.uuid4()),
+        next_router_state_tree=json.dumps([""]),
     )
 
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
+        logger.info(
+            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})"
+        )
         await asyncio.sleep(delay_seconds)
 
     json_data = {
@@ -190,7 +226,9 @@ async def process_non_streaming_response(request: ChatRequest):
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
-        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
+        "messages": [
+            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
+        ],
         "mobileClient": False,
         "playgroundTemperature": request.temperature,
         "playgroundTopP": request.top_p,
 