Niansuh committed on
Commit
6fc1306
·
verified ·
1 Parent(s): 71425a9

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +32 -26
api/utils.py CHANGED
@@ -18,7 +18,6 @@ from api.config import (
18
  MODEL_PREFIXES,
19
  MODEL_REFERERS
20
  )
21
- from api.models import ChatRequest
22
  from api.logger import (
23
  log_generated_chat_id,
24
  log_model_delay,
@@ -84,23 +83,27 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
84
 
85
  # Process streaming response with headers from config.py
86
  async def process_streaming_response(request: ChatRequest):
87
- chat_id = generate_chat_id()
88
- chat_url = f"/chat/{chat_id}?model={request.model}"
89
- log_generated_chat_id(chat_id, chat_url)
90
-
 
 
 
 
 
 
 
 
91
  agent_mode = AGENT_MODE.get(request.model, {})
92
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
93
  model_prefix = MODEL_PREFIXES.get(request.model, "")
94
-
95
- # Retrieve referer based on the model specified in the request
96
- referer_path = MODEL_REFERERS.get(request.model, "/?model=default")
97
- referer_url = f"{BASE_URL}{referer_path}"
98
 
99
  headers_api_chat = get_headers_api_chat(referer_url)
100
 
101
  if request.model == 'o1-preview':
102
  delay_seconds = random.randint(20, 60)
103
- log_model_delay(delay_seconds, chat_id, request.model)
104
  await asyncio.sleep(delay_seconds)
105
 
106
  json_data = {
@@ -110,7 +113,7 @@ async def process_streaming_response(request: ChatRequest):
110
  "clickedForceWebSearch": False,
111
  "codeModelMode": True,
112
  "githubToken": None,
113
- "id": chat_id,
114
  "isChromeExt": False,
115
  "isMicMode": False,
116
  "maxTokens": request.max_tokens,
@@ -149,32 +152,35 @@ async def process_streaming_response(request: ChatRequest):
149
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
150
  yield "data: [DONE]\n\n"
151
  except httpx.HTTPStatusError as e:
152
- log_http_error(chat_id, e)
153
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
154
  except httpx.RequestError as e:
155
- log_request_error(chat_id, e)
156
  raise HTTPException(status_code=500, detail=str(e))
157
 
158
  # Process non-streaming response with headers from config.py
159
  async def process_non_streaming_response(request: ChatRequest):
160
- chat_id = generate_chat_id()
161
- chat_url = f"/chat/{chat_id}?model={request.model}"
162
- log_generated_chat_id(chat_id, chat_url)
163
-
 
 
 
 
 
 
 
164
  agent_mode = AGENT_MODE.get(request.model, {})
165
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
166
  model_prefix = MODEL_PREFIXES.get(request.model, "")
167
-
168
- # Retrieve referer based on the model specified in the request
169
- referer_path = MODEL_REFERERS.get(request.model, "/?model=default")
170
- referer_url = f"{BASE_URL}{referer_path}"
171
 
172
  headers_api_chat = get_headers_api_chat(referer_url)
173
- headers_chat = get_headers_chat(chat_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
174
 
175
  if request.model == 'o1-preview':
176
  delay_seconds = random.randint(20, 60)
177
- log_model_delay(delay_seconds, chat_id, request.model)
178
  await asyncio.sleep(delay_seconds)
179
 
180
  json_data = {
@@ -184,7 +190,7 @@ async def process_non_streaming_response(request: ChatRequest):
184
  "clickedForceWebSearch": False,
185
  "codeModelMode": True,
186
  "githubToken": None,
187
- "id": chat_id,
188
  "isChromeExt": False,
189
  "isMicMode": False,
190
  "maxTokens": request.max_tokens,
@@ -211,10 +217,10 @@ async def process_non_streaming_response(request: ChatRequest):
211
  async for chunk in response.aiter_text():
212
  full_response += chunk
213
  except httpx.HTTPStatusError as e:
214
- log_http_error(chat_id, e)
215
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
216
  except httpx.RequestError as e:
217
- log_request_error(chat_id, e)
218
  raise HTTPException(status_code=500, detail=str(e))
219
  if full_response.startswith("$@$v=undefined-rv1$@$"):
220
  full_response = full_response[21:]
 
18
  MODEL_PREFIXES,
19
  MODEL_REFERERS
20
  )
 
21
  from api.logger import (
22
  log_generated_chat_id,
23
  log_model_delay,
 
83
 
84
  # Process streaming response with headers from config.py
85
  async def process_streaming_response(request: ChatRequest):
86
+ # Determine if a Chat ID and referer URL should be generated
87
+ if request.model in MODEL_REFERERS:
88
+ chat_id = generate_chat_id()
89
+ chat_url = f"/chat/{chat_id}?model={request.model}"
90
+ log_generated_chat_id(chat_id, chat_url)
91
+
92
+ referer_path = MODEL_REFERERS[request.model]
93
+ referer_url = f"{BASE_URL}{referer_path}"
94
+ else:
95
+ chat_id = None
96
+ referer_url = BASE_URL # Use base URL for models not in MODEL_REFERERS
97
+
98
  agent_mode = AGENT_MODE.get(request.model, {})
99
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
100
  model_prefix = MODEL_PREFIXES.get(request.model, "")
 
 
 
 
101
 
102
  headers_api_chat = get_headers_api_chat(referer_url)
103
 
104
  if request.model == 'o1-preview':
105
  delay_seconds = random.randint(20, 60)
106
+ log_model_delay(delay_seconds, chat_id, request.model if chat_id else "unknown")
107
  await asyncio.sleep(delay_seconds)
108
 
109
  json_data = {
 
113
  "clickedForceWebSearch": False,
114
  "codeModelMode": True,
115
  "githubToken": None,
116
+ "id": chat_id if chat_id else "unknown",
117
  "isChromeExt": False,
118
  "isMicMode": False,
119
  "maxTokens": request.max_tokens,
 
152
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
153
  yield "data: [DONE]\n\n"
154
  except httpx.HTTPStatusError as e:
155
+ log_http_error(chat_id if chat_id else "unknown", e)
156
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
157
  except httpx.RequestError as e:
158
+ log_request_error(chat_id if chat_id else "unknown", e)
159
  raise HTTPException(status_code=500, detail=str(e))
160
 
161
  # Process non-streaming response with headers from config.py
162
  async def process_non_streaming_response(request: ChatRequest):
163
+ if request.model in MODEL_REFERERS:
164
+ chat_id = generate_chat_id()
165
+ chat_url = f"/chat/{chat_id}?model={request.model}"
166
+ log_generated_chat_id(chat_id, chat_url)
167
+
168
+ referer_path = MODEL_REFERERS[request.model]
169
+ referer_url = f"{BASE_URL}{referer_path}"
170
+ else:
171
+ chat_id = None
172
+ referer_url = BASE_URL
173
+
174
  agent_mode = AGENT_MODE.get(request.model, {})
175
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
176
  model_prefix = MODEL_PREFIXES.get(request.model, "")
 
 
 
 
177
 
178
  headers_api_chat = get_headers_api_chat(referer_url)
179
+ headers_chat = get_headers_chat(chat_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""])) if chat_id else None
180
 
181
  if request.model == 'o1-preview':
182
  delay_seconds = random.randint(20, 60)
183
+ log_model_delay(delay_seconds, chat_id if chat_id else "unknown", request.model)
184
  await asyncio.sleep(delay_seconds)
185
 
186
  json_data = {
 
190
  "clickedForceWebSearch": False,
191
  "codeModelMode": True,
192
  "githubToken": None,
193
+ "id": chat_id if chat_id else "unknown",
194
  "isChromeExt": False,
195
  "isMicMode": False,
196
  "maxTokens": request.max_tokens,
 
217
  async for chunk in response.aiter_text():
218
  full_response += chunk
219
  except httpx.HTTPStatusError as e:
220
+ log_http_error(chat_id if chat_id else "unknown", e)
221
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
222
  except httpx.RequestError as e:
223
+ log_request_error(chat_id if chat_id else "unknown", e)
224
  raise HTTPException(status_code=500, detail=str(e))
225
  if full_response.startswith("$@$v=undefined-rv1$@$"):
226
  full_response = full_response[21:]