Niansuh committed on
Commit
01607ce
·
verified ·
1 Parent(s): 41c59b2

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +22 -21
api/utils.py CHANGED
@@ -19,9 +19,13 @@ from api.config import (
19
  MODEL_REFERERS
20
  )
21
  from api.models import ChatRequest
22
- from api.logger import setup_logger
23
-
24
- logger = setup_logger(__name__)
 
 
 
 
25
 
26
  # Helper function to create a random alphanumeric chat ID
27
  def generate_chat_id(length: int = 7) -> str:
@@ -74,30 +78,29 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
74
  def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
75
  """Remove the model prefix from the response content if present."""
76
  if model_prefix and content.startswith(model_prefix):
77
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
78
  return content[len(model_prefix):].strip()
79
- logger.debug("No prefix to strip from content.")
80
  return content
81
 
82
  # Process streaming response with headers from config.py
83
  async def process_streaming_response(request: ChatRequest):
84
  chat_id = generate_chat_id()
85
  chat_url = f"/chat/{chat_id}?model={request.model}"
86
- logger.info(f"Generated Chat ID: {chat_id} - URL: {chat_url}")
87
 
88
  agent_mode = AGENT_MODE.get(request.model, {})
89
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
90
  model_prefix = MODEL_PREFIXES.get(request.model, "")
91
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
 
 
92
  referer_url = f"{BASE_URL}{referer_path}"
93
 
94
- # Generate headers for API chat request with dynamic Referer
95
  headers_api_chat = get_headers_api_chat(referer_url)
96
 
97
- # Introduce delay for 'o1-preview' model
98
  if request.model == 'o1-preview':
99
  delay_seconds = random.randint(20, 60)
100
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
101
  await asyncio.sleep(delay_seconds)
102
 
103
  json_data = {
@@ -140,39 +143,38 @@ async def process_streaming_response(request: ChatRequest):
140
  content = line
141
  if content.startswith("$@$v=undefined-rv1$@$"):
142
  content = content[21:]
143
- # Strip the model prefix from the response content
144
  cleaned_content = strip_model_prefix(content, model_prefix)
145
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
146
 
147
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
148
  yield "data: [DONE]\n\n"
149
  except httpx.HTTPStatusError as e:
150
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
151
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
152
  except httpx.RequestError as e:
153
- logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
154
  raise HTTPException(status_code=500, detail=str(e))
155
 
156
  # Process non-streaming response with headers from config.py
157
  async def process_non_streaming_response(request: ChatRequest):
158
  chat_id = generate_chat_id()
159
  chat_url = f"/chat/{chat_id}?model={request.model}"
160
- logger.info(f"Generated Chat ID: {chat_id} - URL: {chat_url}")
161
 
162
  agent_mode = AGENT_MODE.get(request.model, {})
163
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
164
  model_prefix = MODEL_PREFIXES.get(request.model, "")
165
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
 
 
166
  referer_url = f"{BASE_URL}{referer_path}"
167
 
168
- # Generate headers for API chat request and chat request with dynamic Referer
169
  headers_api_chat = get_headers_api_chat(referer_url)
170
  headers_chat = get_headers_chat(chat_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
171
 
172
- # Introduce delay for 'o1-preview' model
173
  if request.model == 'o1-preview':
174
  delay_seconds = random.randint(20, 60)
175
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
176
  await asyncio.sleep(delay_seconds)
177
 
178
  json_data = {
@@ -209,15 +211,14 @@ async def process_non_streaming_response(request: ChatRequest):
209
  async for chunk in response.aiter_text():
210
  full_response += chunk
211
  except httpx.HTTPStatusError as e:
212
- logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
213
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
214
  except httpx.RequestError as e:
215
- logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
216
  raise HTTPException(status_code=500, detail=str(e))
217
  if full_response.startswith("$@$v=undefined-rv1$@$"):
218
  full_response = full_response[21:]
219
 
220
- # Strip the model prefix from the full response
221
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
222
 
223
  return {
 
19
  MODEL_REFERERS
20
  )
21
  from api.models import ChatRequest
22
+ from api.logger import (
23
+ log_generated_chat_id,
24
+ log_model_delay,
25
+ log_http_error,
26
+ log_request_error,
27
+ log_strip_prefix
28
+ )
29
 
30
  # Helper function to create a random alphanumeric chat ID
31
  def generate_chat_id(length: int = 7) -> str:
 
78
  def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
79
  """Remove the model prefix from the response content if present."""
80
  if model_prefix and content.startswith(model_prefix):
81
+ log_strip_prefix(model_prefix)
82
  return content[len(model_prefix):].strip()
 
83
  return content
84
 
85
  # Process streaming response with headers from config.py
86
  async def process_streaming_response(request: ChatRequest):
87
  chat_id = generate_chat_id()
88
  chat_url = f"/chat/{chat_id}?model={request.model}"
89
+ log_generated_chat_id(chat_id, chat_url)
90
 
91
  agent_mode = AGENT_MODE.get(request.model, {})
92
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
93
  model_prefix = MODEL_PREFIXES.get(request.model, "")
94
+
95
+ # Retrieve referer based on the model specified in the request
96
+ referer_path = MODEL_REFERERS.get(request.model, "/?model=default")
97
  referer_url = f"{BASE_URL}{referer_path}"
98
 
 
99
  headers_api_chat = get_headers_api_chat(referer_url)
100
 
 
101
  if request.model == 'o1-preview':
102
  delay_seconds = random.randint(20, 60)
103
+ log_model_delay(delay_seconds, chat_id, request.model)
104
  await asyncio.sleep(delay_seconds)
105
 
106
  json_data = {
 
143
  content = line
144
  if content.startswith("$@$v=undefined-rv1$@$"):
145
  content = content[21:]
 
146
  cleaned_content = strip_model_prefix(content, model_prefix)
147
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
148
 
149
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
150
  yield "data: [DONE]\n\n"
151
  except httpx.HTTPStatusError as e:
152
+ log_http_error(chat_id, e)
153
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
154
  except httpx.RequestError as e:
155
+ log_request_error(chat_id, e)
156
  raise HTTPException(status_code=500, detail=str(e))
157
 
158
  # Process non-streaming response with headers from config.py
159
  async def process_non_streaming_response(request: ChatRequest):
160
  chat_id = generate_chat_id()
161
  chat_url = f"/chat/{chat_id}?model={request.model}"
162
+ log_generated_chat_id(chat_id, chat_url)
163
 
164
  agent_mode = AGENT_MODE.get(request.model, {})
165
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
166
  model_prefix = MODEL_PREFIXES.get(request.model, "")
167
+
168
+ # Retrieve referer based on the model specified in the request
169
+ referer_path = MODEL_REFERERS.get(request.model, "/?model=default")
170
  referer_url = f"{BASE_URL}{referer_path}"
171
 
 
172
  headers_api_chat = get_headers_api_chat(referer_url)
173
  headers_chat = get_headers_chat(chat_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
174
 
 
175
  if request.model == 'o1-preview':
176
  delay_seconds = random.randint(20, 60)
177
+ log_model_delay(delay_seconds, chat_id, request.model)
178
  await asyncio.sleep(delay_seconds)
179
 
180
  json_data = {
 
211
  async for chunk in response.aiter_text():
212
  full_response += chunk
213
  except httpx.HTTPStatusError as e:
214
+ log_http_error(chat_id, e)
215
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
216
  except httpx.RequestError as e:
217
+ log_request_error(chat_id, e)
218
  raise HTTPException(status_code=500, detail=str(e))
219
  if full_response.startswith("$@$v=undefined-rv1$@$"):
220
  full_response = full_response[21:]
221
 
 
222
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
223
 
224
  return {