Niansuh committed on
Commit 8ac4bd6 · verified · 1 Parent(s): ed5df64

Update api/utils.py

Files changed (1)
  1. api/utils.py +10 -26
api/utils.py CHANGED
@@ -34,11 +34,6 @@ REQUEST_LIMIT_PER_MINUTE = int(os.getenv("REQUEST_LIMIT_PER_MINUTE", "10"))
 # Dictionary to track IP addresses and request counts
 request_counts = {}
 
-# Helper function to create a random alphanumeric chat ID
-def generate_chat_id(length: int = 7) -> str:
-    characters = string.ascii_letters + string.digits
-    return ''.join(random.choices(characters, k=length))
-
 # Function to get the IP address of the requester
 def get_client_ip(request: Request) -> str:
     """Retrieve the IP address of the client making the request."""
@@ -114,18 +109,10 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
         return content[len(model_prefix):].strip()
     return content
 
-# Function to get the correct referer URL for logging
-def get_referer_url(chat_id: str, model: str) -> str:
-    """Generate the referer URL based on specific models listed in MODEL_REFERERS."""
-    if model in MODEL_REFERERS:
-        return f"{BASE_URL}/chat/{chat_id}?model={model}"
-    return BASE_URL
-
 # Process streaming response with headers from config.py
 async def process_streaming_response(request: ChatRequest, request_obj: Request):
-    chat_id = generate_chat_id()
-    referer_url = get_referer_url(chat_id, request.model)
-    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
+    referer_url = get_referer_url(request.model)
+    logger.info(f"Processing streaming response - Model: {request.model} - URL: {referer_url}")
 
     # Get the IP address and check rate limit
     client_ip = get_client_ip(request_obj)
@@ -141,7 +128,7 @@ async def process_streaming_response(request: ChatRequest, request_obj: Request):
 
     if request.model == 'o1-preview':
         delay_seconds = random.randint(1, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
         await asyncio.sleep(delay_seconds)
 
     json_data = {
@@ -151,7 +138,6 @@ async def process_streaming_response(request: ChatRequest, request_obj: Request):
         "clickedForceWebSearch": False,
         "codeModelMode": True,
         "githubToken": None,
-        "id": chat_id,
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
@@ -190,17 +176,16 @@ async def process_streaming_response(request: ChatRequest, request_obj: Request):
             yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
             yield "data: [DONE]\n\n"
     except httpx.HTTPStatusError as e:
-        logger.error(f"HTTP error occurred for Chat ID {chat_id} (IP: {client_ip}): {e}")
+        logger.error(f"HTTP error occurred (IP: {client_ip}): {e}")
         raise HTTPException(status_code=e.response.status_code, detail=str(e))
     except httpx.RequestError as e:
-        logger.error(f"Error occurred during request for Chat ID {chat_id} (IP: {client_ip}): {e}")
+        logger.error(f"Error occurred during request (IP: {client_ip}): {e}")
         raise HTTPException(status_code=500, detail=str(e))
 
 # Process non-streaming response with headers from config.py
 async def process_non_streaming_response(request: ChatRequest, request_obj: Request):
-    chat_id = generate_chat_id()
-    referer_url = get_referer_url(chat_id, request.model)
-    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
+    referer_url = get_referer_url(request.model)
+    logger.info(f"Processing non-streaming response - Model: {request.model} - URL: {referer_url}")
 
     # Get the IP address and check rate limit
     client_ip = get_client_ip(request_obj)
@@ -216,7 +201,7 @@ async def process_non_streaming_response(request: ChatRequest, request_obj: Request):
 
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
-        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
         await asyncio.sleep(delay_seconds)
 
     json_data = {
@@ -226,7 +211,6 @@ async def process_non_streaming_response(request: ChatRequest, request_obj: Request):
         "clickedForceWebSearch": False,
         "codeModelMode": True,
         "githubToken": None,
-        "id": chat_id,
         "isChromeExt": False,
         "isMicMode": False,
         "maxTokens": request.max_tokens,
@@ -253,10 +237,10 @@ async def process_non_streaming_response(request: ChatRequest, request_obj: Request):
             async for chunk in response.aiter_text():
                 full_response += chunk
     except httpx.HTTPStatusError as e:
-        logger.error(f"HTTP error occurred for Chat ID {chat_id} (IP: {client_ip}): {e}")
+        logger.error(f"HTTP error occurred (IP: {client_ip}): {e}")
         raise HTTPException(status_code=e.response.status_code, detail=str(e))
     except httpx.RequestError as e:
-        logger.error(f"Error occurred during request for Chat ID {chat_id} (IP: {client_ip}): {e}")
+        logger.error(f"Error occurred during request (IP: {client_ip}): {e}")
         raise HTTPException(status_code=500, detail=str(e))
     if full_response.startswith("$@$v=undefined-rv1$@$"):
         full_response = full_response[21:]
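
Note on the new call signature: the updated handlers call get_referer_url(request.model) with a single argument, while the two-argument helper previously defined in this file is removed, so a one-argument version is presumably provided elsewhere (for example imported from the project's config module alongside BASE_URL and MODEL_REFERERS). A minimal sketch of such a helper, under that assumption and not shown in this commit, could look like:

# Hypothetical one-argument helper -- not part of this diff; assumes BASE_URL and
# MODEL_REFERERS are importable from the project's config module (e.g. api/config.py).
from api.config import BASE_URL, MODEL_REFERERS

def get_referer_url(model: str) -> str:
    """Return a model-specific referer URL for models listed in MODEL_REFERERS, else BASE_URL."""
    if model in MODEL_REFERERS:
        return f"{BASE_URL}/chat?model={model}"
    return BASE_URL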