Niansuh committed on
Commit
5cecd79
·
verified ·
1 Parent(s): 8ad0c8f

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +34 -27
api/utils.py CHANGED
@@ -1,28 +1,32 @@
1
  from datetime import datetime
2
  import json
 
 
 
3
  from typing import Any, Dict, Optional
4
 
5
  import httpx
 
6
  from api.config import (
7
  MODEL_MAPPING,
8
- headers,
 
 
9
  AGENT_MODE,
10
  TRENDING_AGENT_MODE,
11
- BASE_URL,
12
  MODEL_PREFIXES,
13
  MODEL_REFERERS
14
  )
15
- from fastapi import HTTPException
16
  from api.models import ChatRequest
17
-
18
  from api.logger import setup_logger
19
 
20
- import uuid
21
- import asyncio
22
- import random # Newly added imports
23
-
24
  logger = setup_logger(__name__)
25
 
 
 
 
 
 
26
  def create_chat_completion_data(
27
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
28
  ) -> Dict[str, Any]:
@@ -41,6 +45,7 @@ def create_chat_completion_data(
41
  "usage": None,
42
  }
43
 
 
44
  def message_to_dict(message, model_prefix: Optional[str] = None):
45
  if isinstance(message.content, str):
46
  content = message.content
@@ -63,6 +68,7 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
63
  else:
64
  return {"role": message.role, "content": message.content}
65
 
 
66
  def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
67
  """Remove the model prefix from the response content if present."""
68
  if model_prefix and content.startswith(model_prefix):
@@ -71,16 +77,17 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
71
  logger.debug("No prefix to strip from content.")
72
  return content
73
 
 
74
  async def process_streaming_response(request: ChatRequest):
 
75
  agent_mode = AGENT_MODE.get(request.model, {})
76
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
77
  model_prefix = MODEL_PREFIXES.get(request.model, "")
78
  referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
79
  referer_url = f"{BASE_URL}{referer_path}"
80
 
81
- # Update headers with dynamic Referer
82
- dynamic_headers = headers.copy()
83
- dynamic_headers['Referer'] = referer_url
84
 
85
  # Introduce delay for 'o1-preview' model
86
  if request.model == 'o1-preview':
@@ -89,6 +96,7 @@ async def process_streaming_response(request: ChatRequest):
89
  await asyncio.sleep(delay_seconds)
90
 
91
  json_data = {
 
92
  "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
93
  "previewToken": None,
94
  "userId": None,
@@ -116,7 +124,7 @@ async def process_streaming_response(request: ChatRequest):
116
  async with client.stream(
117
  "POST",
118
  f"{BASE_URL}/api/chat",
119
- headers=dynamic_headers,
120
  json=json_data,
121
  timeout=100,
122
  ) as response:
@@ -134,30 +142,28 @@ async def process_streaming_response(request: ChatRequest):
134
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
135
  yield "data: [DONE]\n\n"
136
  except httpx.HTTPStatusError as e:
137
- logger.error(f"HTTP error occurred: {e}")
138
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
139
  except httpx.RequestError as e:
140
- logger.error(f"Error occurred during request: {e}")
141
  raise HTTPException(status_code=500, detail=str(e))
142
 
 
143
  async def process_non_streaming_response(request: ChatRequest):
 
144
  agent_mode = AGENT_MODE.get(request.model, {})
145
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
146
  model_prefix = MODEL_PREFIXES.get(request.model, "")
147
  referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
148
  referer_url = f"{BASE_URL}{referer_path}"
 
149
 
150
- # Update headers with dynamic Referer
151
- dynamic_headers = headers.copy()
152
- dynamic_headers['Referer'] = referer_url
153
-
154
- # Introduce delay for 'o1-preview' model
155
- if request.model == 'o1-preview':
156
- delay_seconds = random.randint(20, 60)
157
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'")
158
- await asyncio.sleep(delay_seconds)
159
 
160
  json_data = {
 
161
  "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
162
  "previewToken": None,
163
  "userId": None,
@@ -179,20 +185,21 @@ async def process_non_streaming_response(request: ChatRequest):
179
  "mobileClient": False,
180
  "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
181
  }
 
182
  full_response = ""
183
  async with httpx.AsyncClient() as client:
184
  try:
185
  async with client.stream(
186
- method="POST", url=f"{BASE_URL}/api/chat", headers=dynamic_headers, json=json_data
187
  ) as response:
188
  response.raise_for_status()
189
  async for chunk in response.aiter_text():
190
  full_response += chunk
191
  except httpx.HTTPStatusError as e:
192
- logger.error(f"HTTP error occurred: {e}")
193
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
194
  except httpx.RequestError as e:
195
- logger.error(f"Error occurred during request: {e}")
196
  raise HTTPException(status_code=500, detail=str(e))
197
  if full_response.startswith("$@$v=undefined-rv1$@$"):
198
  full_response = full_response[21:]
@@ -213,4 +220,4 @@ async def process_non_streaming_response(request: ChatRequest):
213
  }
214
  ],
215
  "usage": None,
216
- }
 
1
  from datetime import datetime
2
  import json
3
+ import uuid
4
+ import asyncio
5
+ import random
6
  from typing import Any, Dict, Optional
7
 
8
  import httpx
9
+ from fastapi import HTTPException
10
  from api.config import (
11
  MODEL_MAPPING,
12
+ get_headers_api_chat,
13
+ get_headers_chat,
14
+ BASE_URL,
15
  AGENT_MODE,
16
  TRENDING_AGENT_MODE,
 
17
  MODEL_PREFIXES,
18
  MODEL_REFERERS
19
  )
 
20
  from api.models import ChatRequest
 
21
  from api.logger import setup_logger
22
 
 
 
 
 
23
  logger = setup_logger(__name__)
24
 
25
def generate_chat_id() -> str:
    """Return a fresh, unique chat identifier of the form ``chat-<uuid4>``."""
    return "chat-" + str(uuid.uuid4())
28
+
29
+ # Helper function to create chat completion data
30
  def create_chat_completion_data(
31
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
32
  ) -> Dict[str, Any]:
 
45
  "usage": None,
46
  }
47
 
48
+ # Function to convert message to dictionary format with optional model prefix
49
  def message_to_dict(message, model_prefix: Optional[str] = None):
50
  if isinstance(message.content, str):
51
  content = message.content
 
68
  else:
69
  return {"role": message.role, "content": message.content}
70
 
71
+ # Function to strip model prefix from content if present
72
  def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
73
  """Remove the model prefix from the response content if present."""
74
  if model_prefix and content.startswith(model_prefix):
 
77
  logger.debug("No prefix to strip from content.")
78
  return content
79
 
80
+ # Process streaming response with headers from config.py
81
  async def process_streaming_response(request: ChatRequest):
82
+ chat_id = generate_chat_id()
83
  agent_mode = AGENT_MODE.get(request.model, {})
84
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
85
  model_prefix = MODEL_PREFIXES.get(request.model, "")
86
  referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
87
  referer_url = f"{BASE_URL}{referer_path}"
88
 
89
+ # Generate headers for API chat request
90
+ headers_api_chat = get_headers_api_chat(referer_url)
 
91
 
92
  # Introduce delay for 'o1-preview' model
93
  if request.model == 'o1-preview':
 
96
  await asyncio.sleep(delay_seconds)
97
 
98
  json_data = {
99
+ "id": chat_id,
100
  "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
101
  "previewToken": None,
102
  "userId": None,
 
124
  async with client.stream(
125
  "POST",
126
  f"{BASE_URL}/api/chat",
127
+ headers=headers_api_chat,
128
  json=json_data,
129
  timeout=100,
130
  ) as response:
 
142
  yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
143
  yield "data: [DONE]\n\n"
144
  except httpx.HTTPStatusError as e:
145
+ logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
146
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
147
  except httpx.RequestError as e:
148
+ logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
149
  raise HTTPException(status_code=500, detail=str(e))
150
 
151
+ # Process non-streaming response with headers from config.py
152
  async def process_non_streaming_response(request: ChatRequest):
153
+ chat_id = generate_chat_id()
154
  agent_mode = AGENT_MODE.get(request.model, {})
155
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
156
  model_prefix = MODEL_PREFIXES.get(request.model, "")
157
  referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
158
  referer_url = f"{BASE_URL}{referer_path}"
159
+ chat_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}"
160
 
161
+ # Generate headers for API chat request and chat request
162
+ headers_api_chat = get_headers_api_chat(referer_url)
163
+ headers_chat = get_headers_chat(chat_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
 
 
 
 
 
 
164
 
165
  json_data = {
166
+ "id": chat_id,
167
  "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
168
  "previewToken": None,
169
  "userId": None,
 
185
  "mobileClient": False,
186
  "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
187
  }
188
+
189
  full_response = ""
190
  async with httpx.AsyncClient() as client:
191
  try:
192
  async with client.stream(
193
+ method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
194
  ) as response:
195
  response.raise_for_status()
196
  async for chunk in response.aiter_text():
197
  full_response += chunk
198
  except httpx.HTTPStatusError as e:
199
+ logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
200
  raise HTTPException(status_code=e.response.status_code, detail=str(e))
201
  except httpx.RequestError as e:
202
+ logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
203
  raise HTTPException(status_code=500, detail=str(e))
204
  if full_response.startswith("$@$v=undefined-rv1$@$"):
205
  full_response = full_response[21:]
 
220
  }
221
  ],
222
  "usage": None,
223
+ }