Niansuh committed on
Commit
a93ac0e
·
verified ·
1 Parent(s): 3bd47a9

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +31 -52
api/utils.py CHANGED
@@ -16,7 +16,7 @@ from api.config import (
16
  AGENT_MODE,
17
  TRENDING_AGENT_MODE,
18
  MODEL_PREFIXES,
19
- MODEL_REFERERS
20
  )
21
  from api.models import ChatRequest
22
  from api.logger import (
@@ -24,15 +24,15 @@ from api.logger import (
24
  log_model_delay,
25
  log_http_error,
26
  log_request_error,
27
- log_strip_prefix
28
  )
29
 
30
# Build a random chat identifier from ASCII letters and digits.
def generate_chat_id(length: int = 7) -> str:
    """Return a random alphanumeric chat ID of *length* characters."""
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choices(alphabet, k=length))
34
 
35
- # Helper function to create chat completion data
36
  def create_chat_completion_data(
37
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
38
  ) -> Dict[str, Any]:
@@ -51,56 +51,39 @@ def create_chat_completion_data(
51
  "usage": None,
52
  }
53
 
54
# Serialize a chat message into the dict shape the upstream API expects,
# optionally injecting a model prefix into the text portion.
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert *message* to a plain dict, handling text and text+image pairs."""
    raw = message.content
    if isinstance(raw, str):
        text = f"{model_prefix} {raw}" if model_prefix else raw
        return {"role": message.role, "content": text}
    if isinstance(raw, list) and len(raw) == 2:
        # Two-part content is treated as text followed by a base64 image.
        text = raw[0]["text"]
        if model_prefix:
            text = f"{model_prefix} {text}"
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": raw[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    # Anything else passes through untouched.
    return {"role": message.role, "content": raw}
76
-
77
# Drop a leading model prefix from a response string, logging when it fires.
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Return *content* without *model_prefix*; unchanged when no prefix matches."""
    if not (model_prefix and content.startswith(model_prefix)):
        return content
    log_strip_prefix(model_prefix, content)
    return content[len(model_prefix):].strip()
84
 
85
- # Process streaming response with headers from config.py
86
  async def process_streaming_response(request: ChatRequest):
87
- chat_id = generate_chat_id()
88
-
89
- # Get referer URL only if model is in MODEL_REFERERS
90
  referer_path = MODEL_REFERERS.get(request.model, "")
91
- referer_url = f"{BASE_URL}{referer_path}" if referer_path else BASE_URL
92
 
93
- # Log with chat ID, model, and referer URL (if present)
94
  log_generated_chat_id_with_referer(chat_id, request.model, referer_url)
95
 
 
96
  agent_mode = AGENT_MODE.get(request.model, {})
97
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
98
  model_prefix = MODEL_PREFIXES.get(request.model, "")
99
 
100
- # Generate headers for API chat request with dynamic Referer
101
  headers_api_chat = get_headers_api_chat(referer_url)
102
 
103
- # Introduce delay for 'o1-preview' model
104
  if request.model == 'o1-preview':
105
  delay_seconds = random.randint(20, 60)
106
  log_model_delay(delay_seconds, request.model, chat_id)
@@ -143,10 +126,7 @@ async def process_streaming_response(request: ChatRequest):
143
  async for line in response.aiter_lines():
144
  timestamp = int(datetime.now().timestamp())
145
  if line:
146
- content = line
147
- if content.startswith("$@$v=undefined-rv1$@$"):
148
- content = content[21:]
149
- # Strip the model prefix from the response content
150
  cleaned_content = strip_model_prefix(content, model_prefix)
151
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
152
 
@@ -159,27 +139,25 @@ async def process_streaming_response(request: ChatRequest):
159
  log_request_error(e, chat_id)
160
  raise HTTPException(status_code=500, detail=str(e))
161
 
162
- # Process non-streaming response with headers from config.py
163
  async def process_non_streaming_response(request: ChatRequest):
164
- chat_id = generate_chat_id()
165
-
166
- # Get referer URL only if model is in MODEL_REFERERS
167
  referer_path = MODEL_REFERERS.get(request.model, "")
168
- referer_url = f"{BASE_URL}{referer_path}" if referer_path else BASE_URL
169
- chat_url = f"{referer_url}/chat/{chat_id}?model={request.model}"
170
 
171
- # Log with chat ID, model, and referer URL (if present)
172
- log_generated_chat_id_with_referer(chat_id, request.model, chat_url)
173
 
 
174
  agent_mode = AGENT_MODE.get(request.model, {})
175
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
176
  model_prefix = MODEL_PREFIXES.get(request.model, "")
177
 
178
- # Generate headers for API chat request and chat request with dynamic Referer
179
  headers_api_chat = get_headers_api_chat(referer_url)
180
- headers_chat = get_headers_chat(chat_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
181
 
182
- # Introduce delay for 'o1-preview' model
183
  if request.model == 'o1-preview':
184
  delay_seconds = random.randint(20, 60)
185
  log_model_delay(delay_seconds, request.model, chat_id)
@@ -224,10 +202,11 @@ async def process_non_streaming_response(request: ChatRequest):
224
  except httpx.RequestError as e:
225
  log_request_error(e, chat_id)
226
  raise HTTPException(status_code=500, detail=str(e))
 
227
  if full_response.startswith("$@$v=undefined-rv1$@$"):
228
  full_response = full_response[21:]
229
 
230
- # Strip the model prefix from the full response
231
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
232
 
233
  return {
 
16
  AGENT_MODE,
17
  TRENDING_AGENT_MODE,
18
  MODEL_PREFIXES,
19
+ MODEL_REFERERS,
20
  )
21
  from api.models import ChatRequest
22
  from api.logger import (
 
24
  log_model_delay,
25
  log_http_error,
26
  log_request_error,
27
+ log_strip_prefix,
28
  )
29
 
30
# Produce a short random ID used to tag a chat session.
def generate_chat_id(length: int = 7) -> str:
    """Return *length* random characters drawn from letters and digits."""
    pool = string.ascii_letters + string.digits
    picked = random.choices(pool, k=length)
    return "".join(picked)
34
 
35
+ # Function to create chat completion data
36
  def create_chat_completion_data(
37
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
38
  ) -> Dict[str, Any]:
 
51
  "usage": None,
52
  }
53
 
54
# Convert a chat message into the dict payload sent upstream.
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Serialize *message* to ``{"role", "content"}``.

    The optional *model_prefix* is prepended only when the content is a
    plain string. Structured (list) content is passed through unchanged:
    running it through the f-string would stringify it and corrupt
    multimodal payloads.
    """
    content = message.content
    # Guard: prefixing non-string content would turn e.g. a list into
    # "prefix [...]" — only prefix real text.
    if model_prefix and isinstance(content, str):
        content = f"{model_prefix} {content}"
    return {"role": message.role, "content": content}
60
+
61
# Strip a configured model prefix from the start of a response chunk.
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Return *content* minus any leading *model_prefix*, whitespace-trimmed."""
    has_prefix = bool(model_prefix) and content.startswith(model_prefix)
    if has_prefix:
        log_strip_prefix(model_prefix, content)
        content = content[len(model_prefix):].strip()
    return content
67
 
68
+ # Streaming response with headers from config.py
69
  async def process_streaming_response(request: ChatRequest):
70
+ # Generate chat_id only if model is in MODEL_REFERERS
71
+ chat_id = generate_chat_id() if request.model in MODEL_REFERERS else None
 
72
  referer_path = MODEL_REFERERS.get(request.model, "")
73
+ referer_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}" if chat_id else BASE_URL
74
 
75
+ # Log chat ID, model, and referer URL if applicable
76
  log_generated_chat_id_with_referer(chat_id, request.model, referer_url)
77
 
78
+ # Fetch model configurations
79
  agent_mode = AGENT_MODE.get(request.model, {})
80
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
81
  model_prefix = MODEL_PREFIXES.get(request.model, "")
82
 
83
+ # Dynamic headers based on referer
84
  headers_api_chat = get_headers_api_chat(referer_url)
85
 
86
+ # Delay for 'o1-preview' model
87
  if request.model == 'o1-preview':
88
  delay_seconds = random.randint(20, 60)
89
  log_model_delay(delay_seconds, request.model, chat_id)
 
126
  async for line in response.aiter_lines():
127
  timestamp = int(datetime.now().timestamp())
128
  if line:
129
+ content = line.lstrip("$@$v=undefined-rv1$@$")
 
 
 
130
  cleaned_content = strip_model_prefix(content, model_prefix)
131
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
132
 
 
139
  log_request_error(e, chat_id)
140
  raise HTTPException(status_code=500, detail=str(e))
141
 
142
+ # Non-streaming response with headers from config.py
143
  async def process_non_streaming_response(request: ChatRequest):
144
+ chat_id = generate_chat_id() if request.model in MODEL_REFERERS else None
 
 
145
  referer_path = MODEL_REFERERS.get(request.model, "")
146
+ referer_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}" if chat_id else BASE_URL
 
147
 
148
+ # Log with chat ID, model, and referer URL if applicable
149
+ log_generated_chat_id_with_referer(chat_id, request.model, referer_url)
150
 
151
+ # Model configurations
152
  agent_mode = AGENT_MODE.get(request.model, {})
153
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
154
  model_prefix = MODEL_PREFIXES.get(request.model, "")
155
 
156
+ # Headers for API chat and secondary request
157
  headers_api_chat = get_headers_api_chat(referer_url)
158
+ headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
159
 
160
+ # Delay for 'o1-preview' model
161
  if request.model == 'o1-preview':
162
  delay_seconds = random.randint(20, 60)
163
  log_model_delay(delay_seconds, request.model, chat_id)
 
202
  except httpx.RequestError as e:
203
  log_request_error(e, chat_id)
204
  raise HTTPException(status_code=500, detail=str(e))
205
+
206
  if full_response.startswith("$@$v=undefined-rv1$@$"):
207
  full_response = full_response[21:]
208
 
209
+ # Strip the model prefix
210
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
211
 
212
  return {