Niansuh committed on
Commit
b88e75c
·
verified ·
1 Parent(s): a2a629e

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +14 -16
api/utils.py CHANGED
@@ -73,21 +73,25 @@ def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
73
  return content[len(model_prefix):].strip()
74
  return content
75
 
 
 
 
 
 
 
 
76
  # Process streaming response with headers from config.py
77
  async def process_streaming_response(request: ChatRequest):
78
  chat_id = generate_chat_id()
79
- logger.info(f"Generated Chat ID: {chat_id}")
80
-
 
81
  agent_mode = AGENT_MODE.get(request.model, {})
82
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
83
  model_prefix = MODEL_PREFIXES.get(request.model, "")
84
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
85
- referer_url = f"{BASE_URL}{referer_path}"
86
 
87
- # Generate headers for API chat request with dynamic Referer
88
  headers_api_chat = get_headers_api_chat(referer_url)
89
 
90
- # Introduce delay for 'o1-preview' model
91
  if request.model == 'o1-preview':
92
  delay_seconds = random.randint(20, 60)
93
  logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
@@ -133,7 +137,6 @@ async def process_streaming_response(request: ChatRequest):
133
  content = line
134
  if content.startswith("$@$v=undefined-rv1$@$"):
135
  content = content[21:]
136
- # Strip the model prefix from the response content
137
  cleaned_content = strip_model_prefix(content, model_prefix)
138
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
139
 
@@ -149,20 +152,16 @@ async def process_streaming_response(request: ChatRequest):
149
  # Process non-streaming response with headers from config.py
150
  async def process_non_streaming_response(request: ChatRequest):
151
  chat_id = generate_chat_id()
152
- logger.info(f"Generated Chat ID: {chat_id}")
153
-
 
154
  agent_mode = AGENT_MODE.get(request.model, {})
155
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
156
  model_prefix = MODEL_PREFIXES.get(request.model, "")
157
- referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
158
- referer_url = f"{BASE_URL}{referer_path}"
159
- chat_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}"
160
 
161
- # Generate headers for API chat request and chat request with dynamic Referer
162
  headers_api_chat = get_headers_api_chat(referer_url)
163
- headers_chat = get_headers_chat(chat_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
164
 
165
- # Introduce delay for 'o1-preview' model
166
  if request.model == 'o1-preview':
167
  delay_seconds = random.randint(20, 60)
168
  logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
@@ -210,7 +209,6 @@ async def process_non_streaming_response(request: ChatRequest):
210
  if full_response.startswith("$@$v=undefined-rv1$@$"):
211
  full_response = full_response[21:]
212
 
213
- # Strip the model prefix from the full response
214
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
215
 
216
  return {
 
73
  return content[len(model_prefix):].strip()
74
  return content
75
 
76
+ # Function to get the correct referer URL for logging
77
+ def get_referer_url(chat_id: str, model: str) -> str:
78
+ """Generate the referer URL based on specific models."""
79
+ if model in MODEL_REFERERS:
80
+ return f"{BASE_URL}/chat/{chat_id}?model={model}"
81
+ return BASE_URL
82
+
83
  # Process streaming response with headers from config.py
84
  async def process_streaming_response(request: ChatRequest):
85
  chat_id = generate_chat_id()
86
+ referer_url = get_referer_url(chat_id, request.model)
87
+ logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
88
+
89
  agent_mode = AGENT_MODE.get(request.model, {})
90
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
91
  model_prefix = MODEL_PREFIXES.get(request.model, "")
 
 
92
 
 
93
  headers_api_chat = get_headers_api_chat(referer_url)
94
 
 
95
  if request.model == 'o1-preview':
96
  delay_seconds = random.randint(20, 60)
97
  logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
 
137
  content = line
138
  if content.startswith("$@$v=undefined-rv1$@$"):
139
  content = content[21:]
 
140
  cleaned_content = strip_model_prefix(content, model_prefix)
141
  yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
142
 
 
152
  # Process non-streaming response with headers from config.py
153
  async def process_non_streaming_response(request: ChatRequest):
154
  chat_id = generate_chat_id()
155
+ referer_url = get_referer_url(chat_id, request.model)
156
+ logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
157
+
158
  agent_mode = AGENT_MODE.get(request.model, {})
159
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
160
  model_prefix = MODEL_PREFIXES.get(request.model, "")
 
 
 
161
 
 
162
  headers_api_chat = get_headers_api_chat(referer_url)
163
+ headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
164
 
 
165
  if request.model == 'o1-preview':
166
  delay_seconds = random.randint(20, 60)
167
  logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
 
209
  if full_response.startswith("$@$v=undefined-rv1$@$"):
210
  full_response = full_response[21:]
211
 
 
212
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
213
 
214
  return {