Niansuh committed · verified
Commit a2a629e · 1 Parent(s): 3b1575d

Update api/utils.py

Files changed (1): api/utils.py (+52 −39)
api/utils.py CHANGED
```diff
@@ -16,17 +16,14 @@ from api.config import (
     AGENT_MODE,
     TRENDING_AGENT_MODE,
     MODEL_PREFIXES,
-    MODEL_REFERERS,
+    MODEL_REFERERS
 )
 from api.models import ChatRequest
-from api.logger import (
-    log_generated_chat_id_with_referer,
-    log_model_delay,
-    log_http_error,
-    log_request_error,
-)
+from api.logger import setup_logger
+
+logger = setup_logger(__name__)
 
-# Helper function to generate a random alphanumeric chat ID
+# Helper function to create a random alphanumeric chat ID
 def generate_chat_id(length: int = 7) -> str:
     characters = string.ascii_letters + string.digits
     return ''.join(random.choices(characters, k=length))
```
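Note: the commit swaps the four purpose-specific logging helpers for a single `setup_logger` factory from `api.logger`. That module is not part of this diff; as a rough orientation, a minimal sketch of what such a factory typically looks like, assuming it only returns a configured standard-library logger:

```python
# Hypothetical sketch of api/logger.setup_logger -- NOT the actual module,
# which this commit does not show. Assumes a plain logging.Logger suffices.
import logging

def setup_logger(name: str) -> logging.Logger:
    logger = logging.getLogger(name)
    if not logger.handlers:  # guard against duplicate handlers on re-import
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s | %(levelname)s | %(name)s | %(message)s")
        )
        logger.addHandler(handler)
        logger.setLevel(logging.INFO)
    return logger
```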
```diff
@@ -50,14 +47,12 @@ def create_chat_completion_data(
         "usage": None,
     }
 
-# Convert message to dictionary format with optional model prefix and image data
+# Function to convert message to dictionary format with optional model prefix
 def message_to_dict(message, model_prefix: Optional[str] = None):
     content = message.content
     if model_prefix:
         content = f"{model_prefix} {content}"
-    if isinstance(message.content, str):
-        return {"role": message.role, "content": content}
-    elif isinstance(message.content, list) and len(message.content) == 2:
+    if isinstance(message.content, list) and len(message.content) == 2:
         return {
             "role": message.role,
             "content": content,
```
```diff
@@ -70,24 +65,32 @@ def message_to_dict(message, model_prefix: Optional[str] = None):
     else:
         return {"role": message.role, "content": content}
 
-# Streaming response processing
-async def process_streaming_response(request: ChatRequest):
-    chat_id = generate_chat_id() if request.model in MODEL_REFERERS else None
-    referer_path = MODEL_REFERERS.get(request.model, "")
-    referer_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}" if chat_id else BASE_URL
-
-    # Log with chat ID, model, and referer URL if applicable
-    log_generated_chat_id_with_referer(chat_id, request.model, referer_url)
+# Function to strip model prefix from content if present
+def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
+    """Remove the model prefix from the response content if present."""
+    if model_prefix and content.startswith(model_prefix):
+        logger.debug(f"Stripping prefix '{model_prefix}' from content.")
+        return content[len(model_prefix):].strip()
+    return content
 
+# Process streaming response with headers from config.py
+async def process_streaming_response(request: ChatRequest):
+    chat_id = generate_chat_id()
+    logger.info(f"Generated Chat ID: {chat_id}")
+
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
     model_prefix = MODEL_PREFIXES.get(request.model, "")
+    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
+    referer_url = f"{BASE_URL}{referer_path}"
 
+    # Generate headers for API chat request with dynamic Referer
     headers_api_chat = get_headers_api_chat(referer_url)
 
+    # Introduce delay for 'o1-preview' model
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
-        log_model_delay(delay_seconds, request.model, chat_id)
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
         await asyncio.sleep(delay_seconds)
 
     json_data = {
```
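Note: the new `strip_model_prefix` helper is self-contained, so its behavior is easy to check in isolation. In the sketch below the `logger.debug` call is dropped for brevity, and `"[GPT]"` is a made-up prefix value, since the contents of `MODEL_PREFIXES` are not shown in this diff:

```python
from typing import Optional

def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    if model_prefix and content.startswith(model_prefix):
        return content[len(model_prefix):].strip()
    return content

assert strip_model_prefix("[GPT] Hello there", "[GPT]") == "Hello there"
assert strip_model_prefix("Hello there", "[GPT]") == "Hello there"  # prefix absent
assert strip_model_prefix("Hello there", None) == "Hello there"     # no prefix configured
```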
```diff
@@ -126,36 +129,43 @@ async def process_streaming_response(request: ChatRequest):
                 response.raise_for_status()
                 async for line in response.aiter_lines():
                     timestamp = int(datetime.now().timestamp())
-                    content = line.lstrip("$@$v=undefined-rv1$@$")  # Trim only if needed
-                    yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
+                    if line:
+                        content = line
+                        if content.startswith("$@$v=undefined-rv1$@$"):
+                            content = content[21:]
+                        # Strip the model prefix from the response content
+                        cleaned_content = strip_model_prefix(content, model_prefix)
+                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
 
                 yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                 yield "data: [DONE]\n\n"
         except httpx.HTTPStatusError as e:
-            log_http_error(e, chat_id)
+            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
             raise HTTPException(status_code=e.response.status_code, detail=str(e))
         except httpx.RequestError as e:
-            log_request_error(e, chat_id)
+            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
             raise HTTPException(status_code=500, detail=str(e))
 
-# Non-streaming response processing
+# Process non-streaming response with headers from config.py
 async def process_non_streaming_response(request: ChatRequest):
-    chat_id = generate_chat_id() if request.model in MODEL_REFERERS else None
-    referer_path = MODEL_REFERERS.get(request.model, "")
-    referer_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}" if chat_id else BASE_URL
-
-    log_generated_chat_id_with_referer(chat_id, request.model, referer_url)
-
+    chat_id = generate_chat_id()
+    logger.info(f"Generated Chat ID: {chat_id}")
+
     agent_mode = AGENT_MODE.get(request.model, {})
     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
+    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
+    referer_url = f"{BASE_URL}{referer_path}"
+    chat_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}"
 
+    # Generate headers for API chat request and chat request with dynamic Referer
     headers_api_chat = get_headers_api_chat(referer_url)
-    headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
+    headers_chat = get_headers_chat(chat_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
 
+    # Introduce delay for 'o1-preview' model
     if request.model == 'o1-preview':
         delay_seconds = random.randint(20, 60)
-        log_model_delay(delay_seconds, request.model, chat_id)
+        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
         await asyncio.sleep(delay_seconds)
 
     json_data = {
```
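Note: replacing `line.lstrip("$@$v=undefined-rv1$@$")` with a `startswith` check plus `content[21:]` is a real bug fix, not a style change: `str.lstrip` treats its argument as a character set, not a literal prefix, so it could also eat legitimate leading characters of the payload. The magic number 21 is simply the marker's length:

```python
marker = "$@$v=undefined-rv1$@$"
assert len(marker) == 21  # hence content[21:] in the diff

# str.lstrip strips any leading characters found in the set, not the prefix string:
assert "validated".lstrip(marker) == "alidated"  # the leading 'v' is wrongly removed

# startswith + slicing removes exactly the marker and nothing else:
line = marker + "Hello"
if line.startswith(marker):
    line = line[21:]
assert line == "Hello"
```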
```diff
@@ -192,13 +202,16 @@ async def process_non_streaming_response(request: ChatRequest):
                 async for chunk in response.aiter_text():
                     full_response += chunk
         except httpx.HTTPStatusError as e:
-            log_http_error(e, chat_id)
+            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
             raise HTTPException(status_code=e.response.status_code, detail=str(e))
         except httpx.RequestError as e:
-            log_request_error(e, chat_id)
+            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
             raise HTTPException(status_code=500, detail=str(e))
-
-    full_response = full_response.lstrip("$@$v=undefined-rv1$@$")  # Trim only if needed
+    if full_response.startswith("$@$v=undefined-rv1$@$"):
+        full_response = full_response[21:]
+
+    # Strip the model prefix from the full response
+    cleaned_full_response = strip_model_prefix(full_response, model_prefix)
 
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
```
```diff
@@ -208,7 +221,7 @@ async def process_non_streaming_response(request: ChatRequest):
         "choices": [
             {
                 "index": 0,
-                "message": {"role": "assistant", "content": full_response},
+                "message": {"role": "assistant", "content": cleaned_full_response},
                 "finish_reason": "stop",
             }
         ],
```
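Note: the streaming handler emits OpenAI-style server-sent events and terminates the stream with `data: [DONE]`. A minimal client sketch for consuming that stream follows; the endpoint path, host, model name, and the `delta` field layout produced by `create_chat_completion_data` (whose body is outside this diff) are all assumptions:

```python
# Hypothetical client for the SSE stream from process_streaming_response.
# Only the framing ("data: ..." lines ending in "data: [DONE]") comes from
# the code above; the route and chunk shape are assumed.
import asyncio
import json
import httpx

async def read_stream(base_url: str = "http://localhost:8000") -> None:
    payload = {
        "model": "gpt-4o",  # hypothetical model name
        "messages": [{"role": "user", "content": "Hi"}],
        "stream": True,
    }
    async with httpx.AsyncClient(timeout=None) as client:
        # /v1/chat/completions is an assumed mount point; routing is not in this diff.
        async with client.stream("POST", f"{base_url}/v1/chat/completions", json=payload) as resp:
            async for line in resp.aiter_lines():
                if not line.startswith("data: "):
                    continue
                data = line[len("data: "):]
                if data == "[DONE]":
                    break
                chunk = json.loads(data)
                # Assumed chunk shape: choices[0]["delta"]["content"]
                print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)

if __name__ == "__main__":
    asyncio.run(read_stream())
```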
 