Niansuh committed on
Commit
f7ad6cb
·
verified ·
1 Parent(s): a93ac0e

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +28 -17
api/utils.py CHANGED
@@ -27,12 +27,12 @@ from api.logger import (
27
  log_strip_prefix,
28
  )
29
 
30
# Generate a random alphanumeric chat ID
def generate_chat_id(length: int = 7) -> str:
    """Return *length* random characters drawn from ASCII letters and digits."""
    pool = string.ascii_letters + string.digits
    return "".join(random.choice(pool) for _ in range(length))
34
 
35
- # Function to create chat completion data
36
  def create_chat_completion_data(
37
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
38
  ) -> Dict[str, Any]:
@@ -51,39 +51,51 @@ def create_chat_completion_data(
51
  "usage": None,
52
  }
53
 
54
# Convert message to dictionary format with optional model prefix
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Build a ``{"role", "content"}`` dict, prepending *model_prefix* if given."""
    text = message.content
    prefixed = f"{model_prefix} {text}" if model_prefix else text
    return {"role": message.role, "content": prefixed}
60
-
61
- # Remove model prefix from content if present
 
 
 
 
 
 
 
 
 
 
 
 
 
62
# Remove model prefix from content if present
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Strip *model_prefix* from the front of *content* (plus surrounding whitespace)."""
    if not model_prefix or not content.startswith(model_prefix):
        return content
    log_strip_prefix(model_prefix, content)
    return content[len(model_prefix):].strip()
67
 
68
- # Streaming response with headers from config.py
69
  async def process_streaming_response(request: ChatRequest):
70
- # Generate chat_id only if model is in MODEL_REFERERS
71
  chat_id = generate_chat_id() if request.model in MODEL_REFERERS else None
72
  referer_path = MODEL_REFERERS.get(request.model, "")
73
  referer_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}" if chat_id else BASE_URL
74
 
75
- # Log chat ID, model, and referer URL if applicable
76
  log_generated_chat_id_with_referer(chat_id, request.model, referer_url)
77
 
78
- # Fetch model configurations
79
  agent_mode = AGENT_MODE.get(request.model, {})
80
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
81
  model_prefix = MODEL_PREFIXES.get(request.model, "")
82
 
83
- # Dynamic headers based on referer
84
  headers_api_chat = get_headers_api_chat(referer_url)
85
 
86
- # Delay for 'o1-preview' model
87
  if request.model == 'o1-preview':
88
  delay_seconds = random.randint(20, 60)
89
  log_model_delay(delay_seconds, request.model, chat_id)
@@ -139,7 +151,7 @@ async def process_streaming_response(request: ChatRequest):
139
  log_request_error(e, chat_id)
140
  raise HTTPException(status_code=500, detail=str(e))
141
 
142
- # Non-streaming response with headers from config.py
143
  async def process_non_streaming_response(request: ChatRequest):
144
  chat_id = generate_chat_id() if request.model in MODEL_REFERERS else None
145
  referer_path = MODEL_REFERERS.get(request.model, "")
@@ -148,16 +160,15 @@ async def process_non_streaming_response(request: ChatRequest):
148
  # Log with chat ID, model, and referer URL if applicable
149
  log_generated_chat_id_with_referer(chat_id, request.model, referer_url)
150
 
151
- # Model configurations
152
  agent_mode = AGENT_MODE.get(request.model, {})
153
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
154
  model_prefix = MODEL_PREFIXES.get(request.model, "")
155
 
156
- # Headers for API chat and secondary request
157
  headers_api_chat = get_headers_api_chat(referer_url)
158
  headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
159
 
160
- # Delay for 'o1-preview' model
161
  if request.model == 'o1-preview':
162
  delay_seconds = random.randint(20, 60)
163
  log_model_delay(delay_seconds, request.model, chat_id)
@@ -206,7 +217,7 @@ async def process_non_streaming_response(request: ChatRequest):
206
  if full_response.startswith("$@$v=undefined-rv1$@$"):
207
  full_response = full_response[21:]
208
 
209
- # Strip the model prefix
210
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
211
 
212
  return {
 
27
  log_strip_prefix,
28
  )
29
 
30
# Helper function to generate a random alphanumeric chat ID
def generate_chat_id(length: int = 7) -> str:
    """Return a random chat ID of *length* ASCII letters and digits."""
    alphabet = string.ascii_letters + string.digits
    picks = random.choices(alphabet, k=length)
    return "".join(picks)
34
 
35
+ # Helper function to create chat completion data
36
  def create_chat_completion_data(
37
  content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
38
  ) -> Dict[str, Any]:
 
51
  "usage": None,
52
  }
53
 
54
# Convert message to dictionary format with optional model prefix and image data
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a chat message into the dict payload for the upstream API.

    Handles two content shapes:
      * plain string content -> ``{"role", "content"}``;
      * a two-element list (text part + image part) -> ``{"role", "content", "data"}``
        with the image URL copied into ``data.imageBase64``.

    Bug fix: the prefix used to be applied to ``message.content`` BEFORE the
    list check, so multimodal (list) content was stringified into the prefix
    string; the prefix is now applied to the text component only.
    """
    content = message.content
    if isinstance(content, list) and len(content) == 2:
        # Two-part multimodal message: the text lives in the first part.
        # NOTE(review): assumes parts are ordered [text, image] with an
        # OpenAI-style image part ({"image_url": {"url": ...}}) — confirm against callers.
        first = content[0]
        text = first.get("text", "") if isinstance(first, dict) else str(first)
        if model_prefix:
            text = f"{model_prefix} {text}"
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    # String (or any other) content: original behavior — prefix then return.
    if model_prefix:
        content = f"{model_prefix} {content}"
    return {"role": message.role, "content": content}
73
+
74
# Function to strip model prefix from content if present
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    has_prefix = bool(model_prefix) and content.startswith(model_prefix)
    if has_prefix:
        log_strip_prefix(model_prefix, content)
        return content[len(model_prefix):].strip()
    return content
81
 
82
+ # Streaming response processing with headers from config.py
83
  async def process_streaming_response(request: ChatRequest):
 
84
  chat_id = generate_chat_id() if request.model in MODEL_REFERERS else None
85
  referer_path = MODEL_REFERERS.get(request.model, "")
86
  referer_url = f"{BASE_URL}/chat/{chat_id}?model={request.model}" if chat_id else BASE_URL
87
 
88
+ # Log with chat ID, model, and referer URL if applicable
89
  log_generated_chat_id_with_referer(chat_id, request.model, referer_url)
90
 
 
91
  agent_mode = AGENT_MODE.get(request.model, {})
92
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
93
  model_prefix = MODEL_PREFIXES.get(request.model, "")
94
 
95
+ # Generate headers for API chat request with dynamic Referer
96
  headers_api_chat = get_headers_api_chat(referer_url)
97
 
98
+ # Introduce delay for 'o1-preview' model
99
  if request.model == 'o1-preview':
100
  delay_seconds = random.randint(20, 60)
101
  log_model_delay(delay_seconds, request.model, chat_id)
 
151
  log_request_error(e, chat_id)
152
  raise HTTPException(status_code=500, detail=str(e))
153
 
154
+ # Non-streaming response processing with headers from config.py
155
  async def process_non_streaming_response(request: ChatRequest):
156
  chat_id = generate_chat_id() if request.model in MODEL_REFERERS else None
157
  referer_path = MODEL_REFERERS.get(request.model, "")
 
160
  # Log with chat ID, model, and referer URL if applicable
161
  log_generated_chat_id_with_referer(chat_id, request.model, referer_url)
162
 
 
163
  agent_mode = AGENT_MODE.get(request.model, {})
164
  trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
165
  model_prefix = MODEL_PREFIXES.get(request.model, "")
166
 
167
+ # Generate headers for API chat request and secondary chat request with dynamic Referer
168
  headers_api_chat = get_headers_api_chat(referer_url)
169
  headers_chat = get_headers_chat(referer_url, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
170
 
171
+ # Introduce delay for 'o1-preview' model
172
  if request.model == 'o1-preview':
173
  delay_seconds = random.randint(20, 60)
174
  log_model_delay(delay_seconds, request.model, chat_id)
 
217
  if full_response.startswith("$@$v=undefined-rv1$@$"):
218
  full_response = full_response[21:]
219
 
220
+ # Strip the model prefix from the full response
221
  cleaned_full_response = strip_model_prefix(full_response, model_prefix)
222
 
223
  return {