Niansuh committed on
Commit
b78d21c
·
verified ·
1 Parent(s): af25e72

Update api/utils.py

Browse files
Files changed (1) hide show
  1. api/utils.py +24 -15
api/utils.py CHANGED
@@ -22,6 +22,7 @@ from api.config import (
22
  )
23
  from api.models import ChatRequest
24
  from api.logger import setup_logger
 
25
 
26
  logger = setup_logger(__name__)
27
 
@@ -76,18 +77,21 @@ def get_referer_url(chat_id: str, model: str) -> str:
76
  return f"{BASE_URL}/chat/{chat_id}?model={model}"
77
 
78
  # Process streaming response with headers from config.py
79
- async def process_streaming_response(request: ChatRequest):
80
  chat_id = generate_id()
81
  referer_url = get_referer_url(chat_id, request.model)
82
  logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
83
 
84
- agent_mode = AGENT_MODE.get(request.model, {})
85
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
86
- model_prefix = MODEL_PREFIXES.get(request.model, "")
 
 
 
87
 
88
  headers_api_chat = get_headers_api_chat()
89
 
90
- if request.model == 'o1-preview':
91
  delay_seconds = random.randint(1, 60)
92
  logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
93
  await asyncio.sleep(delay_seconds)
@@ -110,10 +114,11 @@ async def process_streaming_response(request: ChatRequest):
110
  "previewToken": None,
111
  "trendingAgentMode": trending_agent_mode,
112
  "userId": None,
113
- "userSelectedModel": request.model if request.model in MODELS else None,
114
  "userSystemPrompt": None,
115
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
116
  "visitFromDelta": False,
 
117
  }
118
 
119
  async with httpx.AsyncClient() as client:
@@ -133,9 +138,9 @@ async def process_streaming_response(request: ChatRequest):
133
  if content.startswith("$@$v=undefined-rv1$@$"):
134
  content = content[21:]
135
  cleaned_content = strip_model_prefix(content, model_prefix)
136
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
137
 
138
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
139
  yield "data: [DONE]\n\n"
140
  except httpx.HTTPStatusError as e:
141
  logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
@@ -145,18 +150,21 @@ async def process_streaming_response(request: ChatRequest):
145
  raise HTTPException(status_code=500, detail=str(e))
146
 
147
  # Process non-streaming response with headers from config.py
148
- async def process_non_streaming_response(request: ChatRequest):
149
  chat_id = generate_id()
150
  referer_url = get_referer_url(chat_id, request.model)
151
  logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
152
 
153
- agent_mode = AGENT_MODE.get(request.model, {})
154
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
155
- model_prefix = MODEL_PREFIXES.get(request.model, "")
 
 
 
156
 
157
  headers_api_chat = get_headers_api_chat()
158
 
159
- if request.model == 'o1-preview':
160
  delay_seconds = random.randint(20, 60)
161
  logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
162
  await asyncio.sleep(delay_seconds)
@@ -179,10 +187,11 @@ async def process_non_streaming_response(request: ChatRequest):
179
  "previewToken": None,
180
  "trendingAgentMode": trending_agent_mode,
181
  "userId": None,
182
- "userSelectedModel": request.model if request.model in MODELS else None,
183
  "userSystemPrompt": None,
184
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
185
  "visitFromDelta": False,
 
186
  }
187
 
188
  full_response = ""
@@ -214,7 +223,7 @@ async def process_non_streaming_response(request: ChatRequest):
214
  "id": f"chatcmpl-{uuid.uuid4()}",
215
  "object": "chat.completion",
216
  "created": int(datetime.now().timestamp()),
217
- "model": request.model,
218
  "choices": [
219
  {
220
  "index": 0,
 
22
  )
23
  from api.models import ChatRequest
24
  from api.logger import setup_logger
25
+ from api.image import ImageResponse, to_data_uri # Assuming image utilities are here
26
 
27
  logger = setup_logger(__name__)
28
 
 
77
  return f"{BASE_URL}/chat/{chat_id}?model={model}"
78
 
79
  # Process streaming response with headers from config.py
80
+ async def process_streaming_response(request: ChatRequest, web_search: bool = False):
81
  chat_id = generate_id()
82
  referer_url = get_referer_url(chat_id, request.model)
83
  logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
84
 
85
+ # Resolve model aliases
86
+ resolved_model = MODEL_ALIASES.get(request.model, request.model)
87
+
88
+ agent_mode = AGENT_MODE.get(resolved_model, {})
89
+ trending_agent_mode = TRENDING_AGENT_MODE.get(resolved_model, {})
90
+ model_prefix = MODEL_PREFIXES.get(resolved_model, "")
91
 
92
  headers_api_chat = get_headers_api_chat()
93
 
94
+ if resolved_model == 'o1-preview':
95
  delay_seconds = random.randint(1, 60)
96
  logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
97
  await asyncio.sleep(delay_seconds)
 
114
  "previewToken": None,
115
  "trendingAgentMode": trending_agent_mode,
116
  "userId": None,
117
+ "userSelectedModel": resolved_model if resolved_model in MODELS else None,
118
  "userSystemPrompt": None,
119
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
120
  "visitFromDelta": False,
121
+ "webSearchMode": web_search, # Include web search mode
122
  }
123
 
124
  async with httpx.AsyncClient() as client:
 
138
  if content.startswith("$@$v=undefined-rv1$@$"):
139
  content = content[21:]
140
  cleaned_content = strip_model_prefix(content, model_prefix)
141
+ yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, resolved_model, timestamp))}\n\n"
142
 
143
+ yield f"data: {json.dumps(create_chat_completion_data('', resolved_model, timestamp, 'stop'))}\n\n"
144
  yield "data: [DONE]\n\n"
145
  except httpx.HTTPStatusError as e:
146
  logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
 
150
  raise HTTPException(status_code=500, detail=str(e))
151
 
152
  # Process non-streaming response with headers from config.py
153
+ async def process_non_streaming_response(request: ChatRequest, web_search: bool = False):
154
  chat_id = generate_id()
155
  referer_url = get_referer_url(chat_id, request.model)
156
  logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")
157
 
158
+ # Resolve model aliases
159
+ resolved_model = MODEL_ALIASES.get(request.model, request.model)
160
+
161
+ agent_mode = AGENT_MODE.get(resolved_model, {})
162
+ trending_agent_mode = TRENDING_AGENT_MODE.get(resolved_model, {})
163
+ model_prefix = MODEL_PREFIXES.get(resolved_model, "")
164
 
165
  headers_api_chat = get_headers_api_chat()
166
 
167
+ if resolved_model == 'o1-preview':
168
  delay_seconds = random.randint(20, 60)
169
  logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Chat ID: {chat_id})")
170
  await asyncio.sleep(delay_seconds)
 
187
  "previewToken": None,
188
  "trendingAgentMode": trending_agent_mode,
189
  "userId": None,
190
+ "userSelectedModel": resolved_model if resolved_model in MODELS else None,
191
  "userSystemPrompt": None,
192
  "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
193
  "visitFromDelta": False,
194
+ "webSearchMode": web_search, # Include web search mode
195
  }
196
 
197
  full_response = ""
 
223
  "id": f"chatcmpl-{uuid.uuid4()}",
224
  "object": "chat.completion",
225
  "created": int(datetime.now().timestamp()),
226
+ "model": resolved_model,
227
  "choices": [
228
  {
229
  "index": 0,