Niansuh committed
Commit c2bd343 · verified · 1 parent: f8c899a

Update api/utils.py

Files changed (1)
  1. api/utils.py +242 -480
api/utils.py CHANGED
@@ -1,480 +1,242 @@
- from datetime import datetime
- import json
- from typing import Any, Dict, Optional
- import uuid
-
- import httpx
- from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE, BASE_URL
- from fastapi import HTTPException
- from api.models import ChatRequest
-
- from api.logger import setup_logger
-
- logger = setup_logger(__name__)
-
-
- def create_chat_completion_data(
-     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
- ) -> Dict[str, Any]:
-     return {
-         "id": f"chatcmpl-{uuid.uuid4()}",
-         "object": "chat.completion.chunk",
-         "created": timestamp,
-         "model": model,
-         "choices": [
-             {
-                 "index": 0,
-                 "delta": {"content": content, "role": "assistant"},
-                 "finish_reason": finish_reason,
-             }
-         ],
-         "usage": None,
-     }
-
-
- def message_to_dict(message):
-     if isinstance(message.content, str):
-         return {"role": message.role, "content": message.content}
-     elif isinstance(message.content, list) and len(message.content) == 2:
-         return {
-             "role": message.role,
-             "content": message.content[0]["text"],
-             "data": {
-                 "imageBase64": message.content[1]["image_url"]["url"],
-                 "fileText": "",
-                 "title": "snapshot",
-             },
-         }
-     else:
-         return {"role": message.role, "content": message.content}
-
-
- async def process_streaming_response(request: ChatRequest):
-     agent_mode = AGENT_MODE.get(request.model, {})
-     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
-     json_data = {
-         "messages": [message_to_dict(msg) for msg in request.messages],
-         "previewToken": None,
-         "userId": None,
-         "codeModelMode": True,
-         "agentMode": agent_mode,
-         "trendingAgentMode": trending_agent_mode,
-         "isMicMode": False,
-         "userSystemPrompt": None,
-         "maxTokens": request.max_tokens,
-         "playgroundTopP": request.top_p,
-         "playgroundTemperature": request.temperature,
-         "isChromeExt": False,
-         "githubToken": None,
-         "clickedAnswer2": False,
-         "clickedAnswer3": False,
-         "clickedForceWebSearch": False,
-         "visitFromDelta": False,
-         "mobileClient": False,
-         "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
-     }
-
-     async with httpx.AsyncClient() as client:
-         try:
-             async with client.stream(
-                 "POST",
-                 f"{BASE_URL}/api/chat",
-                 headers=headers,
-                 json=json_data,
-                 timeout=100,
-             ) as response:
-                 response.raise_for_status()
-                 async for line in response.aiter_lines():
-                     timestamp = int(datetime.now().timestamp())
-                     if line:
-                         content = line
-                         if content.startswith("$@$v=undefined-rv1$@$"):
-                             content = content[21:]
-                         yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
-
-                 yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
-                 yield "data: [DONE]\n\n"
-         except httpx.HTTPStatusError as e:
-             logger.error(f"HTTP error occurred: {e}")
-             raise HTTPException(status_code=e.response.status_code, detail=str(e))
-         except httpx.RequestError as e:
-             logger.error(f"Error occurred during request: {e}")
-             raise HTTPException(status_code=500, detail=str(e))
-
-
- async def process_non_streaming_response(request: ChatRequest):
-     agent_mode = AGENT_MODE.get(request.model, {})
-     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
-     json_data = {
-         "messages": [message_to_dict(msg) for msg in request.messages],
-         "previewToken": None,
-         "userId": None,
-         "codeModelMode": True,
-         "agentMode": agent_mode,
-         "trendingAgentMode": trending_agent_mode,
-         "isMicMode": False,
-         "userSystemPrompt": None,
-         "maxTokens": request.max_tokens,
-         "playgroundTopP": request.top_p,
-         "playgroundTemperature": request.temperature,
-         "isChromeExt": False,
-         "githubToken": None,
-         "clickedAnswer2": False,
-         "clickedAnswer3": False,
-         "clickedForceWebSearch": False,
-         "visitFromDelta": False,
-         "mobileClient": False,
-         "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
-     }
-     full_response = ""
-     async with httpx.AsyncClient() as client:
-         try:
-             async with client.stream(
-                 method="POST", url=f"{BASE_URL}/api/chat", headers=headers, json=json_data
-             ) as response:
-                 response.raise_for_status()
-                 async for chunk in response.aiter_text():
-                     full_response += chunk
-         except httpx.HTTPStatusError as e:
-             logger.error(f"HTTP error occurred: {e}")
-             raise HTTPException(status_code=e.response.status_code, detail=str(e))
-         except httpx.RequestError as e:
-             logger.error(f"Error occurred during request: {e}")
-             raise HTTPException(status_code=500, detail=str(e))
-     if full_response.startswith("$@$v=undefined-rv1$@$"):
-         full_response = full_response[21:]
-
-     return {
-         "id": f"chatcmpl-{uuid.uuid4()}",
-         "object": "chat.completion",
-         "created": int(datetime.now().timestamp()),
-         "model": request.model,
-         "choices": [
-             {
-                 "index": 0,
-                 "message": {"role": "assistant", "content": full_response},
-                 "finish_reason": "stop",
-             }
-         ],
-         "usage": None,
-     }
 
+ from datetime import datetime
+ import json
+ from typing import Any, Dict, Optional
+ import uuid
+
+ import httpx
+ from api.config import MODEL_MAPPING, headers, AGENT_MODE, TRENDING_AGENT_MODE, BASE_URL
+ from fastapi import HTTPException
+ from api.models import ChatRequest
+
+ from api.logger import setup_logger
+
+ logger = setup_logger(__name__)
+
+
+ def create_chat_completion_data(
+     content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
+ ) -> Dict[str, Any]:
+     return {
+         "id": f"chatcmpl-{uuid.uuid4()}",
+         "object": "chat.completion.chunk",
+         "created": timestamp,
+         "model": model,
+         "choices": [
+             {
+                 "index": 0,
+                 "delta": {"content": content, "role": "assistant"},
+                 "finish_reason": finish_reason,
+             }
+         ],
+         "usage": None,
+     }
+
+
+ def message_to_dict(message):
+     if isinstance(message.content, str):
+         return {"role": message.role, "content": message.content}
+     elif isinstance(message.content, list) and len(message.content) == 2:
+         return {
+             "role": message.role,
+             "content": message.content[0]["text"],
+             "data": {
+                 "imageBase64": message.content[1]["image_url"]["url"],
+                 "fileText": "",
+                 "title": "snapshot",
+             },
+         }
+     else:
+         return {"role": message.role, "content": message.content}
+
+
+ async def process_streaming_response(request: ChatRequest):
+     agent_mode = AGENT_MODE.get(request.model, {})
+     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
+     model = MODEL_MAPPING.get(request.model, request.model)
+
+     # Add prefix if applicable
+     prefix = ""
+     model_prefixes = {
+         'gpt-4o': '@GPT-4o',
+         'gemini-pro': '@Gemini-PRO',
+         'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
+         'PythonAgent': '@Python Agent',
+         'JavaAgent': '@Java Agent',
+         'JavaScriptAgent': '@JavaScript Agent',
+         'HTMLAgent': '@HTML Agent',
+         'GoogleCloudAgent': '@Google Cloud Agent',
+         'AndroidDeveloper': '@Android Developer',
+         'SwiftDeveloper': '@Swift Developer',
+         'Next.jsAgent': '@Next.js Agent',
+         'MongoDBAgent': '@MongoDB Agent',
+         'PyTorchAgent': '@PyTorch Agent',
+         'ReactAgent': '@React Agent',
+         'XcodeAgent': '@Xcode Agent',
+         'AngularJSAgent': '@AngularJS Agent',
+         'blackboxai-pro': '@BLACKBOXAI-PRO',
+         'ImageGeneration': '@Image Generation',
+     }
+     prefix = model_prefixes.get(request.model, "")
+
+     # Format messages with prefix
+     formatted_messages = []
+     for msg in request.messages:
+         formatted_content = msg.content
+         if prefix and msg.role.lower() == "user":
+             formatted_content = f"{prefix} {formatted_content}"
+         formatted_messages.append({
+             "role": msg.role,
+             "content": formatted_content,
+             "data": msg.content.get('data') if isinstance(msg.content, dict) else None
+         })
+
+     json_data = {
+         "messages": formatted_messages,
+         "previewToken": None,
+         "userId": None,
+         "codeModelMode": True,
+         "agentMode": agent_mode,
+         "trendingAgentMode": trending_agent_mode,
+         "isMicMode": False,
+         "userSystemPrompt": None,
+         "maxTokens": request.max_tokens,
+         "playgroundTopP": request.top_p,
+         "playgroundTemperature": request.temperature,
+         "isChromeExt": False,
+         "githubToken": None,
+         "clickedAnswer2": False,
+         "clickedAnswer3": False,
+         "clickedForceWebSearch": False,
+         "visitFromDelta": False,
+         "mobileClient": False,
+         "webSearchMode": False,  # Set to True if web search is needed
+         "userSelectedModel": model,
+     }
+
+     async with httpx.AsyncClient() as client:
+         try:
+             async with client.stream(
+                 "POST",
+                 f"{BASE_URL}/api/chat",
+                 headers=headers,
+                 json=json_data,
+                 timeout=100,
+             ) as response:
+                 response.raise_for_status()
+                 async for line in response.aiter_lines():
+                     timestamp = int(datetime.now().timestamp())
+                     if line:
+                         content = line
+                         if content.startswith("$@$v=undefined-rv1$@$"):
+                             content = content[21:]
+                         yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
+
+                 yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
+                 yield "data: [DONE]\n\n"
+         except httpx.HTTPStatusError as e:
+             logger.error(f"HTTP error occurred: {e}")
+             raise HTTPException(status_code=e.response.status_code, detail=str(e))
+         except httpx.RequestError as e:
+             logger.error(f"Error occurred during request: {e}")
+             raise HTTPException(status_code=500, detail=str(e))
+
+
+ async def process_non_streaming_response(request: ChatRequest):
+     agent_mode = AGENT_MODE.get(request.model, {})
+     trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
+     model = MODEL_MAPPING.get(request.model, request.model)
+
+     # Add prefix if applicable
+     prefix = ""
+     model_prefixes = {
+         'gpt-4o': '@GPT-4o',
+         'gemini-pro': '@Gemini-PRO',
+         'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
+         'PythonAgent': '@Python Agent',
+         'JavaAgent': '@Java Agent',
+         'JavaScriptAgent': '@JavaScript Agent',
+         'HTMLAgent': '@HTML Agent',
+         'GoogleCloudAgent': '@Google Cloud Agent',
+         'AndroidDeveloper': '@Android Developer',
+         'SwiftDeveloper': '@Swift Developer',
+         'Next.jsAgent': '@Next.js Agent',
+         'MongoDBAgent': '@MongoDB Agent',
+         'PyTorchAgent': '@PyTorch Agent',
+         'ReactAgent': '@React Agent',
+         'XcodeAgent': '@Xcode Agent',
+         'AngularJSAgent': '@AngularJS Agent',
+         'blackboxai-pro': '@BLACKBOXAI-PRO',
+         'ImageGeneration': '@Image Generation',
+     }
+     prefix = model_prefixes.get(request.model, "")
+
+     # Format messages with prefix
+     formatted_messages = []
+     for msg in request.messages:
+         formatted_content = msg.content
+         if prefix and msg.role.lower() == "user":
+             formatted_content = f"{prefix} {formatted_content}"
+         formatted_messages.append({
+             "role": msg.role,
+             "content": formatted_content,
+             "data": msg.content.get('data') if isinstance(msg.content, dict) else None
+         })
+
+     json_data = {
+         "messages": formatted_messages,
+         "previewToken": None,
+         "userId": None,
+         "codeModelMode": True,
+         "agentMode": agent_mode,
+         "trendingAgentMode": trending_agent_mode,
+         "isMicMode": False,
+         "userSystemPrompt": None,
+         "maxTokens": request.max_tokens,
+         "playgroundTopP": request.top_p,
+         "playgroundTemperature": request.temperature,
+         "isChromeExt": False,
+         "githubToken": None,
+         "clickedAnswer2": False,
+         "clickedAnswer3": False,
+         "clickedForceWebSearch": False,
+         "visitFromDelta": False,
+         "mobileClient": False,
+         "webSearchMode": False,  # Set to True if web search is needed
+         "userSelectedModel": model,
+     }
+
+     full_response = ""
+     async with httpx.AsyncClient() as client:
+         try:
+             response = await client.post(
+                 f"{BASE_URL}/api/chat",
+                 headers=headers,
+                 json=json_data,
+                 timeout=100,
+             )
+             response.raise_for_status()
+             full_response = response.text
+         except httpx.HTTPStatusError as e:
+             logger.error(f"HTTP error occurred: {e}")
+             raise HTTPException(status_code=e.response.status_code, detail=str(e))
+         except httpx.RequestError as e:
+             logger.error(f"Error occurred during request: {e}")
+             raise HTTPException(status_code=500, detail=str(e))
+
+     if full_response.startswith("$@$v=undefined-rv1$@$"):
+         full_response = full_response[21:]
+
+     return {
+         "id": f"chatcmpl-{uuid.uuid4()}",
+         "object": "chat.completion",
+         "created": int(datetime.now().timestamp()),
+         "model": request.model,
+         "choices": [
+             {
+                 "index": 0,
+                 "message": {"role": "assistant", "content": full_response},
+                 "finish_reason": "stop",
+             }
+         ],
+         "usage": None,
+     }
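
For context, here is a minimal sketch of how these two helpers are typically wired into a FastAPI route. The endpoint path, the `stream` flag on ChatRequest, and the app setup below are assumptions for illustration, not part of this commit.

# Hypothetical wiring sketch; endpoint path and the `stream` flag are assumed.
from fastapi import FastAPI
from fastapi.responses import StreamingResponse

from api.models import ChatRequest
from api.utils import process_streaming_response, process_non_streaming_response

app = FastAPI()


@app.post("/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    # process_streaming_response is an async generator of SSE "data:" lines,
    # so it can be handed directly to StreamingResponse.
    if getattr(request, "stream", False):
        return StreamingResponse(
            process_streaming_response(request),
            media_type="text/event-stream",
        )
    # process_non_streaming_response returns a chat.completion-shaped dict.
    return await process_non_streaming_response(request)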