Niansuh committed on
Commit
88bc5e3
·
verified ·
1 Parent(s): 5f8de6f

Update api/provider/blackboxai.py

Browse files
Files changed (1) hide show
  1. api/provider/blackboxai.py +190 -100
api/provider/blackboxai.py CHANGED
@@ -1,108 +1,198 @@
1
- import os
2
- from dotenv import load_dotenv
 
 
3
 
4
- load_dotenv()
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
- BASE_URL = "https://www.blackbox.ai"
7
- headers = {
8
- 'accept': '*/*',
9
- 'accept-language': 'en-US,en;q=0.9',
10
- 'origin': 'https://www.blackbox.ai',
11
- 'priority': 'u=1, i',
12
- 'sec-ch-ua': '"Chromium";v="130", "Google Chrome";v="130", "Not?A_Brand";v="99"',
13
- 'sec-ch-ua-mobile': '?0',
14
- 'sec-ch-ua-platform': '"Windows"',
15
- 'sec-fetch-dest': 'empty',
16
- 'sec-fetch-mode': 'cors',
17
- 'sec-fetch-site': 'same-origin',
18
- 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
19
- 'AppleWebKit/537.36 (KHTML, like Gecko) '
20
- 'Chrome/130.0.0.0 Safari/537.36',
21
- }
22
- APP_SECRET = os.getenv("APP_SECRET")
23
 
24
- ALLOWED_MODELS = [
25
- {"id": "blackboxai", "name": "blackboxai"},
26
- {"id": "blackboxai-pro", "name": "blackboxai-pro"},
27
- {"id": "flux", "name": "flux"},
28
- {"id": "llama-3.1-8b", "name": "llama-3.1-8b"},
29
- {"id": "llama-3.1-70b", "name": "llama-3.1-70b"},
30
- {"id": "llama-3.1-405b", "name": "llama-3.1-405b"},
31
- {"id": "gpt-4o", "name": "gpt-4o"},
32
- {"id": "gemini-pro", "name": "gemini-pro"},
33
- {"id": "gemini-1.5-flash", "name": "gemini-1.5-flash"},
34
- {"id": "claude-sonnet-3.5", "name": "claude-sonnet-3.5"},
35
- {"id": "PythonAgent", "name": "PythonAgent"},
36
- {"id": "JavaAgent", "name": "JavaAgent"},
37
- {"id": "JavaScriptAgent", "name": "JavaScriptAgent"},
38
- {"id": "HTMLAgent", "name": "HTMLAgent"},
39
- {"id": "GoogleCloudAgent", "name": "GoogleCloudAgent"},
40
- {"id": "AndroidDeveloper", "name": "AndroidDeveloper"},
41
- {"id": "SwiftDeveloper", "name": "SwiftDeveloper"},
42
- {"id": "Next.jsAgent", "name": "Next.jsAgent"},
43
- {"id": "MongoDBAgent", "name": "MongoDBAgent"},
44
- {"id": "PyTorchAgent", "name": "PyTorchAgent"},
45
- {"id": "ReactAgent", "name": "ReactAgent"},
46
- {"id": "XcodeAgent", "name": "XcodeAgent"},
47
- {"id": "AngularJSAgent", "name": "AngularJSAgent"},
48
- {"id": "RepoMap", "name": "RepoMap"},
49
- {"id": "gemini-1.5-pro-latest", "name": "gemini-pro"},
50
- {"id": "gemini-1.5-pro", "name": "gemini-1.5-pro"},
51
- {"id": "claude-3-5-sonnet-20240620", "name": "claude-sonnet-3.5"},
52
- {"id": "claude-3-5-sonnet", "name": "claude-sonnet-3.5"},
53
- {"id": "Niansuh", "name": "Niansuh"},
54
- # GizAI models
55
- {"id": "chat-gemini-flash", "name": "chat-gemini-flash"},
56
- {"id": "chat-gemini-pro", "name": "chat-gemini-pro"},
57
- {"id": "chat-gpt4m", "name": "chat-gpt4m"},
58
- {"id": "chat-gpt4", "name": "chat-gpt4"},
59
- {"id": "claude-sonnet", "name": "claude-sonnet"},
60
- {"id": "claude-haiku", "name": "claude-haiku"},
61
- {"id": "llama-3-70b", "name": "llama-3-70b"},
62
- {"id": "llama-3-8b", "name": "llama-3-8b"},
63
- {"id": "mistral-large", "name": "mistral-large"},
64
- {"id": "chat-o1-mini", "name": "chat-o1-mini"},
65
- {"id": "flux1", "name": "flux1"},
66
- {"id": "sdxl", "name": "sdxl"},
67
- {"id": "sd", "name": "sd"},
68
- {"id": "sd35", "name": "sd35"},
69
- ]
70
 
71
- MODEL_MAPPING = {
72
- # Existing mappings...
73
- # GizAI mappings
74
- "chat-gemini-flash": "chat-gemini-flash",
75
- "chat-gemini-pro": "chat-gemini-pro",
76
- "chat-gpt4m": "chat-gpt4m",
77
- "chat-gpt4": "chat-gpt4",
78
- "claude-sonnet": "claude-sonnet",
79
- "claude-haiku": "claude-haiku",
80
- "llama-3-70b": "llama-3-70b",
81
- "llama-3-8b": "llama-3-8b",
82
- "mistral-large": "mistral-large",
83
- "chat-o1-mini": "chat-o1-mini",
84
- "flux1": "flux1",
85
- "sdxl": "sdxl",
86
- "sd": "sd",
87
- "sd35": "sd35",
88
- # Add any additional mappings if necessary
89
- }
 
 
90
 
91
- # Agent modes (existing)
92
- AGENT_MODE = {
93
- 'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "flux"},
94
- 'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "Niansuh"},
95
- }
 
 
96
 
97
- TRENDING_AGENT_MODE = {
98
- # Existing entries...
99
- }
 
 
 
100
 
101
- # Model prefixes
102
- MODEL_PREFIXES = {
103
- # Existing entries...
104
- # Add any additional prefixes if necessary
105
- }
106
 
107
- # Model referers
108
- MODEL_REFERERS =
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import uuid
2
+ from datetime import datetime
3
+ import json
4
+ from typing import Any, Dict, Optional
5
 
6
+ import httpx
7
+ from fastapi import HTTPException
8
+ from api.models import ChatRequest
9
+ from api.config import (
10
+ MODEL_MAPPING,
11
+ headers,
12
+ AGENT_MODE,
13
+ TRENDING_AGENT_MODE,
14
+ BASE_URL,
15
+ MODEL_PREFIXES,
16
+ MODEL_REFERERS
17
+ )
18
+ from api.logger import setup_logger
19
 
20
+ logger = setup_logger(__name__)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    Args:
        content: Delta text carried by this chunk.
        model: Model identifier echoed back to the client.
        timestamp: Unix timestamp (seconds) for the ``created`` field.
        finish_reason: ``"stop"`` on the final chunk, otherwise ``None``.

    Returns:
        A dict shaped like an OpenAI streaming chunk; ``usage`` is always
        ``None`` because the upstream API reports no token counts.
    """
    delta = {"content": content, "role": "assistant"}
    choice = {"index": 0, "delta": delta, "finish_reason": finish_reason}
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a chat message into the dict shape the Blackbox API expects.

    String content is forwarded as-is (optionally prefixed with
    *model_prefix*).  A two-element list is treated as a [text part, image
    part] pair and produces a payload with an embedded base64 image.  Any
    other content is passed through unchanged.
    """
    def prefixed(text):
        # Prepend the configured model prefix, when there is one.
        return f"{model_prefix} {text}" if model_prefix else text

    content = message.content
    if isinstance(content, str):
        return {"role": message.role, "content": prefixed(content)}
    if isinstance(content, list) and len(content) == 2:
        # assumes element 0 is the text part and element 1 the image part —
        # NOTE(review): ordering is not validated; confirm against callers.
        return {
            "role": message.role,
            "content": prefixed(content[0]["text"]),
            "data": {
                "imageBase64": content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}
61
 
62
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove *model_prefix* from the start of *content*, if present.

    When the prefix matches it is dropped and surrounding whitespace is
    trimmed; otherwise *content* is returned unchanged.
    """
    if not (model_prefix and content.startswith(model_prefix)):
        logger.debug("No prefix to strip from content.")
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
69
 
70
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the Blackbox API as OpenAI-style SSE.

    Yields one ``data: {...}`` chunk per upstream response line, then a final
    chunk with ``finish_reason="stop"`` followed by ``data: [DONE]``.

    Args:
        request: The incoming chat request (model, messages, sampling params).

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on transport/request failures.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with a per-model Referer; the upstream site appears to
    # key behavior off it.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                # Initialize up front so the final "stop" chunk is valid even
                # when the upstream body is empty (previously this raised
                # NameError because timestamp was only bound inside the loop).
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line
                        # Drop Blackbox's version banner prepended to replies.
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        # Strip the model prefix from the response content
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
131
+
132
async def process_non_streaming_response(request: ChatRequest):
    """Fetch a complete chat completion from the Blackbox API.

    Accumulates the streamed upstream body into a single string and returns
    an OpenAI-style ``chat.completion`` response dict (``usage`` is ``None``
    because the upstream API reports no token counts).

    Args:
        request: The incoming chat request (model, messages, sampling params).

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on transport/request failures.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Update headers with a per-model Referer; the upstream site appears to
    # key behavior off it.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }
    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                # Match the streaming path's timeout; httpx's 5s default is
                # too short for long model generations.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
    # Drop Blackbox's version banner prepended to replies.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    # Strip the model prefix from the full response
    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }