rkihacker commited on
Commit
d021a5a
·
verified ·
1 Parent(s): 33251e2

Upload 15 files

Browse files
.dockerignore ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ __pycache__
2
+ *.pyc
3
+ *.pyo
4
+ *.pyd
5
+ *.db
6
+ *.sqlite3
7
+ .env
8
+ .git
9
+ .gitignore
10
+ Dockerfile
.github/workflows/docker-deploy.yml ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Docker Build and Push
2
+
3
+ on:
4
+ push:
5
+ branches:
6
+ - main # Triggers the workflow when changes are pushed to the 'main' branch
7
+ pull_request:
8
+ branches:
9
+ - main # Optional: Trigger on pull requests to 'main' for testing
10
+
11
+ jobs:
12
+ build:
13
+ runs-on: ubuntu-latest
14
+
15
+ steps:
16
+ # Step 1: Check out the repository
17
+ - name: Checkout code
18
+ uses: actions/checkout@v3 # Check out your GitHub repository
19
+
20
+ # Step 2: Set up Docker Buildx
21
+ - name: Set up Docker Buildx
22
+ uses: docker/setup-buildx-action@v2 # Set up Docker Buildx to support advanced features like multi-platform builds
23
+ with:
24
+ install: true
25
+
26
+ # Step 3: Log in to Docker Hub
27
+ - name: Log in to Docker Hub
28
+ uses: docker/login-action@v2 # Logs in to Docker Hub
29
+ with:
30
+ username: ${{ secrets.DOCKER_USERNAME }} # Your Docker Hub username stored as a GitHub secret
31
+ password: ${{ secrets.DOCKER_PASSWORD }} # Your Docker Hub password stored as a GitHub secret
32
+
33
+ # Step 4: Build and push the Docker image
34
+ - name: Build and Push Docker Image
35
+ uses: docker/build-push-action@v5 # Build and push the Docker image
36
+ with:
37
+ context: . # The context is the root of your repository
38
+ push: true # Automatically push the image after building
39
+ tags: ${{ secrets.DOCKER_USERNAME }}/blackboxv2:v786 # Replace 'your-app-name' with your desired Docker image name
40
+
41
+ # Step 5: Log out of Docker Hub
42
+ - name: Log out of Docker Hub
43
+ run: docker logout
Dockerfile ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use the official Python 3.10 slim image
2
+ FROM python:3.10-slim
3
+
4
+ # Set environment variables to prevent Python from writing pyc files and to buffer stdout/stderr
5
+ ENV PYTHONDONTWRITEBYTECODE=1
6
+ ENV PYTHONUNBUFFERED=1
7
+
8
+ # Set the working directory to /app
9
+ WORKDIR /app
10
+
11
+ # Install system dependencies to get `nproc` (for number of CPU cores)
12
+ RUN apt-get update && apt-get install -y procps
13
+
14
+ # Copy only the requirements.txt first to leverage Docker cache
15
+ COPY requirements.txt .
16
+
17
+ # Install Python dependencies
18
+ RUN pip install --no-cache-dir --upgrade pip
19
+ RUN pip install --no-cache-dir -r requirements.txt
20
+
21
+ # Copy the rest of the application code
22
+ COPY . .
23
+
24
+ # Expose port 8001 to the outside world
25
+ EXPOSE 8001
26
+
27
+ # Command to run Uvicorn with a dynamic number of workers based on the CPU cores
28
+ CMD ["sh", "-c", "uvicorn main:app --host 0.0.0.0 --port 8001 --workers $(nproc)"]
README.md CHANGED
@@ -1,10 +1 @@
1
- ---
2
- title: Test101
3
- emoji: 👀
4
- colorFrom: pink
5
- colorTo: red
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ # Blackboxv2
 
 
 
 
 
 
 
 
 
api/__init__.py ADDED
File without changes
api/app.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, Request
2
+ from starlette.middleware.cors import CORSMiddleware
3
+ from fastapi.responses import JSONResponse
4
+ from api.logger import setup_logger
5
+ from api.routes import router
6
+
7
+
8
+ logger = setup_logger(__name__)
9
+
10
def create_app():
    """Build and configure the FastAPI application instance."""
    application = FastAPI(
        title="NiansuhAI API Gateway",
        docs_url=None,     # Swagger UI disabled
        redoc_url=None,    # ReDoc disabled
        openapi_url=None,  # OpenAPI schema disabled
    )

    # Cross-origin settings for browser clients.
    application.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],  # NOTE(review): wide-open CORS — tighten for production
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    # Mount every API route defined in api.routes.
    application.include_router(router)

    @application.exception_handler(Exception)
    async def global_exception_handler(request: Request, exc: Exception):
        # Log the failure server-side; hide internals from the client.
        logger.error(f"An error occurred: {str(exc)}")
        return JSONResponse(
            status_code=500,
            content={"message": "An internal server error occurred."},
        )

    return application

app = create_app()
api/auth.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import Depends, HTTPException
2
+ from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
3
+ from api.config import APP_SECRET
4
+
5
+ security = HTTPBearer()
6
+
7
def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(security)):
    """Validate the bearer token against APP_SECRET; raise 403 on mismatch."""
    supplied = credentials.credentials
    if supplied != APP_SECRET:
        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
    return supplied
api/config.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+
4
+ load_dotenv()
5
+
6
+ BASE_URL = "https://www.blackbox.ai"
7
+ common_headers = {
8
+ 'accept': '*/*',
9
+ 'accept-language': 'en-US,en;q=0.9',
10
+ 'content-type': 'application/json',
11
+ 'origin': BASE_URL,
12
+ 'priority': 'u=1, i',
13
+ 'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
14
+ 'sec-ch-ua-arch': '"x86"',
15
+ 'sec-ch-ua-bitness': '"64"',
16
+ 'sec-ch-ua-full-version': '"131.0.6778.86"',
17
+ 'sec-ch-ua-full-version-list': '"Google Chrome";v="131.0.6778.86", "Chromium";v="131.0.6778.86", "Not_A Brand";v="24.0.0.0"',
18
+ 'sec-ch-ua-mobile': '?0',
19
+ 'sec-ch-ua-model': '""',
20
+ 'sec-ch-ua-platform': '"Windows"',
21
+ 'sec-ch-ua-platform-version': '"19.0.0"',
22
+ 'sec-fetch-dest': 'empty',
23
+ 'sec-fetch-mode': 'cors',
24
+ 'sec-fetch-site': 'same-origin',
25
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
26
+ }
27
+ # Header Configurations for Specific API Calls
28
def get_headers_api_chat(referer_url):
    """Headers for /api/chat calls: shared browser headers plus JSON content type and referer."""
    headers = dict(common_headers)
    headers['Content-Type'] = 'application/json'
    headers['Referer'] = referer_url
    return headers
30
+
31
def get_headers_chat(chat_url, next_action, next_router_state_tree):
    """Headers for the chat-page POST (Next.js server-action style request)."""
    overrides = {
        'Accept': 'text/x-component',
        'Content-Type': 'text/plain;charset=UTF-8',
        'Referer': chat_url,
        'next-action': next_action,
        'next-router-state-tree': next_router_state_tree,
        'next-url': '/',
    }
    return {**common_headers, **overrides}
41
+
42
+ APP_SECRET = os.getenv("APP_SECRET")
43
+
44
+ ALLOWED_MODELS = [
45
+ {"id": "blackboxai", "name": "blackboxai"},
46
+ {"id": "blackboxai-pro", "name": "blackboxai-pro"},
47
+ {"id": "flux", "name": "flux"},
48
+ {"id": "llama-3.1-8b", "name": "llama-3.1-8b"},
49
+ {"id": "llama-3.1-70b", "name": "llama-3.1-70b"},
50
+ {"id": "llama-3.1-405b", "name": "llama-3.1-405"},
51
+ {"id": "gpt-4o", "name": "gpt-4o"},
52
+ {"id": "gpt-4o-2024-05-13", "name": "gpt-4o"},
53
+ {"id": "gemini-pro", "name": "gemini-pro"},
54
+ {"id": "gemini-1.5-flash", "name": "gemini-1.5-flash"},
55
+ {"id": "claude-sonnet-3.5", "name": "claude-sonnet-3.5"},
56
+ {"id": "Meta-Llama-3.3-70B-Instruct-Turbo", "name": "Meta-Llama-3.3-70B-Instruct-Turbo"},
57
+ {"id": "Mistral-7B-Instruct-v0.2", "name": "Mistral-7B-Instruct-v0.2"},
58
+ {"id": "deepseek-llm-67b-chat", "name": "deepseek-llm-67b-chat"},
59
+ {"id": "dbrx-instruct", "name": "dbrx-instruct"},
60
+ {"id": "Meta-Llama-3.1-405B-Instruct-Turbo", "name": "Meta-Llama-3.1-405B-Instruct-Turbo"},
61
+ {"id": "Qwen-QwQ-32B-Preview", "name": "Qwen-QwQ-32B-Preview"},
62
+ {"id": "Nous-Hermes-2-Mixtral-8x7B-DPO", "name": "Nous-Hermes-2-Mixtral-8x7B-DPO"},
63
+ {"id": "PythonAgent", "name": "python"},
64
+ {"id": "JavaAgent", "name": "java"},
65
+ {"id": "JavaScriptAgent", "name": "javascript"},
66
+ {"id": "HTMLAgent", "name": "html"},
67
+ {"id": "GoogleCloudAgent", "name": "googlecloud"},
68
+ {"id": "AndroidDeveloper", "name": "androiddeveloper"},
69
+ {"id": "SwiftDeveloper", "name": "swiftdeveloper"},
70
+ {"id": "Next.jsAgent", "name": "next.js"},
71
+ {"id": "MongoDBAgent", "name": "mongodb"},
72
+ {"id": "PyTorchAgent", "name": "pytorch"},
73
+ {"id": "ReactAgent", "name": "react"},
74
+ {"id": "XcodeAgent", "name": "xcode"},
75
+ {"id": "AngularJSAgent", "name": "angularjs"},
76
+ {"id": "HerokuAgent", "name": "heroku"},
77
+ {"id": "GodotAgent", "name": "godot"},
78
+ {"id": "GoAgent", "name": "go"},
79
+ {"id": "GitlabAgent", "name": "gitlab"},
80
+ {"id": "GitAgent", "name": "git"},
81
+ {"id": "RepoMap", "name": "repomap"},
82
+ {"id": "gemini-1.5-pro-latest", "name": "gemini-pro"},
83
+ {"id": "gemini-1.5-pro", "name": "gemini-pro"},
84
+ {"id": "claude-3-5-sonnet-20240620", "name": "claude-sonnet-3.5"},
85
+ {"id": "claude-3-5-sonnet", "name": "claude-sonnet-3.5"},
86
+ {"id": "Niansuh", "name": "niansuh"},
87
+ {"id": "o1-preview", "name": "o1-preview"},
88
+ {"id": "claude-3-5-sonnet-20241022", "name": "claude-3-5-sonnet-20241022"},
89
+ {"id": "claude-3-5-sonnet-x", "name": "claude-3-5-sonnet-x"},
90
+ {"id": "gpt-3.5-turbo", "name": "gpt-3.5-turbo"},
91
+ {"id": "gpt-3.5-turbo-202201", "name": "gpt-3.5-turbo-202201"},
92
+
93
+ # Added New Agents
94
+ {"id": "FlaskAgent", "name": "flask"},
95
+ {"id": "FirebaseAgent", "name": "firebase"},
96
+ {"id": "FastAPIAgent", "name": "fastapi"},
97
+ {"id": "ErlangAgent", "name": "erlang"},
98
+ {"id": "ElectronAgent", "name": "electron"},
99
+ {"id": "DockerAgent", "name": "docker"},
100
+ {"id": "DigitalOceanAgent", "name": "digitalocean"},
101
+ {"id": "BitbucketAgent", "name": "bitbucket"},
102
+ {"id": "AzureAgent", "name": "azure"},
103
+ {"id": "FlutterAgent", "name": "flutter"},
104
+ {"id": "YoutubeAgent", "name": "youtube"},
105
+ {"id": "builderAgent", "name": "builder"},
106
+ ]
107
+
108
+ MODEL_MAPPING = {
109
+ "blackboxai": "blackboxai",
110
+ "blackboxai-pro": "blackboxai-pro",
111
+ "flux": "flux",
112
+ "ImageGeneration": "flux",
113
+ "llama-3.1-8b": "llama-3.1-8b",
114
+ "llama-3.1-70b": "llama-3.1-70b",
115
+ "llama-3.1-405b": "llama-3.1-405",
116
+ "gpt-4o": "gpt-4o",
117
+ "gpt-4o-2024-05-13": "gpt-4o",
118
+ "gemini-pro": "gemini-pro",
119
+ "gemini-1.5-flash": "gemini-1.5-flash",
120
+ "claude-sonnet-3.5": "claude-sonnet-3.5",
121
+ "Meta-Llama-3.3-70B-Instruct-Turbo": "Meta-Llama-3.3-70B-Instruct-Turbo",
122
+ "Mistral-7B-Instruct-v0.2": "Mistral-7B-Instruct-v0.2",
123
+ "deepseek-llm-67b-chat": "deepseek-llm-67b-chat",
124
+ "dbrx-instruct": "dbrx-instruct",
125
+ "Meta-Llama-3.1-405B-Instruct-Turbo": "Meta-Llama-3.1-405B-Instruct-Turbo",
126
+ "Qwen-QwQ-32B-Preview": "Qwen-QwQ-32B-Preview",
127
+ "Nous-Hermes-2-Mixtral-8x7B-DPO": "Nous-Hermes-2-Mixtral-8x7B-DPO",
128
+ "PythonAgent": "python",
129
+ "JavaAgent": "java",
130
+ "JavaScriptAgent": "javascript",
131
+ "HTMLAgent": "html",
132
+ "GoogleCloudAgent": "googlecloud",
133
+ "AndroidDeveloper": "androiddeveloper",
134
+ "SwiftDeveloper": "swiftdeveloper",
135
+ "Next.jsAgent": "next.js",
136
+ "MongoDBAgent": "mongodb",
137
+ "PyTorchAgent": "pytorch",
138
+ "ReactAgent": "react",
139
+ "XcodeAgent": "xcode",
140
+ "AngularJSAgent": "angularjs",
141
+ "HerokuAgent": "heroku",
142
+ "GodotAgent": "godot",
143
+ "GoAgent": "go",
144
+ "GitlabAgent": "gitlab",
145
+ "GitAgent": "git",
146
+ "RepoMap": "repomap",
147
+ # Additional mappings
148
+ "gemini-flash": "gemini-1.5-flash",
149
+ "claude-3.5-sonnet": "claude-sonnet-3.5",
150
+ "gemini-1.5-pro-latest": "gemini-pro",
151
+ "gemini-1.5-pro": "gemini-pro",
152
+ "claude-3-5-sonnet-20240620": "claude-sonnet-3.5",
153
+ "claude-3-5-sonnet": "claude-sonnet-3.5",
154
+ "Niansuh": "niansuh",
155
+ "o1-preview": "o1-preview",
156
+ "claude-3-5-sonnet-20241022": "claude-3-5-sonnet-20241022",
157
+ "claude-3-5-sonnet-x": "claude-3-5-sonnet-x",
158
+ "gpt-3.5-turbo": "gpt-3.5-turbo",
159
+ "gpt-3.5-turbo-202201": "gpt-3.5-turbo-202201",
160
+
161
+ # Added New Agents
162
+ "FlaskAgent": "flask",
163
+ "FirebaseAgent": "firebase",
164
+ "FastAPIAgent": "fastapi",
165
+ "ErlangAgent": "erlang",
166
+ "ElectronAgent": "electron",
167
+ "DockerAgent": "docker",
168
+ "DigitalOceanAgent": "digitalocean",
169
+ "BitbucketAgent": "bitbucket",
170
+ "AzureAgent": "azure",
171
+ "FlutterAgent": "flutter",
172
+ "YoutubeAgent": "youtube",
173
+ "builderAgent": "builder",
174
+ }
175
+
176
+ # Agent modes
177
+ AGENT_MODE = {
178
+ 'flux': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "flux"},
179
+ 'Niansuh': {'mode': True, 'id': "NiansuhAIk1HgESy", 'name': "niansuh"},
180
+ 'o1-preview': {'mode': True, 'id': "o1Dst8La8", 'name': "o1-preview"},
181
+ 'claude-3-5-sonnet-20241022': {'mode': True, 'id': "Claude-Sonnet-3.5zO2HZSF", 'name': "claude-3-5-sonnet-20241022"},
182
+ 'claude-3-5-sonnet-x': {'mode': True, 'id': "Claude-Sonnet-3.52022JE0UdQ3", 'name': "claude-3-5-sonnet-x"},
183
+ 'gpt-3.5-turbo': {'mode': True, 'id': "GPT-3.5-TurboYxtGz0H", 'name': "gpt-3.5-turbo"},
184
+ 'gpt-3.5-turbo-202201': {'mode': True, 'id': "GPT-3.5-Turbo-202201PNWREyV", 'name': "gpt-3.5-turbo-202201"},
185
+ 'Meta-Llama-3.3-70B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Llama-3.3-70B-Instruct-Turbo", 'name': "Meta-Llama-3.3-70B-Instruct-Turbo"},
186
+ 'Mistral-7B-Instruct-v0.2': {'mode': True, 'id': "mistralai/Mistral-7B-Instruct-v0.2", 'name': "Mistral-7B-Instruct-v0.2"},
187
+ 'deepseek-llm-67b-chat': {'mode': True, 'id': "deepseek-ai/deepseek-llm-67b-chat", 'name': "deepseek-llm-67b-chat"},
188
+ 'dbrx-instruct': {'mode': True, 'id': "databricks/dbrx-instruct", 'name': "dbrx-instruct"},
189
+ 'Meta-Llama-3.1-405B-Instruct-Turbo': {'mode': True, 'id': "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro", 'name': "Meta-Llama-3.1-405B-Instruct-Turbo"},
190
+ 'Qwen-QwQ-32B-Preview': {'mode': True, 'id': "Qwen/QwQ-32B-Preview", 'name': "Qwen-QwQ-32B-Preview"},
191
+ 'Nous-Hermes-2-Mixtral-8x7B-DPO': {'mode': True, 'id': "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", 'name': "Nous-Hermes-2-Mixtral-8x7B-DPO"},
192
+
193
+ }
194
+
195
+ TRENDING_AGENT_MODE = {
196
+ "blackboxai": {},
197
+ "gemini-1.5-flash": {'mode': True, 'id': 'gemini'},
198
+ "llama-3.1-8b": {'mode': True, 'id': "llama-3.1-8b"},
199
+ 'llama-3.1-70b': {'mode': True, 'id': "llama-3.1-70b"},
200
+ 'llama-3.1-405b': {'mode': True, 'id': "llama-3.1-405"},
201
+ 'blackboxai-pro': {'mode': True, 'id': "blackboxai-pro"},
202
+ 'PythonAgent': {'mode': True, 'id': "python"},
203
+ 'JavaAgent': {'mode': True, 'id': "java"},
204
+ 'JavaScriptAgent': {'mode': True, 'id': "javascript"},
205
+ 'HTMLAgent': {'mode': True, 'id': "html"},
206
+ 'GoogleCloudAgent': {'mode': True, 'id': "googlecloud"},
207
+ 'AndroidDeveloper': {'mode': True, 'id': "android"},
208
+ 'SwiftDeveloper': {'mode': True, 'id': "swift"},
209
+ 'Next.jsAgent': {'mode': True, 'id': "next.js"},
210
+ 'MongoDBAgent': {'mode': True, 'id': "mongodb"},
211
+ 'PyTorchAgent': {'mode': True, 'id': "pytorch"},
212
+ 'ReactAgent': {'mode': True, 'id': "react"},
213
+ 'XcodeAgent': {'mode': True, 'id': "xcode"},
214
+ 'AngularJSAgent': {'mode': True, 'id': "angularjs"},
215
+ 'HerokuAgent': {'mode': True, 'id': "heroku"},
216
+ 'GodotAgent': {'mode': True, 'id': "godot"},
217
+ 'GoAgent': {'mode': True, 'id': "go"},
218
+ 'GitlabAgent': {'mode': True, 'id': "gitlab"},
219
+ 'GitAgent': {'mode': True, 'id': "git"},
220
+ 'RepoMap': {'mode': True, 'id': "repomap"},
221
+ 'FlaskAgent': {'mode': True, 'id': "flask"},
222
+ 'FirebaseAgent': {'mode': True, 'id': "firebase"},
223
+ 'FastAPIAgent': {'mode': True, 'id': "fastapi"},
224
+ 'ErlangAgent': {'mode': True, 'id': "erlang"},
225
+ 'ElectronAgent': {'mode': True, 'id': "electron"},
226
+ 'DockerAgent': {'mode': True, 'id': "docker"},
227
+ 'DigitalOceanAgent': {'mode': True, 'id': "digitalocean"},
228
+ 'BitbucketAgent': {'mode': True, 'id': "bitbucket"},
229
+ 'AzureAgent': {'mode': True, 'id': "azure"},
230
+ 'FlutterAgent': {'mode': True, 'id': "flutter"},
231
+ 'YoutubeAgent': {'mode': True, 'id': "youtube"},
232
+ 'builderAgent': {'mode': True, 'id': "builder"},
233
+ }
234
+
235
+ # Model prefixes
236
+ MODEL_PREFIXES = {
237
+ 'gpt-4o': '@GPT-4o',
238
+ 'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
239
+ 'gemini-pro': '@Gemini-PRO',
240
+ 'PythonAgent': '@python',
241
+ 'JavaAgent': '@java',
242
+ 'JavaScriptAgent': '@javascript',
243
+ 'HTMLAgent': '@html',
244
+ 'GoogleCloudAgent': '@googlecloud',
245
+ 'AndroidDeveloper': '@android',
246
+ 'SwiftDeveloper': '@swift',
247
+ 'Next.jsAgent': '@next.js',
248
+ 'MongoDBAgent': '@mongodb',
249
+ 'PyTorchAgent': '@pytorch',
250
+ 'ReactAgent': '@react',
251
+ 'XcodeAgent': '@xcode',
252
+ 'AngularJSAgent': '@angularjs',
253
+ 'HerokuAgent': '@heroku',
254
+ 'GodotAgent': '@godot',
255
+ 'GoAgent': '@go',
256
+ 'GitlabAgent': '@gitlab',
257
+ 'GitAgent': '@git',
258
+ 'blackboxai-pro': '@blackboxai-pro',
259
+ 'FlaskAgent': '@flask',
260
+ 'FirebaseAgent': '@firebase',
261
+ 'FastAPIAgent': '@fastapi',
262
+ 'ErlangAgent': '@erlang',
263
+ 'ElectronAgent': '@electron',
264
+ 'DockerAgent': '@docker',
265
+ 'DigitalOceanAgent': '@digitalocean',
266
+ 'BitbucketAgent': '@bitbucket',
267
+ 'AzureAgent': '@azure',
268
+ 'FlutterAgent': '@flutter',
269
+ 'YoutubeAgent': '@youtube',
270
+ 'builderAgent': '@builder',
271
+ }
272
+
273
+ # Model referers
274
+ MODEL_REFERERS = {
275
+ "blackboxai": "/?model=blackboxai",
276
+ "gpt-4o": "/?model=gpt-4o",
277
+ "gemini-pro": "/?model=gemini-pro",
278
+ "claude-sonnet-3.5": "/?model=claude-sonnet-3.5",
279
+ "blackboxai-pro": "/?model=blackboxai-pro",
280
+ }
api/logger.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+
3
+ # Setup logger with a consistent format
4
def setup_logger(name):
    """Return a logger configured once with a console handler and a shared format.

    Idempotent: a logger that already has handlers is returned unchanged, so
    repeated calls never attach duplicate handlers.
    """
    log = logging.getLogger(name)
    if log.handlers:
        return log  # already configured

    log.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    console = logging.StreamHandler()
    console.setFormatter(fmt)
    log.addHandler(console)

    # Error-level file handler intentionally disabled:
    # error_file_handler = logging.FileHandler('error.log')
    # error_file_handler.setFormatter(fmt)
    # error_file_handler.setLevel(logging.ERROR)
    # log.addHandler(error_file_handler)

    return log
22
+
23
+ logger = setup_logger(__name__)
24
+
25
+ # Log functions to structure specific logs in utils.py
26
def log_generated_chat_id_with_referer(chat_id, model, referer_url):
    """
    Log the generated Chat ID with model and referer URL if it exists.
    """
    logger.info(f"Generated Chat ID: {chat_id} - Model: {model} - URL: {referer_url}")

def log_model_delay(delay_seconds, model, chat_id):
    """
    Log the delay introduced for specific models.
    """
    logger.info(f"Introducing a delay of {delay_seconds} seconds for model '{model}' (Chat ID: {chat_id})")

def log_http_error(error, chat_id):
    """
    Log HTTP errors encountered during requests.
    """
    logger.error(f"HTTP error occurred for Chat ID {chat_id}: {error}")

def log_request_error(error, chat_id):
    """
    Log request errors unrelated to HTTP status.
    """
    logger.error(f"Request error occurred for Chat ID {chat_id}: {error}")

def log_strip_prefix(model_prefix, content):
    """
    Log when a model prefix is stripped from the content.

    Emitted at DEBUG level only; `content` itself is intentionally not logged.
    """
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
api/models.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Optional
2
+ from pydantic import BaseModel
3
+
4
class Message(BaseModel):
    """A single chat message.

    `content` is either a plain string or a list of content parts (e.g. a
    text part plus an image_url part, as consumed by api.utils.message_to_dict).
    """
    role: str
    content: str | list

class ChatRequest(BaseModel):
    """OpenAI-style chat completion request body accepted by /v1/chat/completions."""
    model: str
    messages: List[Message]
    stream: Optional[bool] = False
    temperature: Optional[float] = 0.5
    top_p: Optional[float] = 0.9
    max_tokens: Optional[int] = 99999999  # effectively "unlimited" by default
api/routes.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from fastapi import APIRouter, Depends, HTTPException, Request, Response
3
+ from fastapi.responses import StreamingResponse
4
+ from api.auth import verify_app_secret
5
+ from api.config import ALLOWED_MODELS
6
+ from api.models import ChatRequest
7
+ from api.utils import process_non_streaming_response, process_streaming_response
8
+ from api.logger import setup_logger
9
+
10
+ logger = setup_logger(__name__)
11
+
12
+ router = APIRouter()
13
+
14
@router.options("/v1/chat/completions")
@router.options("/api/v1/chat/completions")
async def chat_completions_options():
    """Answer CORS preflight requests for the chat completion endpoints."""
    cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "POST, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type, Authorization",
    }
    return Response(status_code=200, headers=cors_headers)
25
+
26
@router.get("/v1/models")
@router.get("/api/v1/models")
async def list_models():
    """Expose the allowed model catalogue in OpenAI list format."""
    payload = {"object": "list", "data": ALLOWED_MODELS}
    return payload
30
+
31
@router.post("/v1/chat/completions")
@router.post("/api/v1/chat/completions")
async def chat_completions(
    request: ChatRequest, app_secret: str = Depends(verify_app_secret)
):
    """Handle an OpenAI-style chat completion request.

    Validates the requested model against ALLOWED_MODELS, then dispatches to
    the streaming or non-streaming processor depending on `request.stream`.
    Raises HTTP 400 for a model not in the allow-list.
    """
    logger.info("Entering chat_completions route")
    logger.info(f"Processing chat completion request for model: {request.model}")

    # Build the allowed-id list once (the original built it twice: once for the
    # membership test and again for the error message).
    allowed_ids = [model["id"] for model in ALLOWED_MODELS]
    if request.model not in allowed_ids:
        raise HTTPException(
            status_code=400,
            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(allowed_ids)}",
        )

    if request.stream:
        logger.info("Streaming response")
        return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
    else:
        logger.info("Non-streaming response")
        return await process_non_streaming_response(request)
51
+
52
# Starlette's Router.route() decorator is deprecated (and removed in newer
# Starlette releases); register all probe paths with router.get, matching the
# style already used for "/health".
@router.get("/")
@router.get("/healthz")
@router.get("/ready")
@router.get("/alive")
@router.get("/status")
@router.get("/health")
def health_check(request: Request):
    """Liveness/readiness probe endpoint.

    Deliberately responds with HTTP 421 (per the original author's comment
    "Changing the status code to 421") and a JSON greeting body.
    """
    return Response(
        content=json.dumps({
            "message": "Welcome to the NiansuhAI API!",
            "inspiration": "Failure is the first step to success."
        }),
        media_type="application/json",
        status_code=421  # intentional non-200 status
    )
api/utils.py ADDED
@@ -0,0 +1,418 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+ import json
3
+ import uuid
4
+ import asyncio
5
+ import random
6
+ import string
7
+ from typing import Any, Dict, Optional
8
+
9
+ import httpx
10
+ from fastapi import HTTPException
11
+ from api.config import (
12
+ MODEL_MAPPING,
13
+ get_headers_api_chat,
14
+ get_headers_chat,
15
+ BASE_URL,
16
+ AGENT_MODE,
17
+ TRENDING_AGENT_MODE,
18
+ MODEL_PREFIXES,
19
+ MODEL_REFERERS
20
+ )
21
+ from api.models import ChatRequest
22
+ from api.logger import setup_logger
23
+ from api.validate import getHid # Import the asynchronous getHid function
24
+ import tiktoken
25
+
26
+ logger = setup_logger(__name__)
27
+
28
+ # Define the blocked message
29
+ BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai and for API requests replace https://www.blackbox.ai with https://api.blackbox.ai"
30
+
31
+ # Function to calculate tokens using tiktoken
32
def calculate_tokens(text: str, model: str) -> int:
    """Count the tokens in *text* using tiktoken's encoding for *model*.

    Falls back to a whitespace word count when tiktoken has no encoding
    registered for the model name.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        # tiktoken doesn't recognise this model; approximate with a word count.
        logger.warning(f"Model '{model}' not supported by tiktoken for token counting. Using a generic method.")
        return len(text.split())
    return len(encoding.encode(text))
41
+
42
+ # Helper function to create chat completion data
43
def create_chat_completion_data(
    content: str, model: str, timestamp: int, request_id: str, prompt_tokens: int = 0, completion_tokens: int = 0, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style "chat.completion.chunk" payload.

    Usage statistics are attached only on the terminal chunk
    (finish_reason == "stop"); every other chunk carries "usage": None.
    """
    usage = None
    if finish_reason == "stop":
        usage = {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens,
        }

    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": request_id,
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": usage,
    }
68
+
69
+ # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
70
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a Message into the upstream chat payload dict.

    Prepends *model_prefix* to the text when given. If the content is a
    two-part list whose second part carries an "image_url", the base64 image
    is attached under "data" (including the "imagesData" entry the upstream
    API expects); otherwise a plain role/content dict is returned.
    """
    if isinstance(message.content, str):
        text = message.content
    else:
        text = message.content[0]["text"]
    if model_prefix:
        text = f"{model_prefix} {text}"

    has_image = (
        isinstance(message.content, list)
        and len(message.content) == 2
        and "image_url" in message.content[1]
    )
    if not has_image:
        return {"role": message.role, "content": text}

    image_base64 = message.content[1]["image_url"]["url"]
    return {
        "role": message.role,
        "content": text,
        "data": {
            "imageBase64": image_base64,
            "fileText": "",
            "title": "snapshot",
            # "imagesData" mirrors the base64 payload under a generated file path.
            "imagesData": [
                {
                    "filePath": f"MultipleFiles/{uuid.uuid4().hex}.jpg",
                    "contents": image_base64,
                }
            ],
        },
    }
94
+
95
+ # Function to strip model prefix from content if present
96
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Return *content* with a leading *model_prefix* removed and whitespace trimmed."""
    if not model_prefix or not content.startswith(model_prefix):
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
102
+
103
+ # Process streaming response with headers from config.py
104
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream Blackbox API as SSE events.

    Yields OpenAI-style "chat.completion.chunk" SSE lines, a terminal chunk
    carrying usage statistics (finish_reason "stop"), then "data: [DONE]".
    Errors are reported in-stream as a chunk with finish_reason 'error'
    rather than raised, so the client always receives a well-formed stream.

    Raises HTTPException(500) only before streaming starts, when the h-value
    needed for the 'validated' field cannot be fetched.
    """
    # Generate a unique ID for this request
    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")

    # Get the appropriate configuration for the requested model
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Adjust headers_api_chat since referer_url is removed
    headers_api_chat = get_headers_api_chat(BASE_URL)

    if request.model == 'o1-preview':
        # Artificial jitter for this model (1–60 s).
        delay_seconds = random.randint(1, 60)
        logger.info(
            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' "
            f"(Request ID: {request_id})"
        )
        await asyncio.sleep(delay_seconds)

    # Fetch the h-value for the 'validated' field
    h_value = await getHid()
    if not h_value:
        logger.error("Failed to retrieve h-value for validation.")
        raise HTTPException(
            status_code=500, detail="Validation failed due to missing h-value."
        )

    messages = [
        message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
    ]

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": request_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": messages,
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": h_value,  # Dynamically set the validated field
        "visitFromDelta": False,
        "webSearchModePrompt": False,
        "imageGenerationMode": False,
    }

    # Count prompt tokens over the outgoing messages (including base64 images).
    prompt_tokens = 0
    for message in messages:
        if 'content' in message:
            prompt_tokens += calculate_tokens(message['content'], request.model)
        if 'data' in message and 'imageBase64' in message['data']:
            prompt_tokens += calculate_tokens(message['data']['imageBase64'], request.model)

    completion_tokens = 0
    # BUGFIX: initialize timestamp up front. The except handlers below
    # reference it, and an error raised before the first chunk arrives
    # (e.g. raise_for_status on a 4xx/5xx) would otherwise trigger an
    # UnboundLocalError instead of the intended error chunk.
    timestamp = int(datetime.now().timestamp())
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    timestamp = int(datetime.now().timestamp())
                    if chunk:
                        content = chunk
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]
                        # Remove the blocked message if present
                        if BLOCKED_MESSAGE in content:
                            logger.info(
                                f"Blocked message detected in response for Request ID {request_id}."
                            )
                            content = content.replace(BLOCKED_MESSAGE, '').strip()
                            if not content:
                                continue  # Skip if content is empty after removal
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        completion_tokens += calculate_tokens(cleaned_content, request.model)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp, request_id))}\n\n"

            yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'stop'))}\n\n"
            yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
            error_message = f"HTTP error occurred: {e}"
            try:
                error_details = e.response.json()
                error_message += f" Details: {error_details}"
            except ValueError:
                error_message += f" Response body: {e.response.text}"

            yield f"data: {json.dumps(create_chat_completion_data(error_message, request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'error'))}\n\n"
            yield "data: [DONE]\n\n"
        except httpx.RequestError as e:
            logger.error(
                f"Error occurred during request for Request ID {request_id}: {e}"
            )
            error_message = f"Request error occurred: {e}"
            yield f"data: {json.dumps(create_chat_completion_data(error_message, request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'error'))}\n\n"
            yield "data: [DONE]\n\n"
        except Exception as e:
            logger.error(f"An unexpected error occurred for Request ID {request_id}: {e}")
            error_message = f"An unexpected error occurred: {e}"
            yield f"data: {json.dumps(create_chat_completion_data(error_message, request.model, timestamp, request_id, prompt_tokens, completion_tokens, 'error'))}\n\n"
            yield "data: [DONE]\n\n"
227
+
228
# Process non-streaming response with headers from config.py
def _error_completion_response(request_id, model, error_message, prompt_tokens):
    """Build an OpenAI-style ``chat.completion`` payload carrying an error.

    Shared by every failure path of ``process_non_streaming_response`` so the
    error shape stays identical for HTTP errors, transport errors and
    unexpected exceptions.

    Args:
        request_id: The ``chatcmpl-...`` id assigned to this request.
        model: The model name echoed back to the client.
        error_message: Human-readable description placed in the assistant turn.
        prompt_tokens: Tokens counted for the prompt (completion side is 0).

    Returns:
        dict shaped like an OpenAI chat completion with ``finish_reason="error"``.
    """
    return {
        "id": request_id,
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": error_message},
                "finish_reason": "error",
            }
        ],
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": 0,
            "total_tokens": prompt_tokens,
        },
    }


async def process_non_streaming_response(request: ChatRequest):
    """Forward a chat request upstream and return one complete (non-streamed)
    OpenAI-style chat completion.

    Mirrors the streaming path but buffers the whole upstream body before
    sentinel/blocked-message cleanup and token accounting. Transport and HTTP
    failures are returned as error-shaped completions rather than raised.

    Args:
        request: Parsed client request (model, messages, sampling params).

    Returns:
        dict: OpenAI-style ``chat.completion`` response.

    Raises:
        HTTPException: If the validation h-value is missing, or the upstream
            response is empty after removing the blocked message.
    """
    # Generate a unique ID for this request
    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")

    # Get the appropriate configuration for the requested model
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Adjust headers_api_chat and headers_chat since referer_url is removed.
    headers_api_chat = get_headers_api_chat(BASE_URL)
    # NOTE(review): headers_chat is built but never sent in this function;
    # kept for parity with the streaming path — confirm whether it can go.
    headers_chat = get_headers_chat(
        BASE_URL,
        next_action=str(uuid.uuid4()),
        next_router_state_tree=json.dumps([""]),
    )

    if request.model == 'o1-preview':
        # Randomized delay for this model, matching the streaming path.
        delay_seconds = random.randint(20, 60)
        logger.info(
            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' "
            f"(Request ID: {request_id})"
        )
        await asyncio.sleep(delay_seconds)

    # The 'validated' h-value is currently pinned to a constant; the guard
    # below only fires if this is ever swapped for a dynamic lookup (see
    # api/validate.py getHid) that can return an empty value.
    h_value = "00f37b34-a166-4efb-bce5-1312d87f2f94"
    if not h_value:
        logger.error("Failed to retrieve h-value for validation.")
        raise HTTPException(
            status_code=500, detail="Validation failed due to missing h-value."
        )

    messages = [
        message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
    ]

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": request_id,
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": messages,
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": h_value,  # Dynamically set the validated field
        "visitFromDelta": False,
        "webSearchModePrompt": False,
        "imageGenerationMode": False,
    }

    # Count prompt tokens over text content plus any inline base64 images.
    prompt_tokens = 0
    for message in messages:
        if 'content' in message:
            prompt_tokens += calculate_tokens(message['content'], request.model)
        if 'data' in message and 'imageBase64' in message['data']:
            prompt_tokens += calculate_tokens(message['data']['imageBase64'], request.model)

    # Accumulate chunks in a list and join once (avoids quadratic str +=).
    response_chunks = []
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                # Match the streaming path's timeout; without this, httpx's
                # 5-second default would abort long generations.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    response_chunks.append(chunk)
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
            error_message = f"HTTP error occurred: {e}"
            try:
                error_details = e.response.json()
                error_message += f" Details: {error_details}"
            except ValueError:
                error_message += f" Response body: {e.response.text}"
            return _error_completion_response(
                request_id, request.model, error_message, prompt_tokens
            )
        except httpx.RequestError as e:
            logger.error(
                f"Error occurred during request for Request ID {request_id}: {e}"
            )
            return _error_completion_response(
                request_id,
                request.model,
                f"Request error occurred: {e}",
                prompt_tokens,
            )
        except Exception as e:
            logger.error(f"An unexpected error occurred for Request ID {request_id}: {e}")
            return _error_completion_response(
                request_id,
                request.model,
                f"An unexpected error occurred: {e}",
                prompt_tokens,
            )

    full_response = "".join(response_chunks)

    # Strip the upstream version sentinel if present.
    sentinel = "$@$v=undefined-rv1$@$"
    if full_response.startswith(sentinel):
        full_response = full_response[len(sentinel):]

    # Remove the blocked message if present
    if BLOCKED_MESSAGE in full_response:
        logger.info(
            f"Blocked message detected in response for Request ID {request_id}."
        )
        full_response = full_response.replace(BLOCKED_MESSAGE, '').strip()
        if not full_response:
            raise HTTPException(
                status_code=500, detail="Blocked message detected in response."
            )

    cleaned_full_response = strip_model_prefix(full_response, model_prefix)
    completion_tokens = calculate_tokens(cleaned_full_response, request.model)

    return {
        "id": request_id,
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens,
        },
    }
api/validate.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import re
import time
import asyncio
import aiohttp
from typing import Optional

base_url = "https://www.blackbox.ai"
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
}

# Module-level cache for the discovered h-value.
cached_hid = None   # last validated h-value, or None if never found
cache_time = 0      # time.time() when cached_hid was stored
CACHE_DURATION = 36000  # seconds a cached h-value stays valid (10 hours)


async def getHid(force_refresh: bool = False) -> Optional[str]:
    """Discover the blackbox.ai validation h-value by scanning its JS chunks.

    Fetches the landing page, follows each ``static/chunks/NNNN-*.js`` asset,
    and returns the first UUID literal that appears in an assignment-like
    context. Successful lookups are cached for CACHE_DURATION seconds.

    Args:
        force_refresh: When True, bypass the cache and re-scan.

    Returns:
        The validated UUID string, or None if the page/assets could not be
        fetched or no candidate passed validation.
    """
    global cached_hid, cache_time
    current_time = time.time()

    cache_is_fresh = cached_hid and (current_time - cache_time) < CACHE_DURATION
    if cache_is_fresh and not force_refresh:
        print("Using cached_hid:", cached_hid)
        return cached_hid

    # A quoted UUID literal, e.g. "00f37b34-a166-4efb-bce5-1312d87f2f94".
    uuid_format = r'["\']([0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12})["\']'

    def is_valid_context(text_around):
        # Heuristic: the UUID should sit near an assignment like "x=".
        return any(char + '=' in text_around for char in 'abcdefghijklmnopqrstuvwxyz')

    try:
        async with aiohttp.ClientSession(headers=headers) as session:
            async with session.get(base_url) as response:
                if response.status != 200:
                    print("Failed to load the page.")
                    return None
                page_content = await response.text()

            chunk_paths = re.findall(
                r'static/chunks/\d{4}-[a-fA-F0-9]+\.js', page_content
            )
            for chunk_path in chunk_paths:
                async with session.get(f"{base_url}/_next/{chunk_path}") as js_response:
                    if js_response.status != 200:
                        continue
                    js_content = await js_response.text()

                for candidate in re.finditer(uuid_format, js_content):
                    # Inspect 10 chars of context on either side of the match.
                    lo = max(0, candidate.start() - 10)
                    hi = min(len(js_content), candidate.end() + 10)
                    if not is_valid_context(js_content[lo:hi]):
                        continue
                    validated_value = candidate.group(1)
                    print("Found and validated h-value:", validated_value)
                    # Update the cache
                    cached_hid = validated_value
                    cache_time = current_time
                    return validated_value

            print("The h-value was not found in any JS content.")
            return None
    except Exception as e:
        print(f"An error occurred during the request: {e}")
        return None
+ return None
main.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
"""Entry point: serve the FastAPI app with uvicorn on 0.0.0.0:8001."""
import uvicorn

from api.app import app


def main() -> None:
    """Launch the ASGI server, bound to all interfaces for container use."""
    uvicorn.run(app, host="0.0.0.0", port=8001)


if __name__ == "__main__":
    main()
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ fastapi==0.95.2
2
+ httpx==0.23.3
3
+ pydantic==1.10.4
4
+ python-dotenv==0.21.0
5
+ uvicorn==0.21.1
6
+ aiohttp
7
+ tiktoken