Niansuh committed on
Commit
381d345
·
verified ·
1 Parent(s): 3839cf0

Upload 14 files

Browse files
.github/workflows/docker-deploy.yml CHANGED
@@ -36,7 +36,7 @@ jobs:
36
  with:
37
  context: . # The context is the root of your repository
38
  push: true # Automatically push the image after building
39
- tags: ${{ secrets.DOCKER_USERNAME }}/blackboxv2:v100 # Replace 'your-app-name' with your desired Docker image name
40
 
41
  # Step 5: Log out of Docker Hub
42
  - name: Log out of Docker Hub
 
36
  with:
37
  context: . # The context is the root of your repository
38
  push: true # Automatically push the image after building
39
+ tags: ${{ secrets.DOCKER_USERNAME }}/blackboxv2:v786 # Replace 'your-app-name' with your desired Docker image name
40
 
41
  # Step 5: Log out of Docker Hub
42
  - name: Log out of Docker Hub
Dockerfile CHANGED
@@ -8,9 +8,8 @@ ENV PYTHONUNBUFFERED=1
8
  # Set the working directory to /app
9
  WORKDIR /app
10
 
11
- # Install system dependencies (if any)
12
- # For example, if you need gcc for some packages, uncomment the following line
13
- # RUN apt-get update && apt-get install -y gcc
14
 
15
  # Copy only the requirements.txt first to leverage Docker cache
16
  COPY requirements.txt .
@@ -25,5 +24,5 @@ COPY . .
25
  # Expose port 8001 to the outside world
26
  EXPOSE 8001
27
 
28
- # Command to run the FastAPI app with Uvicorn
29
- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8001"]
 
8
  # Set the working directory to /app
9
  WORKDIR /app
10
 
11
+ # Install procps (process utilities); note: `nproc` itself is provided by coreutils, which Debian-based base images already include
12
+ RUN apt-get update && apt-get install -y procps
 
13
 
14
  # Copy only the requirements.txt first to leverage Docker cache
15
  COPY requirements.txt .
 
24
  # Expose port 8001 to the outside world
25
  EXPOSE 8001
26
 
27
+ # Command to run Uvicorn with a dynamic number of workers based on the CPU cores
28
+ CMD ["sh", "-c", "uvicorn main:app --host 0.0.0.0 --port 8001 --workers $(nproc)"]
api/config.py CHANGED
@@ -3,21 +3,20 @@ from dotenv import load_dotenv
3
 
4
  load_dotenv()
5
 
6
- # Base URL and Common Headers
7
  BASE_URL = "https://www.blackbox.ai"
8
  common_headers = {
9
  'accept': '*/*',
10
  'accept-language': 'en-US,en;q=0.9',
11
- 'cache-control': 'no-cache',
12
  'origin': BASE_URL,
13
- 'pragma': 'no-cache',
14
  'priority': 'u=1, i',
15
  'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
16
  'sec-ch-ua-arch': '"x86"',
17
  'sec-ch-ua-bitness': '"64"',
18
- 'sec-ch-ua-full-version': '"131.0.6778.69"',
19
- 'sec-ch-ua-full-version-list': '"Google Chrome";v="131.0.6778.69", "Chromium";v="131.0.6778.69", "Not_A Brand";v="24.0.0.0"',
20
  'sec-ch-ua-mobile': '?0',
 
21
  'sec-ch-ua-platform': '"Windows"',
22
  'sec-ch-ua-platform-version': '"19.0.0"',
23
  'sec-fetch-dest': 'empty',
@@ -25,7 +24,6 @@ common_headers = {
25
  'sec-fetch-site': 'same-origin',
26
  'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
27
  }
28
-
29
  # Header Configurations for Specific API Calls
30
  def get_headers_api_chat(referer_url):
31
  return {**common_headers, 'Content-Type': 'application/json', 'Referer': referer_url}
@@ -83,6 +81,7 @@ ALLOWED_MODELS = [
83
  {"id": "claude-3-5-sonnet-20241022", "name": "claude-3-5-sonnet-20241022"},
84
  {"id": "claude-3-5-sonnet-x", "name": "claude-3-5-sonnet-x"},
85
  {"id": "gpt-3.5-turbo", "name": "gpt-3.5-turbo"},
 
86
 
87
  # Added New Agents
88
  {"id": "FlaskAgent", "name": "flask"},
@@ -143,6 +142,7 @@ MODEL_MAPPING = {
143
  "claude-3-5-sonnet-20241022": "claude-3-5-sonnet-20241022",
144
  "claude-3-5-sonnet-x": "claude-3-5-sonnet-x",
145
  "gpt-3.5-turbo": "gpt-3.5-turbo",
 
146
 
147
  # Added New Agents
148
  "FlaskAgent": "flask",
@@ -167,6 +167,7 @@ AGENT_MODE = {
167
  'claude-3-5-sonnet-20241022': {'mode': True, 'id': "Claude-Sonnet-3.5zO2HZSF", 'name': "claude-3-5-sonnet-20241022"},
168
  'claude-3-5-sonnet-x': {'mode': True, 'id': "Claude-Sonnet-3.52022JE0UdQ3", 'name': "claude-3-5-sonnet-x"},
169
  'gpt-3.5-turbo': {'mode': True, 'id': "GPT-3.5-TurboYxtGz0H", 'name': "gpt-3.5-turbo"},
 
170
  }
171
 
172
  TRENDING_AGENT_MODE = {
@@ -212,8 +213,8 @@ TRENDING_AGENT_MODE = {
212
  # Model prefixes
213
  MODEL_PREFIXES = {
214
  'gpt-4o': '@GPT-4o',
215
- 'gemini-pro': '@Gemini-PRO',
216
  'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
 
217
  'PythonAgent': '@python',
218
  'JavaAgent': '@java',
219
  'JavaScriptAgent': '@javascript',
@@ -233,7 +234,6 @@ MODEL_PREFIXES = {
233
  'GitlabAgent': '@gitlab',
234
  'GitAgent': '@git',
235
  'blackboxai-pro': '@blackboxai-pro',
236
- 'flux': '@Image Generation',
237
  'FlaskAgent': '@flask',
238
  'FirebaseAgent': '@firebase',
239
  'FastAPIAgent': '@fastapi',
@@ -256,4 +256,3 @@ MODEL_REFERERS = {
256
  "claude-sonnet-3.5": "/?model=claude-sonnet-3.5",
257
  "blackboxai-pro": "/?model=blackboxai-pro",
258
  }
259
-
 
3
 
4
  load_dotenv()
5
 
 
6
  BASE_URL = "https://www.blackbox.ai"
7
  common_headers = {
8
  'accept': '*/*',
9
  'accept-language': 'en-US,en;q=0.9',
10
+ 'content-type': 'application/json',
11
  'origin': BASE_URL,
 
12
  'priority': 'u=1, i',
13
  'sec-ch-ua': '"Google Chrome";v="131", "Chromium";v="131", "Not_A Brand";v="24"',
14
  'sec-ch-ua-arch': '"x86"',
15
  'sec-ch-ua-bitness': '"64"',
16
+ 'sec-ch-ua-full-version': '"131.0.6778.86"',
17
+ 'sec-ch-ua-full-version-list': '"Google Chrome";v="131.0.6778.86", "Chromium";v="131.0.6778.86", "Not_A Brand";v="24.0.0.0"',
18
  'sec-ch-ua-mobile': '?0',
19
+ 'sec-ch-ua-model': '""',
20
  'sec-ch-ua-platform': '"Windows"',
21
  'sec-ch-ua-platform-version': '"19.0.0"',
22
  'sec-fetch-dest': 'empty',
 
24
  'sec-fetch-site': 'same-origin',
25
  'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
26
  }
 
27
# Header Configurations for Specific API Calls
def get_headers_api_chat(referer_url):
    """Return the shared headers extended with a JSON content type and the given referer."""
    headers = dict(common_headers)
    headers['Content-Type'] = 'application/json'
    headers['Referer'] = referer_url
    return headers
 
81
  {"id": "claude-3-5-sonnet-20241022", "name": "claude-3-5-sonnet-20241022"},
82
  {"id": "claude-3-5-sonnet-x", "name": "claude-3-5-sonnet-x"},
83
  {"id": "gpt-3.5-turbo", "name": "gpt-3.5-turbo"},
84
+ {"id": "gpt-3.5-turbo-202201", "name": "gpt-3.5-turbo-202201"},
85
 
86
  # Added New Agents
87
  {"id": "FlaskAgent", "name": "flask"},
 
142
  "claude-3-5-sonnet-20241022": "claude-3-5-sonnet-20241022",
143
  "claude-3-5-sonnet-x": "claude-3-5-sonnet-x",
144
  "gpt-3.5-turbo": "gpt-3.5-turbo",
145
+ "gpt-3.5-turbo-202201": "gpt-3.5-turbo-202201",
146
 
147
  # Added New Agents
148
  "FlaskAgent": "flask",
 
167
  'claude-3-5-sonnet-20241022': {'mode': True, 'id': "Claude-Sonnet-3.5zO2HZSF", 'name': "claude-3-5-sonnet-20241022"},
168
  'claude-3-5-sonnet-x': {'mode': True, 'id': "Claude-Sonnet-3.52022JE0UdQ3", 'name': "claude-3-5-sonnet-x"},
169
  'gpt-3.5-turbo': {'mode': True, 'id': "GPT-3.5-TurboYxtGz0H", 'name': "gpt-3.5-turbo"},
170
+ 'gpt-3.5-turbo-202201': {'mode': True, 'id': "GPT-3.5-Turbo-202201PNWREyV", 'name': "gpt-3.5-turbo-202201"},
171
  }
172
 
173
  TRENDING_AGENT_MODE = {
 
213
  # Model prefixes
214
  MODEL_PREFIXES = {
215
  'gpt-4o': '@GPT-4o',
 
216
  'claude-sonnet-3.5': '@Claude-Sonnet-3.5',
217
+ 'gemini-pro': '@Gemini-PRO',
218
  'PythonAgent': '@python',
219
  'JavaAgent': '@java',
220
  'JavaScriptAgent': '@javascript',
 
234
  'GitlabAgent': '@gitlab',
235
  'GitAgent': '@git',
236
  'blackboxai-pro': '@blackboxai-pro',
 
237
  'FlaskAgent': '@flask',
238
  'FirebaseAgent': '@firebase',
239
  'FastAPIAgent': '@fastapi',
 
256
  "claude-sonnet-3.5": "/?model=claude-sonnet-3.5",
257
  "blackboxai-pro": "/?model=blackboxai-pro",
258
  }
 
api/routes.py CHANGED
@@ -1,59 +1,66 @@
1
- import json
2
- from fastapi import APIRouter, Depends, HTTPException, Request, Response
3
- from fastapi.responses import StreamingResponse
4
- from api.auth import verify_app_secret
5
- from api.config import ALLOWED_MODELS
6
- from api.models import ChatRequest
7
- from api.utils import process_non_streaming_response, process_streaming_response
8
- from api.logger import setup_logger
9
-
10
- logger = setup_logger(__name__)
11
-
12
- router = APIRouter()
13
-
14
- @router.options("/v1/chat/completions")
15
- @router.options("/api/v1/chat/completions")
16
- async def chat_completions_options():
17
- return Response(
18
- status_code=200,
19
- headers={
20
- "Access-Control-Allow-Origin": "*",
21
- "Access-Control-Allow-Methods": "POST, OPTIONS",
22
- "Access-Control-Allow-Headers": "Content-Type, Authorization",
23
- },
24
- )
25
-
26
- @router.get("/v1/models")
27
- @router.get("/api/v1/models")
28
- async def list_models():
29
- return {"object": "list", "data": ALLOWED_MODELS}
30
-
31
- @router.post("/v1/chat/completions")
32
- @router.post("/api/v1/chat/completions")
33
- async def chat_completions(
34
- request: ChatRequest, app_secret: str = Depends(verify_app_secret)
35
- ):
36
- logger.info("Entering chat_completions route")
37
- logger.info(f"Processing chat completion request for model: {request.model}")
38
-
39
- if request.model not in [model["id"] for model in ALLOWED_MODELS]:
40
- raise HTTPException(
41
- status_code=400,
42
- detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(model['id'] for model in ALLOWED_MODELS)}",
43
- )
44
-
45
- if request.stream:
46
- logger.info("Streaming response")
47
- return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")
48
- else:
49
- logger.info("Non-streaming response")
50
- return await process_non_streaming_response(request)
51
-
52
- @router.route('/')
53
- @router.route('/healthz')
54
- @router.route('/ready')
55
- @router.route('/alive')
56
- @router.route('/status')
57
- @router.get("/health")
58
- def health_check(request: Request):
59
- return Response(content=json.dumps({"status": "ok"}), media_type="application/json")
 
 
 
 
 
 
 
 
1
+ import json
2
+ from fastapi import APIRouter, Depends, HTTPException, Request, Response
3
+ from fastapi.responses import StreamingResponse
4
+ from api.auth import verify_app_secret
5
+ from api.config import ALLOWED_MODELS
6
+ from api.models import ChatRequest
7
+ from api.utils import process_non_streaming_response, process_streaming_response
8
+ from api.logger import setup_logger
9
+
10
+ logger = setup_logger(__name__)
11
+
12
+ router = APIRouter()
13
+
14
@router.options("/v1/chat/completions")
@router.options("/api/v1/chat/completions")
async def chat_completions_options():
    """Answer CORS preflight requests for the chat-completions endpoints."""
    cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "POST, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type, Authorization",
    }
    return Response(status_code=200, headers=cors_headers)
25
+
26
@router.get("/v1/models")
@router.get("/api/v1/models")
async def list_models():
    """Return the catalogue of models this proxy accepts, OpenAI-list style."""
    return dict(object="list", data=ALLOWED_MODELS)
30
+
31
@router.post("/v1/chat/completions")
@router.post("/api/v1/chat/completions")
async def chat_completions(
    request: ChatRequest, app_secret: str = Depends(verify_app_secret)
):
    """Validate the requested model, then dispatch to the streaming or
    non-streaming processing pipeline."""
    logger.info("Entering chat_completions route")
    logger.info(f"Processing chat completion request for model: {request.model}")

    allowed_ids = [model["id"] for model in ALLOWED_MODELS]
    if request.model not in allowed_ids:
        raise HTTPException(
            status_code=400,
            detail=f"Model {request.model} is not allowed. Allowed models are: {', '.join(allowed_ids)}",
        )

    # Guard-clause dispatch: stream as SSE when asked, otherwise await the
    # fully-collected completion.
    if request.stream:
        logger.info("Streaming response")
        return StreamingResponse(
            process_streaming_response(request), media_type="text/event-stream"
        )
    logger.info("Non-streaming response")
    return await process_non_streaming_response(request)
51
+
52
@router.route('/')
@router.route('/healthz')
@router.route('/ready')
@router.route('/alive')
@router.route('/status')
@router.get("/health")
def health_check(request: Request):
    """Liveness/readiness probe endpoint.

    BUGFIX: this previously answered with HTTP 421 (Misdirected Request).
    Load balancers and orchestrators treat any non-2xx answer from
    /healthz, /ready, /alive etc. as "service down", so probes must
    return a 2xx status.
    """
    return Response(
        content=json.dumps({
            "message": "Welcome to the NiansuhAI API!",
            "inspiration": "Failure is the first step to success."
        }),
        media_type="application/json",
        status_code=200,
    )
api/utils.py CHANGED
@@ -1,247 +1,248 @@
1
- from datetime import datetime
2
- import json
3
- import uuid
4
- import asyncio
5
- import random
6
- import string
7
- from typing import Any, Dict, Optional
8
-
9
- import httpx
10
- from fastapi import HTTPException
11
- from api.config import (
12
- MODEL_MAPPING,
13
- get_headers_api_chat,
14
- get_headers_chat,
15
- BASE_URL,
16
- AGENT_MODE,
17
- TRENDING_AGENT_MODE,
18
- MODEL_PREFIXES,
19
- MODEL_REFERERS
20
- )
21
- from api.models import ChatRequest
22
- from api.logger import setup_logger
23
- from api.validate import getHid # Import the asynchronous getHid function
24
-
25
- logger = setup_logger(__name__)
26
-
27
- # Define the blocked message
28
- BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai"
29
-
30
- # Helper function to create chat completion data
31
- def create_chat_completion_data(
32
- content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
33
- ) -> Dict[str, Any]:
34
- return {
35
- "id": f"chatcmpl-{uuid.uuid4()}",
36
- "object": "chat.completion.chunk",
37
- "created": timestamp,
38
- "model": model,
39
- "choices": [
40
- {
41
- "index": 0,
42
- "delta": {"content": content, "role": "assistant"},
43
- "finish_reason": finish_reason,
44
- }
45
- ],
46
- "usage": None,
47
- }
48
-
49
- # Function to convert message to dictionary format, ensuring base64 data and optional model prefix
50
- def message_to_dict(message, model_prefix: Optional[str] = None):
51
- content = message.content if isinstance(message.content, str) else message.content[0]["text"]
52
- if model_prefix:
53
- content = f"{model_prefix} {content}"
54
- if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
55
- # Ensure base64 images are always included for all models
56
- return {
57
- "role": message.role,
58
- "content": content,
59
- "data": {
60
- "imageBase64": message.content[1]["image_url"]["url"],
61
- "fileText": "",
62
- "title": "snapshot",
63
- },
64
- }
65
- return {"role": message.role, "content": content}
66
-
67
- # Function to strip model prefix from content if present
68
- def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
69
- """Remove the model prefix from the response content if present."""
70
- if model_prefix and content.startswith(model_prefix):
71
- logger.debug(f"Stripping prefix '{model_prefix}' from content.")
72
- return content[len(model_prefix):].strip()
73
- return content
74
-
75
- # Process streaming response with headers from config.py
76
- async def process_streaming_response(request: ChatRequest):
77
- # Generate a unique ID for this request
78
- request_id = f"chatcmpl-{uuid.uuid4()}"
79
- logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")
80
-
81
- agent_mode = AGENT_MODE.get(request.model, {})
82
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
83
- model_prefix = MODEL_PREFIXES.get(request.model, "")
84
-
85
- # Adjust headers_api_chat since referer_url is removed
86
- headers_api_chat = get_headers_api_chat(BASE_URL)
87
-
88
- if request.model == 'o1-preview':
89
- delay_seconds = random.randint(1, 60)
90
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
91
- await asyncio.sleep(delay_seconds)
92
-
93
- # Fetch the h-value for the 'validated' field
94
- h_value = await getHid()
95
- if not h_value:
96
- logger.error("Failed to retrieve h-value for validation.")
97
- raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")
98
-
99
- json_data = {
100
- "agentMode": agent_mode,
101
- "clickedAnswer2": False,
102
- "clickedAnswer3": False,
103
- "clickedForceWebSearch": False,
104
- "codeModelMode": True,
105
- "githubToken": None,
106
- "id": None, # Using request_id instead of chat_id
107
- "isChromeExt": False,
108
- "isMicMode": False,
109
- "maxTokens": request.max_tokens,
110
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
111
- "mobileClient": False,
112
- "playgroundTemperature": request.temperature,
113
- "playgroundTopP": request.top_p,
114
- "previewToken": None,
115
- "trendingAgentMode": trending_agent_mode,
116
- "userId": None,
117
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
118
- "userSystemPrompt": None,
119
- "validated": h_value, # Dynamically set the validated field
120
- "visitFromDelta": False,
121
- }
122
-
123
- async with httpx.AsyncClient() as client:
124
- try:
125
- async with client.stream(
126
- "POST",
127
- f"{BASE_URL}/api/chat",
128
- headers=headers_api_chat,
129
- json=json_data,
130
- timeout=100,
131
- ) as response:
132
- response.raise_for_status()
133
- async for chunk in response.aiter_text():
134
- timestamp = int(datetime.now().timestamp())
135
- if chunk:
136
- content = chunk
137
- if content.startswith("$@$v=undefined-rv1$@$"):
138
- content = content[21:]
139
- # Remove the blocked message if present
140
- if BLOCKED_MESSAGE in content:
141
- logger.info(f"Blocked message detected in response for Request ID {request_id}.")
142
- content = content.replace(BLOCKED_MESSAGE, '').strip()
143
- if not content:
144
- continue # Skip if content is empty after removal
145
- cleaned_content = strip_model_prefix(content, model_prefix)
146
- yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"
147
-
148
- yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
149
- yield "data: [DONE]\n\n"
150
- except httpx.HTTPStatusError as e:
151
- logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
152
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
153
- except httpx.RequestError as e:
154
- logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
155
- raise HTTPException(status_code=500, detail=str(e))
156
-
157
- # Process non-streaming response with headers from config.py
158
- async def process_non_streaming_response(request: ChatRequest):
159
- # Generate a unique ID for this request
160
- request_id = f"chatcmpl-{uuid.uuid4()}"
161
- logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")
162
-
163
- agent_mode = AGENT_MODE.get(request.model, {})
164
- trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
165
- model_prefix = MODEL_PREFIXES.get(request.model, "")
166
-
167
- # Adjust headers_api_chat and headers_chat since referer_url is removed
168
- headers_api_chat = get_headers_api_chat(BASE_URL)
169
- headers_chat = get_headers_chat(BASE_URL, next_action=str(uuid.uuid4()), next_router_state_tree=json.dumps([""]))
170
-
171
- if request.model == 'o1-preview':
172
- delay_seconds = random.randint(20, 60)
173
- logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
174
- await asyncio.sleep(delay_seconds)
175
-
176
- # Fetch the h-value for the 'validated' field
177
- h_value = await getHid()
178
- if not h_value:
179
- logger.error("Failed to retrieve h-value for validation.")
180
- raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")
181
-
182
- json_data = {
183
- "agentMode": agent_mode,
184
- "clickedAnswer2": False,
185
- "clickedAnswer3": False,
186
- "clickedForceWebSearch": False,
187
- "codeModelMode": True,
188
- "githubToken": None,
189
- "id": None, # Using request_id instead of chat_id
190
- "isChromeExt": False,
191
- "isMicMode": False,
192
- "maxTokens": request.max_tokens,
193
- "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
194
- "mobileClient": False,
195
- "playgroundTemperature": request.temperature,
196
- "playgroundTopP": request.top_p,
197
- "previewToken": None,
198
- "trendingAgentMode": trending_agent_mode,
199
- "userId": None,
200
- "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
201
- "userSystemPrompt": None,
202
- "validated": h_value, # Dynamically set the validated field
203
- "visitFromDelta": False,
204
- }
205
-
206
- full_response = ""
207
- async with httpx.AsyncClient() as client:
208
- try:
209
- async with client.stream(
210
- method="POST", url=f"{BASE_URL}/api/chat", headers=headers_api_chat, json=json_data
211
- ) as response:
212
- response.raise_for_status()
213
- async for chunk in response.aiter_text():
214
- full_response += chunk
215
- except httpx.HTTPStatusError as e:
216
- logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
217
- raise HTTPException(status_code=e.response.status_code, detail=str(e))
218
- except httpx.RequestError as e:
219
- logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
220
- raise HTTPException(status_code=500, detail=str(e))
221
-
222
- if full_response.startswith("$@$v=undefined-rv1$@$"):
223
- full_response = full_response[21:]
224
-
225
- # Remove the blocked message if present
226
- if BLOCKED_MESSAGE in full_response:
227
- logger.info(f"Blocked message detected in response for Request ID {request_id}.")
228
- full_response = full_response.replace(BLOCKED_MESSAGE, '').strip()
229
- if not full_response:
230
- raise HTTPException(status_code=500, detail="Blocked message detected in response.")
231
-
232
- cleaned_full_response = strip_model_prefix(full_response, model_prefix)
233
-
234
- return {
235
- "id": f"chatcmpl-{uuid.uuid4()}",
236
- "object": "chat.completion",
237
- "created": int(datetime.now().timestamp()),
238
- "model": request.model,
239
- "choices": [
240
- {
241
- "index": 0,
242
- "message": {"role": "assistant", "content": cleaned_full_response},
243
- "finish_reason": "stop",
244
- }
245
- ],
246
- "usage": None,
247
- }
 
 
1
+ from datetime import datetime
2
+ import json
3
+ import uuid
4
+ import asyncio
5
+ import random
6
+ import string
7
+ from typing import Any, Dict, Optional
8
+
9
+ import httpx
10
+ from fastapi import HTTPException
11
+ from api.config import (
12
+ MODEL_MAPPING,
13
+ get_headers_api_chat,
14
+ get_headers_chat,
15
+ BASE_URL,
16
+ AGENT_MODE,
17
+ TRENDING_AGENT_MODE,
18
+ MODEL_PREFIXES,
19
+ MODEL_REFERERS
20
+ )
21
+ from api.models import ChatRequest
22
+ from api.logger import setup_logger
23
+ from api.validate import getHid # Import the asynchronous getHid function
24
+
25
+ logger = setup_logger(__name__)
26
+
27
+ # Define the blocked message
28
+ BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai"
29
+
30
# Helper: build one OpenAI-compatible streaming chunk payload.
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Return a ``chat.completion.chunk`` dict carrying *content* as the delta.

    A fresh random ``chatcmpl-<uuid4>`` id is generated per chunk;
    *finish_reason* is None for intermediate chunks and e.g. 'stop' for
    the terminal one.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
48
+
49
# Convert one chat message into the dict shape the upstream API expects,
# attaching base64 image data and an optional model prefix.
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Serialize *message* (an object with ``.role`` and ``.content``).

    ``content`` may be a plain string, or a two-element list whose first
    part holds the text and whose second part holds an ``image_url``
    (assumed base64 data URL — TODO confirm against callers). The model
    prefix, when given, is prepended to the text.
    """
    raw = message.content
    text = raw if isinstance(raw, str) else raw[0]["text"]
    if model_prefix:
        text = f"{model_prefix} {text}"

    carries_image = (
        isinstance(raw, list) and len(raw) == 2 and "image_url" in raw[1]
    )
    if not carries_image:
        return {"role": message.role, "content": text}

    # Image payloads ride along in a "data" envelope for all models.
    return {
        "role": message.role,
        "content": text,
        "data": {
            "imageBase64": raw[1]["image_url"]["url"],
            "fileText": "",
            "title": "snapshot",
        },
    }
66
+
67
# Strip a leading model prefix (e.g. "@GPT-4o") from response text.
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    if not model_prefix or not content.startswith(model_prefix):
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
74
+
75
# Process streaming response with headers from config.py
async def process_streaming_response(request: ChatRequest):
    """Proxy a chat request upstream and re-emit the reply as SSE chunks.

    Yields OpenAI-style ``chat.completion.chunk`` payloads as ``data:``
    lines, followed by a final 'stop' chunk and the ``[DONE]`` sentinel.

    Raises:
        HTTPException: if the h-value cannot be fetched, or the upstream
            request fails (HTTP status error or transport error).
    """
    # Unique ID used only for log correlation.
    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Adjust headers_api_chat since referer_url is removed
    headers_api_chat = get_headers_api_chat(BASE_URL)

    if request.model == 'o1-preview':
        # Artificial delay — presumably mimics o1-preview latency upstream.
        delay_seconds = random.randint(1, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
        await asyncio.sleep(delay_seconds)

    # Fetch the h-value for the 'validated' field
    h_value = await getHid()
    if not h_value:
        logger.error("Failed to retrieve h-value for validation.")
        raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": False,
        "githubToken": None,
        "id": None,  # Using request_id instead of chat_id
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": h_value,  # Dynamically set the validated field
        "visitFromDelta": False,
        "webSearchModePrompt": False,
    }

    # Marker the upstream occasionally prepends to a chunk.
    version_marker = "$@$v=undefined-rv1$@$"

    # BUGFIX: initialize before the loop so the final 'stop' chunk does not
    # raise NameError when the upstream stream yields zero chunks.
    timestamp = int(datetime.now().timestamp())

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    timestamp = int(datetime.now().timestamp())
                    if chunk:
                        content = chunk
                        if content.startswith(version_marker):
                            # Drop the marker (was a magic `[21:]` slice).
                            content = content[len(version_marker):]
                        # Remove the blocked message if present
                        if BLOCKED_MESSAGE in content:
                            logger.info(f"Blocked message detected in response for Request ID {request_id}.")
                            content = content.replace(BLOCKED_MESSAGE, '').strip()
                            if not content:
                                continue  # Skip if content is empty after removal
                        cleaned_content = strip_model_prefix(content, model_prefix)
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))
157
+
158
# Process non-streaming response with headers from config.py
async def process_non_streaming_response(request: ChatRequest):
    """Proxy a chat request upstream, collect the whole reply, and return
    one OpenAI-style ``chat.completion`` object.

    Raises:
        HTTPException: if the h-value cannot be fetched, the upstream
            request fails, or the reply contains only the blocked message.
    """
    # Unique ID used only for log correlation.
    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Adjust headers_api_chat since referer_url is removed.
    # (An unused headers_chat = get_headers_chat(...) local was removed.)
    headers_api_chat = get_headers_api_chat(BASE_URL)

    if request.model == 'o1-preview':
        # Artificial delay — presumably mimics o1-preview latency upstream.
        delay_seconds = random.randint(20, 60)
        logger.info(f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' (Request ID: {request_id})")
        await asyncio.sleep(delay_seconds)

    # Fetch the h-value for the 'validated' field
    h_value = await getHid()
    if not h_value:
        logger.error("Failed to retrieve h-value for validation.")
        raise HTTPException(status_code=500, detail="Validation failed due to missing h-value.")

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": None,  # Using request_id instead of chat_id
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": h_value,  # Dynamically set the validated field
        "visitFromDelta": False,
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                # BUGFIX: match the streaming path's timeout; httpx defaults
                # to 5 seconds, which long generations routinely exceed.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    # Upstream sometimes prepends a version marker; drop it.
    version_marker = "$@$v=undefined-rv1$@$"
    if full_response.startswith(version_marker):
        full_response = full_response[len(version_marker):]

    # Remove the blocked message if present
    if BLOCKED_MESSAGE in full_response:
        logger.info(f"Blocked message detected in response for Request ID {request_id}.")
        full_response = full_response.replace(BLOCKED_MESSAGE, '').strip()
        if not full_response:
            raise HTTPException(status_code=500, detail="Blocked message detected in response.")

    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }
api/validate.py CHANGED
@@ -6,21 +6,7 @@ from typing import Optional
6
 
7
  base_url = "https://www.blackbox.ai"
8
  headers = {
9
- 'accept': '*/*',
10
- 'accept-language': 'en-US,en;q=0.9',
11
- 'cache-control': 'no-cache',
12
- 'content-type': 'application/json',
13
- 'origin': base_url,
14
- 'pragma': 'no-cache',
15
- 'priority': 'u=1, i',
16
- 'referer': f'{base_url}/',
17
- 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
18
- 'sec-ch-ua-mobile': '?0',
19
- 'sec-ch-ua-platform': '"Linux"',
20
- 'sec-fetch-dest': 'empty',
21
- 'sec-fetch-mode': 'cors',
22
- 'sec-fetch-site': 'same-origin',
23
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
24
  }
25
 
26
  # Cache variables
 
6
 
7
  base_url = "https://www.blackbox.ai"
8
  headers = {
9
+ 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  }
11
 
12
  # Cache variables
requirements.txt CHANGED
@@ -1,6 +1,6 @@
1
- fastapi==0.95.2
2
- httpx==0.23.3
3
- pydantic==1.10.4
4
- python-dotenv==0.21.0
5
- uvicorn==0.21.1
6
- aiohttp
 
1
+ fastapi==0.95.2
2
+ httpx==0.23.3
3
+ pydantic==1.10.4
4
+ python-dotenv==0.21.0
5
+ uvicorn==0.21.1
6
+ aiohttp