from datetime import datetime
import json
import uuid
import asyncio
import random
import os
from typing import Any, Dict, Optional

import httpx
from fastapi import HTTPException
from api.config import (
    MODEL_MAPPING,
    get_headers_api_chat,
    get_headers_chat,
    BASE_URL,
    AGENT_MODE,
    TRENDING_AGENT_MODE,
    MODEL_PREFIXES,
    MODEL_REFERERS
)
from api.models import ChatRequest
from api.logger import setup_logger
from api.validate import getHid  # Import the asynchronous getHid function

logger = setup_logger(__name__)

# Define the blocked message
BLOCKED_MESSAGE = "Generated by BLACKBOX.AI, try unlimited chat https://www.blackbox.ai"

# Fetch the advertisement text from environment variable
ADVERTISEMENT_TEXT = os.getenv("ADVERTISEMENT_TEXT", "")

# Helper function to create chat completion data
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }
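
# Illustrative example only (field values below are made up, not taken from a real
# response): once serialized with json.dumps and prefixed with "data: ", a chunk
# built by this helper looks roughly like
#   data: {"id": "chatcmpl-<uuid>", "object": "chat.completion.chunk",
#          "created": 1700000000, "model": "<model-name>",
#          "choices": [{"index": 0, "delta": {"content": "Hello", "role": "assistant"},
#                       "finish_reason": null}], "usage": null}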

# Function to convert message to dictionary format, ensuring base64 data and optional model prefix
def message_to_dict(message, model_prefix: Optional[str] = None):
    content = message.content if isinstance(message.content, str) else message.content[0]["text"]
    if model_prefix:
        content = f"{model_prefix} {content}"
    if isinstance(message.content, list) and len(message.content) == 2 and "image_url" in message.content[1]:
        # Ensure base64 images are always included for all models
        image_base64 = message.content[1]["image_url"]["url"]
        return {
            "role": message.role,
            "content": content,
            "data": {
                "imageBase64": image_base64,
                "fileText": "",
                "title": "snapshot",
                # Added imagesData field here
                "imagesData": [
                    {
                        "filePath": f"MultipleFiles/{uuid.uuid4().hex}.jpg",
                        "contents": image_base64
                    }
                ],
            },
        }
    return {"role": message.role, "content": content}

# Function to strip model prefix from content if present
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    if model_prefix and content.startswith(model_prefix):
        logger.debug(f"Stripping prefix '{model_prefix}' from content.")
        return content[len(model_prefix):].strip()
    return content
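
# Illustrative example (the prefix value is made up; real values come from
# MODEL_PREFIXES in api.config):
#   strip_model_prefix("@GPT-4o Hello there", "@GPT-4o") -> "Hello there"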

# Process streaming response with headers from config.py
async def process_streaming_response(request: ChatRequest):
    # Generate a unique ID for this request
    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Adjust headers_api_chat since referer_url is removed
    headers_api_chat = get_headers_api_chat(BASE_URL)

    if request.model == 'o1-preview':
        delay_seconds = random.randint(1, 60)
        logger.info(
            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' "
            f"(Request ID: {request_id})"
        )
        await asyncio.sleep(delay_seconds)

    # Fetch the h-value for the 'validated' field
    h_value = await getHid()
    if not h_value:
        logger.error("Failed to retrieve h-value for validation.")
        raise HTTPException(
            status_code=500, detail="Validation failed due to missing h-value."
        )

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": None,  # Using request_id instead of chat_id
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [
            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
        ],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": h_value,  # Dynamically set the validated field
        "visitFromDelta": False,
        "webSearchModePrompt": False,
        "imageGenerationMode": False,  # Added this line
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()

                # Start processing the chunks and yield them one by one
                timestamp = int(datetime.now().timestamp())
                response_content = ""  # Collect response content

                async for chunk in response.aiter_text():
                    if chunk:
                        content = chunk
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            content = content[21:]  # Remove unwanted prefix

                        # Remove blocked message if present
                        if BLOCKED_MESSAGE in content:
                            logger.info(f"Blocked message detected in response for Request ID {request_id}.")
                            content = content.replace(BLOCKED_MESSAGE, '').strip()

                        if not content:
                            continue  # Skip if content is empty after removal

                        # Clean up the content
                        cleaned_content = strip_model_prefix(content, model_prefix)

                        # Yield each chunk as soon as it's ready
                        yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                # After all chunks are processed, emit a final chunk carrying the
                # optional advertisement text and the 'stop' finish reason
                final_content = f"\n\n{ADVERTISEMENT_TEXT}" if ADVERTISEMENT_TEXT else ""
                yield f"data: {json.dumps(create_chat_completion_data(final_content, request.model, timestamp, 'stop'))}\n\n"

                # Add the final "done" marker
                yield "data: [DONE]\n\n"

        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Request ID {request_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))

# Process non-streaming response with headers from config.py
async def process_non_streaming_response(request: ChatRequest):
    # Generate a unique ID for this request
    request_id = f"chatcmpl-{uuid.uuid4()}"
    logger.info(f"Processing request with ID: {request_id} - Model: {request.model}")

    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Adjust headers_api_chat and headers_chat since referer_url is removed
    headers_api_chat = get_headers_api_chat(BASE_URL)
    headers_chat = get_headers_chat(
        BASE_URL,
        next_action=str(uuid.uuid4()),
        next_router_state_tree=json.dumps([""]),
    )

    if request.model == 'o1-preview':
        delay_seconds = random.randint(20, 60)
        logger.info(
            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview' "
            f"(Request ID: {request_id})"
        )
        await asyncio.sleep(delay_seconds)

    # Fetch the h-value for the 'validated' field
    h_value = await getHid()
    if not h_value:
        logger.error("Failed to retrieve h-value for validation.")
        raise HTTPException(
            status_code=500, detail="Validation failed due to missing h-value."
        )

    json_data = {
        "agentMode": agent_mode,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "codeModelMode": True,
        "githubToken": None,
        "id": None,  # Using request_id instead of chat_id
        "isChromeExt": False,
        "isMicMode": False,
        "maxTokens": request.max_tokens,
        "messages": [
            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
        ],
        "mobileClient": False,
        "playgroundTemperature": request.temperature,
        "playgroundTopP": request.top_p,
        "previewToken": None,
        "trendingAgentMode": trending_agent_mode,
        "userId": None,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "userSystemPrompt": None,
        "validated": h_value,  # Dynamically set the validated field
        "visitFromDelta": False,
        "webSearchModePrompt": False,
        "imageGenerationMode": False,  # Added this line
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,  # match the streaming path; httpx otherwise defaults to 5 seconds
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Request ID {request_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(
                f"Error occurred during request for Request ID {request_id}: {e}"
            )
            raise HTTPException(status_code=500, detail=str(e))

    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]

    # Remove the blocked message if present
    if BLOCKED_MESSAGE in full_response:
        logger.info(
            f"Blocked message detected in response for Request ID {request_id}."
        )
        full_response = full_response.replace(BLOCKED_MESSAGE, '').strip()
        if not full_response:
            raise HTTPException(
                status_code=500, detail="Blocked message detected in response."
            )

    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    # Append the advertisement text only once at the end
    if ADVERTISEMENT_TEXT:
        cleaned_full_response += "\n\n" + ADVERTISEMENT_TEXT

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }
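
# Illustrative wiring sketch (an assumption; the actual route definitions live
# elsewhere in this project, and ChatRequest is assumed to expose a `stream` flag).
# It shows how these two helpers are typically exposed behind a single
# OpenAI-style endpoint:
#
#   from fastapi import FastAPI
#   from fastapi.responses import StreamingResponse
#   from api.models import ChatRequest
#
#   app = FastAPI()
#
#   @app.post("/v1/chat/completions")
#   async def chat_completions(request: ChatRequest):
#       if request.stream:
#           return StreamingResponse(
#               process_streaming_response(request),
#               media_type="text/event-stream",
#           )
#       return await process_non_streaming_response(request)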