from datetime import datetime
import json
from typing import Any, Dict, Optional
import uuid

import httpx
from fastapi import Depends, HTTPException
from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials

from api import validate
from api.config import (
    MODEL_MAPPING,
    headers,
    AGENT_MODE,
    TRENDING_AGENT_MODE,
    APP_SECRET,
    BASE_URL,
)
from api.models import ChatRequest
from api.logger import setup_logger

logger = setup_logger(__name__)

bearer_scheme = HTTPBearer()
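# Note: HTTPBearer() only extracts the "Authorization: Bearer <token>" header;
# the token itself is checked against APP_SECRET in verify_app_secret() below.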


def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """
    Create a dictionary representing a chat completion chunk.
    """
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }
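
# Illustrative example of a single chunk once process_streaming_response() frames it as a
# server-sent event (the id/created/model values here are made up):
#
#     data: {"id": "chatcmpl-1b2c...", "object": "chat.completion.chunk", "created": 1700000000,
#            "model": "gpt-4o", "choices": [{"index": 0, "delta": {"content": "Hello", "role": "assistant"},
#            "finish_reason": null}], "usage": null}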


def verify_app_secret(credentials: HTTPAuthorizationCredentials = Depends(bearer_scheme)):
    """
    Verify the APP_SECRET from the authorization credentials.
    """
    if credentials.credentials != APP_SECRET:
        logger.warning("Invalid APP_SECRET provided.")
        raise HTTPException(status_code=403, detail="Invalid APP_SECRET")
    logger.debug("APP_SECRET verified successfully.")
    return credentials.credentials
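
# Illustrative usage as a route dependency (a hedged sketch; the real route definitions
# live elsewhere in this project and the path below is an assumption):
#
#     @app.post("/v1/chat/completions")
#     async def chat_completions(request: ChatRequest, _token: str = Depends(verify_app_secret)):
#         ...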


def message_to_dict(message):
    """
    Convert a message object to a dictionary suitable for the API request.
    Handles different content types gracefully.
    """
    message_dict = {"role": message.role}

    if isinstance(message.content, str):
        message_dict["content"] = message.content
    elif isinstance(message.content, list):
        try:
            if len(message.content) >= 2:
                # OpenAI-style multimodal content: the first part carries the text,
                # the second part carries the image URL / base64 data.
                text_content = message.content[0].get("text", "")
                image_url = message.content[1].get("image_url", {}).get("url", "")
                message_dict["content"] = text_content
                message_dict["data"] = {
                    "imageBase64": image_url,
                    "fileText": "",
                    "title": "snapshot",
                }
            else:
                # Fall back to serializing the parts verbatim.
                message_dict["content"] = json.dumps(message.content)
        except (AttributeError, KeyError, TypeError) as e:
            logger.error(f"Error parsing message content: {e}")
            message_dict["content"] = "Invalid message format."
    else:
        # Any other content type is coerced to a string.
        message_dict["content"] = str(message.content)

    return message_dict
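
# Illustrative example (field values are made up) of a multimodal message and the dict
# this helper produces for it:
#
#     message.role == "user"
#     message.content == [
#         {"type": "text", "text": "Describe this image"},
#         {"type": "image_url", "image_url": {"url": "data:image/png;base64,iVBOR..."}},
#     ]
#
#     message_to_dict(message) -> {
#         "role": "user",
#         "content": "Describe this image",
#         "data": {"imageBase64": "data:image/png;base64,iVBOR...", "fileText": "", "title": "snapshot"},
#     }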


def get_agent_mode(model: str) -> Dict[str, Any]:
    """
    Retrieves the agent mode configuration.
    Falls back to an empty dictionary if not found.
    """
    agent_mode = AGENT_MODE.get(model, {})
    if not agent_mode:
        logger.warning(f"No AGENT_MODE configuration found for model: {model}")
    return agent_mode


def get_trending_agent_mode(model: str) -> Dict[str, Any]:
    """
    Retrieves the trending agent mode configuration.
    Falls back to an empty dictionary if not found.
    """
    trending_agent_mode = TRENDING_AGENT_MODE.get(model, {})
    if not trending_agent_mode:
        logger.warning(f"No TRENDING_AGENT_MODE configuration found for model: {model}")
    return trending_agent_mode


async def process_streaming_response(request: ChatRequest):
    """
    Process a streaming response for a chat completion request.
    """
    agent_mode = get_agent_mode(request.model)
    trending_agent_mode = get_trending_agent_mode(request.model)

    logger.info(
        f"Streaming request for model: '{request.model}', "
        f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
    )

    json_data = {
        "messages": [message_to_dict(msg) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model),
        "validated": validate.getHid(),
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers,
                json=json_data,
                timeout=httpx.Timeout(100.0),
            ) as response:
                response.raise_for_status()
                timestamp = int(datetime.now().timestamp())
                async for line in response.aiter_lines():
                    if line:
                        content = line.strip() + "\n"
                        if "https://www.blackbox.ai" in content:
                            # Treat a reply containing the blackbox.ai URL as a stale hid:
                            # refresh it and tell the client to start a new conversation.
                            validate.getHid(True)
                            content = "hid has been refreshed; please start a new conversation\n"
                            yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
                            break
                        if content.startswith("$@$v=undefined-rv1$@$"):
                            # Strip the upstream version sentinel prefix.
                            content = content[len("$@$v=undefined-rv1$@$"):]
                        yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"

                yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
            raise HTTPException(status_code=e.response.status_code, detail="Error from upstream service.")
        except httpx.RequestError as e:
            logger.error(f"Request error occurred: {e}")
            raise HTTPException(status_code=500, detail="Internal server error.")
        except Exception as e:
            logger.error(f"Unexpected error: {e}")
            raise HTTPException(status_code=500, detail="Internal server error.")
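
# Hedged sketch of how this generator is typically consumed by the route layer (the actual
# wiring lives elsewhere in this project):
#
#     from fastapi.responses import StreamingResponse
#     return StreamingResponse(process_streaming_response(request), media_type="text/event-stream")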


async def process_non_streaming_response(request: ChatRequest):
    """
    Process a non-streaming response for a chat completion request.
    """
    agent_mode = get_agent_mode(request.model)
    trending_agent_mode = get_trending_agent_mode(request.model)

    logger.info(
        f"Non-streaming request for model: '{request.model}', "
        f"agent mode: {agent_mode}, trending agent mode: {trending_agent_mode}"
    )

    # The payload mirrors the one built in process_streaming_response().
    json_data = {
        "messages": [message_to_dict(msg) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model),
        "validated": validate.getHid(),
    }

    try:
        async with httpx.AsyncClient() as client:
            response = await client.post(
                f"{BASE_URL}/api/chat",
                headers=headers,
                json=json_data,
                timeout=httpx.Timeout(100.0),
            )
            response.raise_for_status()
            full_response = response.text

        if "https://www.blackbox.ai" in full_response:
            # Treat a reply containing the blackbox.ai URL as a stale hid:
            # refresh it and tell the client to start a new conversation.
            validate.getHid(True)
            full_response = "hid has been refreshed; please start a new conversation"
        if full_response.startswith("$@$v=undefined-rv1$@$"):
            # Strip the upstream version sentinel prefix.
            full_response = full_response[len("$@$v=undefined-rv1$@$"):]

        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
            "created": int(datetime.now().timestamp()),
            "model": request.model,
            "choices": [
                {
                    "index": 0,
                    "message": {"role": "assistant", "content": full_response},
                    "finish_reason": "stop",
                }
            ],
            "usage": None,
        }
    except httpx.HTTPStatusError as e:
        logger.error(f"HTTP error occurred: {e.response.status_code} - {e.response.text}")
        raise HTTPException(status_code=e.response.status_code, detail="Error from upstream service.")
    except httpx.RequestError as e:
        logger.error(f"Request error occurred: {e}")
        raise HTTPException(status_code=500, detail="Internal server error.")
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        raise HTTPException(status_code=500, detail="Internal server error.")