# NOTE: The following metadata is a scrape artifact from the Hugging Face file
# viewer (repo "test24", file api/utils.py, commit b0790f1, 8.74 kB) and is not
# part of the module source.
# api/utils.py
from datetime import datetime
import json
import uuid
import asyncio
import random
from typing import Any, Dict, Optional
import httpx
from fastapi import HTTPException
from api.config import (
MODEL_MAPPING,
BASE_URL,
AGENT_MODE,
TRENDING_AGENT_MODE,
MODEL_PREFIXES,
api_headers,
)
from api.models import ChatRequest
from api.logger import setup_logger
# Import the validate module
from api import validate
logger = setup_logger(__name__)
# Helper function to create chat completion data
def create_chat_completion_data(
    content: str,
    model: str,
    timestamp: int,
    finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    Args:
        content: Delta text carried by this chunk (may be empty for the
            terminating chunk).
        model: Model identifier echoed back to the client.
        timestamp: Unix timestamp placed in the ``created`` field.
        finish_reason: ``None`` for intermediate chunks, ``"stop"`` (or
            similar) on the final one.

    Returns:
        A dict matching the OpenAI streaming-chunk schema; ``usage`` is
        always ``None`` for chunks.
    """
    choice = {
        "index": 0,
        "delta": {"content": content},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
# Function to convert message to dictionary format, ensuring base64 data and optional model prefix
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a request message into the dict shape the upstream API expects.

    Text content is taken either from the string ``message.content`` or, for
    list-form content, from the first item's ``"text"`` field. When
    ``model_prefix`` is given it is prepended to the text. A two-element list
    whose second item carries an ``"image_url"`` is treated as an image
    message and the base64 payload is attached under ``"data"``.
    """
    raw = message.content
    text = raw if isinstance(raw, str) else raw[0]["text"]
    if model_prefix:
        text = f"{model_prefix} {text}"

    is_image_message = (
        isinstance(raw, list)
        and len(raw) == 2
        and "image_url" in raw[1]
    )
    if is_image_message:
        # Ensure base64 images are always included for all models.
        return {
            "role": message.role,
            "content": text,
            "data": {
                "imageBase64": raw[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": text}
# Function to strip model prefix from content if present
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Return *content* with a leading *model_prefix* (if any) removed.

    Whitespace left behind by the prefix is trimmed from both ends. Content
    that does not start with the prefix — or a falsy prefix — is returned
    unchanged.
    """
    if not model_prefix or not content.startswith(model_prefix):
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    return content[len(model_prefix):].strip()
# Process streaming response
# Marker the upstream API sometimes prepends to response lines; stripped
# before forwarding. Named so the slice length can never drift from the
# string (the previous code hard-coded ``line[21:]``).
_VERSION_MARKER = "$@$v=undefined-rv1$@$"


async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as SSE ``data:`` lines.

    Yields OpenAI-style ``chat.completion.chunk`` payloads for each upstream
    line, then a final ``finish_reason="stop"`` chunk and ``data: [DONE]``.

    Args:
        request: The incoming chat request (model, messages, sampling params).

    Raises:
        HTTPException: On upstream HTTP errors (status propagated) or request
            failures (500). NOTE(review): exceptions raised after the first
            yield cannot change the HTTP status any more — confirm desired
            client behavior.
    """
    logger.info(f"Model: {request.model}")
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Artificial random delay for 'o1-preview' — presumably upstream
    # rate-limit avoidance; TODO confirm why the range differs from the
    # non-streaming path.
    if request.model == 'o1-preview':
        delay_seconds = random.randint(1, 60)
        logger.info(
            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'"
        )
        await asyncio.sleep(delay_seconds)

    json_data = {
        "messages": [
            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
        ],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "validated": validate.getHid(),
    }

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=api_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    if not line:
                        continue
                    # An upstream line containing the blackbox.ai URL means the
                    # 'hid' token was rejected: refresh it and tell the user
                    # (message is Chinese for "hid refreshed, just retry").
                    if "https://www.blackbox.ai" in line:
                        validate.getHid(True)
                        content = "hid已刷新,重新对话即可"
                        yield (
                            f"data: {json.dumps(create_chat_completion_data(content, request.model, int(datetime.now().timestamp())))}\n\n"
                        )
                        break
                    if line.startswith(_VERSION_MARKER):
                        line = line[len(_VERSION_MARKER):]
                    cleaned_content = strip_model_prefix(line, model_prefix)
                    yield (
                        f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, int(datetime.now().timestamp())))}\n\n"
                    )
            # Signal that the response is complete.
            yield (
                f"data: {json.dumps(create_chat_completion_data('', request.model, int(datetime.now().timestamp()), 'stop'))}\n\n"
            )
            yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
# Process non-streaming response
async def process_non_streaming_response(request: ChatRequest):
    """Fetch a complete (non-streaming) chat completion from the upstream API.

    Builds the same upstream payload as the streaming path, posts it once,
    post-processes the full response text (hid refresh, version-marker and
    model-prefix stripping), and returns an OpenAI-style ``chat.completion``
    object.

    Args:
        request: The incoming chat request (model, messages, sampling params).

    Raises:
        HTTPException: On upstream HTTP errors (status propagated) or request
            failures (500).
    """
    logger.info(f"Model: {request.model}")
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")

    # Artificial random delay for 'o1-preview' — presumably upstream
    # rate-limit avoidance; TODO confirm why the range differs from the
    # streaming path.
    if request.model == 'o1-preview':
        delay_seconds = random.randint(20, 60)
        logger.info(
            f"Introducing a delay of {delay_seconds} seconds for model 'o1-preview'"
        )
        await asyncio.sleep(delay_seconds)

    json_data = {
        "messages": [
            message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages
        ],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
        "validated": validate.getHid(),
    }

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            response = await client.post(
                url=f"{BASE_URL}/api/chat",
                headers=api_headers,
                json=json_data,
                # Match the streaming path: httpx's default 5 s timeout is
                # far too short for a full chat completion.
                timeout=100,
            )
            response.raise_for_status()
            full_response = response.text
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    # A response containing the blackbox.ai URL means the 'hid' token was
    # rejected: refresh it and tell the user (message is Chinese for
    # "hid refreshed, just retry").
    if "https://www.blackbox.ai" in full_response:
        validate.getHid(True)
        full_response = "hid已刷新,重新对话即可"

    # Strip the upstream version marker if present (named constant instead
    # of the previous hard-coded 21-character slice).
    version_marker = "$@$v=undefined-rv1$@$"
    if full_response.startswith(version_marker):
        full_response = full_response[len(version_marker):]
    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }