test24 / api /utils.py
Niansuh's picture
Upload 13 files
d96bc9d verified
raw
history blame
8.48 kB
from datetime import datetime
import json
from typing import Any, Dict, Optional
import httpx
from api.config import (
MODEL_MAPPING,
headers,
AGENT_MODE,
TRENDING_AGENT_MODE,
BASE_URL,
MODEL_PREFIXES,
MODEL_REFERERS
)
from fastapi import HTTPException
from api.models import ChatRequest
from api.logger import setup_logger
import uuid
import asyncio
import random # Newly added imports
logger = setup_logger(__name__)
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    Args:
        content: Delta text for this chunk (may be empty for the stop chunk).
        model: Model name to echo back to the client.
        timestamp: Unix timestamp (seconds) for the ``created`` field.
        finish_reason: ``None`` for intermediate chunks, ``"stop"`` for the last.

    Returns:
        A dict matching the OpenAI streaming-chunk schema, with a fresh
        ``chatcmpl-<uuid4>`` id and ``usage`` always ``None``.
    """
    choice = {
        "index": 0,
        "delta": {"content": content, "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
def message_to_dict(message, model_prefix: Optional[str] = None):
    """Convert a chat message into the payload dict the upstream API expects.

    Plain string content is forwarded (optionally prefixed); a two-element
    list is treated as ``[text part, image part]`` and the image URL is
    forwarded as base64 metadata; anything else passes through untouched.
    """
    def _prefixed(text: str) -> str:
        # Prepend the model prefix, if one is configured, to outgoing text.
        return f"{model_prefix} {text}" if model_prefix else text

    content = message.content
    if isinstance(content, str):
        return {"role": message.role, "content": _prefixed(content)}
    if isinstance(content, list) and len(content) == 2:
        # Exactly two parts: text first, image second.
        return {
            "role": message.role,
            "content": _prefixed(content[0]["text"]),
            "data": {
                "imageBase64": content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    # Unknown shape — forward unchanged and let the upstream API decide.
    return {"role": message.role, "content": content}
def strip_model_prefix(content: str, model_prefix: Optional[str] = None) -> str:
    """Remove the model prefix from the response content if present."""
    # Guard clause: nothing to do when no prefix is configured or it doesn't match.
    if not model_prefix or not content.startswith(model_prefix):
        logger.debug("No prefix to strip from content.")
        return content
    logger.debug(f"Stripping prefix '{model_prefix}' from content.")
    # Drop the prefix, then trim surrounding whitespace left behind.
    return content[len(model_prefix):].strip()
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as OpenAI-style SSE.

    Yields one ``data: {...}`` chunk per upstream line, then a final stop
    chunk and the ``data: [DONE]`` sentinel.

    Args:
        request: Parsed client request (model, messages, sampling params).

    Raises:
        HTTPException: with the upstream status on HTTP errors, or 500 on
            transport failures.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Copy before mutating so the shared module-level headers stay untouched.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    # Deliberate randomized pacing delay for 'o1-preview'.
    if request.model == 'o1-preview':
        delay_seconds = random.randint(1, 60)
        logger.info("Introducing a delay of %s seconds for model 'o1-preview'", delay_seconds)
        await asyncio.sleep(delay_seconds)

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }

    # Upstream sometimes prepends this marker to a line; strip it by length
    # instead of a hard-coded slice so the two stay in sync.
    response_marker = "$@$v=undefined-rv1$@$"

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    if not line:
                        continue
                    timestamp = int(datetime.now().timestamp())
                    content = line
                    if content.startswith(response_marker):
                        content = content[len(response_marker):]
                    # Strip the model prefix from the response content.
                    cleaned_content = strip_model_prefix(content, model_prefix)
                    yield f"data: {json.dumps(create_chat_completion_data(cleaned_content, request.model, timestamp))}\n\n"

                # Fresh timestamp here: the loop-local one is unbound when the
                # upstream stream yields no lines (fixes a latent NameError).
                final_timestamp = int(datetime.now().timestamp())
                yield f"data: {json.dumps(create_chat_completion_data('', request.model, final_timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error("HTTP error occurred: %s", e)
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error("Error occurred during request: %s", e)
            raise HTTPException(status_code=500, detail=str(e))
async def process_non_streaming_response(request: ChatRequest):
    """Fetch a complete chat completion and return an OpenAI-style response dict.

    Collects the whole upstream stream into one string, strips the upstream
    marker and model prefix, and wraps the text in a ``chat.completion`` object.

    Args:
        request: Parsed client request (model, messages, sampling params).

    Raises:
        HTTPException: with the upstream status on HTTP errors, or 500 on
            transport failures.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    model_prefix = MODEL_PREFIXES.get(request.model, "")
    referer_path = MODEL_REFERERS.get(request.model, f"/?model={request.model}")
    referer_url = f"{BASE_URL}{referer_path}"

    # Copy before mutating so the shared module-level headers stay untouched.
    dynamic_headers = headers.copy()
    dynamic_headers['Referer'] = referer_url

    # Deliberate randomized pacing delay for 'o1-preview'.
    if request.model == 'o1-preview':
        delay_seconds = random.randint(20, 60)
        logger.info("Introducing a delay of %s seconds for model 'o1-preview'", delay_seconds)
        await asyncio.sleep(delay_seconds)

    json_data = {
        "messages": [message_to_dict(msg, model_prefix=model_prefix) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "validated": "69783381-2ce4-4dbd-ac78-35e9063feabc",
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }

    # Collect chunks in a list and join once — avoids quadratic str +=.
    chunks = []
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=dynamic_headers,
                json=json_data,
                # Match the streaming path: httpx's 5 s default is far too
                # short for a full model response.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    chunks.append(chunk)
        except httpx.HTTPStatusError as e:
            logger.error("HTTP error occurred: %s", e)
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error("Error occurred during request: %s", e)
            raise HTTPException(status_code=500, detail=str(e))

    full_response = "".join(chunks)

    # Upstream sometimes prepends this marker; strip it by length instead of
    # a hard-coded slice so the two stay in sync.
    response_marker = "$@$v=undefined-rv1$@$"
    if full_response.startswith(response_marker):
        full_response = full_response[len(response_marker):]
    # Strip the model prefix from the full response.
    cleaned_full_response = strip_model_prefix(full_response, model_prefix)

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }