# api/utils.py — utility helpers for the chat proxy API.
from datetime import datetime
import json
from typing import Any, Dict, Optional
import uuid
import httpx
from api.config import (
MODEL_MAPPING,
headers,
AGENT_MODE,
TRENDING_AGENT_MODE,
BASE_URL,
)
from fastapi import HTTPException
from api.models import ChatRequest
from api.logger import setup_logger
# Module-level logger, named after this module so log output can be filtered per file.
logger = setup_logger(__name__)
def clean_content(content: str) -> str:
    """Collapse runs of blank lines into a single blank line.

    Trailing whitespace is stripped from every kept line, but leading
    indentation is preserved so code formatting stays intact.
    """
    kept = []
    last_was_blank = False
    for raw in content.splitlines():
        if raw.strip():
            # Non-empty line: keep it (minus trailing whitespace).
            kept.append(raw.rstrip())
            last_was_blank = False
        elif not last_was_blank:
            # First blank line of a run: keep exactly one.
            kept.append('')
            last_was_blank = True
        # Subsequent blank lines in the same run are dropped.
    return '\n'.join(kept)
def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` payload.

    The content is passed through ``clean_content`` to drop redundant blank
    lines before it is placed in the delta.
    """
    chunk_id = f"chatcmpl-{uuid.uuid4()}"
    choice = {
        "index": 0,
        "delta": {"content": clean_content(content), "role": "assistant"},
        "finish_reason": finish_reason,
    }
    return {
        "id": chunk_id,
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [choice],
        "usage": None,
    }
def message_to_dict(message):
    """Convert a chat message object into the dict shape the upstream API expects.

    String content is forwarded as-is, together with an optional ``data``
    attribute when the message carries one.  A two-element list content is
    treated as ``[text_part, image_part]`` and the image URL is embedded
    under ``data``.  Any other content is passed through unchanged.
    """
    content = message.content
    if isinstance(content, str):
        payload = {"role": message.role, "content": content}
        if hasattr(message, 'data'):
            payload['data'] = message.data
        return payload
    if isinstance(content, list) and len(content) == 2:
        # assumes order is [text, image] — TODO confirm against callers
        text_part, image_part = content
        return {
            "role": message.role,
            "content": text_part["text"],
            "data": {
                "imageBase64": image_part["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}
async def process_streaming_response(request: ChatRequest):
    """Stream a chat completion from the upstream API as SSE ``data:`` events.

    Yields one OpenAI-style ``chat.completion.chunk`` per non-empty upstream
    line, then a final chunk with ``finish_reason='stop'`` and the
    ``data: [DONE]`` sentinel.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on transport failures.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    json_data = {
        "messages": [message_to_dict(msg) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    if not line:
                        continue
                    timestamp = int(datetime.now().timestamp())
                    content = line
                    # Upstream sometimes prefixes lines with a 21-char version marker.
                    if content.startswith("$@$v=undefined-rv1$@$"):
                        content = content[21:]
                    yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"
                # Fix: compute the final timestamp here instead of reusing the
                # loop variable — previously an empty upstream stream left
                # `timestamp` unbound and raised UnboundLocalError.
                final_timestamp = int(datetime.now().timestamp())
                yield f"data: {json.dumps(create_chat_completion_data('', request.model, final_timestamp, 'stop'))}\n\n"
                yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
async def process_non_streaming_response(request: ChatRequest):
    """Request a full (non-streamed) chat completion from the upstream API.

    Accumulates the streamed upstream body into one string, strips the
    upstream version marker if present, and returns an OpenAI-style
    ``chat.completion`` response dict.

    Raises:
        HTTPException: with the upstream status code on HTTP errors, or 500
            on transport failures.
    """
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    json_data = {
        "messages": [message_to_dict(msg) for msg in request.messages],
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": MODEL_MAPPING.get(request.model, request.model),
    }
    chunks = []
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers,
                json=json_data,
                # Fix: match the streaming endpoint's 100 s timeout — without
                # this, httpx's 5 s default aborted long generations.
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    chunks.append(chunk)
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request: {e}")
            raise HTTPException(status_code=500, detail=str(e))
    full_response = "".join(chunks)
    # Upstream sometimes prefixes the body with a 21-char version marker.
    if full_response.startswith("$@$v=undefined-rv1$@$"):
        full_response = full_response[21:]
    # Clean the content to remove extra blank lines but preserve formatting
    cleaned_response = clean_content(full_response)
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": cleaned_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }