from datetime import datetime
import json
import uuid
import asyncio
import random
import string
from typing import Any, Dict, Optional

import httpx
from fastapi import HTTPException

from api.config import (
    AGENT_MODE,
    TRENDING_AGENT_MODE,
    # NOTE(review): MODEL_REFERERS was referenced by get_referer_url() but never
    # imported (NameError at runtime) — confirm it is exported by api.config.
    MODEL_REFERERS,
    get_headers_api_chat,
    get_headers_chat,
    BASE_URL,
)
from api.models import ChatRequest
from api.logger import setup_logger

logger = setup_logger(__name__)

# Models the upstream expects to receive explicitly as "userSelectedModel".
USER_SELECTED_MODELS = ("gpt-4o", "gemini-pro", "claude-sonnet-3.5", "blackboxai-pro")

# Hard-coded upstream validation token (copied verbatim from the request payload).
VALIDATED_TOKEN = "69783381-2ce4-4dbd-ac78-35e9063feabc"


def generate_chat_id(length: int = 7) -> str:
    """Return a random alphanumeric chat id of ``length`` characters."""
    characters = string.ascii_letters + string.digits
    return ''.join(random.choice(characters) for _ in range(length))


def message_to_dict(message) -> Dict[str, Any]:
    """Convert a request message into the upstream JSON message format.

    ``message.content`` may be a plain string, or a two-element list whose
    first part holds the text and whose second part holds an ``image_url``
    dict; in the latter case the base64 image is attached under ``data`` so
    it is forwarded for all models.
    """
    content = message.content if isinstance(message.content, str) else message.content[0]["text"]
    if (
        isinstance(message.content, list)
        and len(message.content) == 2
        and "image_url" in message.content[1]
    ):
        return {
            "role": message.role,
            "content": content,
            "data": {
                "imageBase64": message.content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}


def get_referer_url(chat_id: str, model: str) -> str:
    """Generate the referer URL based on specific models listed in MODEL_REFERERS.

    ``chat_id`` is currently unused but kept for interface compatibility.
    """
    if model in MODEL_REFERERS:
        return f"{BASE_URL}{MODEL_REFERERS[model]}"
    return BASE_URL


def create_chat_completion_data(
    content: str,
    model: str,
    timestamp: int,
    finish_reason: Optional[str] = None,
) -> Dict[str, Any]:
    """Build one OpenAI-style ``chat.completion.chunk`` SSE payload.

    NOTE(review): this function was called by process_streaming_response but
    was missing from the module (NameError at runtime). Reconstructed with
    the standard chunk shape — confirm against the original implementation.
    """
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }


def _build_json_data(request: ChatRequest, chat_id: str) -> Dict[str, Any]:
    """Build the upstream /api/chat request body shared by both entry points."""
    agent_mode = AGENT_MODE.get(request.model, {})
    trending_agent_mode = TRENDING_AGENT_MODE.get(request.model, {})
    return {
        "messages": [message_to_dict(msg) for msg in request.messages],
        "id": chat_id,
        "previewToken": None,
        "userId": None,
        "codeModelMode": True,
        "agentMode": agent_mode,
        "trendingAgentMode": trending_agent_mode,
        "isMicMode": False,
        "userSystemPrompt": None,
        "maxTokens": request.max_tokens,
        "playgroundTopP": request.top_p,
        "playgroundTemperature": request.temperature,
        "isChromeExt": False,
        "githubToken": None,
        "clickedAnswer2": False,
        "clickedAnswer3": False,
        "clickedForceWebSearch": False,
        "visitFromDelta": False,
        "mobileClient": False,
        "userSelectedModel": request.model if request.model in USER_SELECTED_MODELS else None,
        "webSearchMode": False,  # Adjust if needed
        "validated": VALIDATED_TOKEN,
    }


async def process_streaming_response(request: ChatRequest):
    """Stream the upstream response as OpenAI-style SSE ``data:`` events.

    Yields one chunk per upstream line, then a final ``stop`` chunk and a
    ``[DONE]`` sentinel. Raises HTTPException on upstream HTTP/network errors.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    headers_api_chat = get_headers_api_chat(referer_url)
    json_data = _build_json_data(request, chat_id)

    # Initialize before the loop: an empty upstream response previously left
    # `timestamp` unbound at the final "stop" yield (UnboundLocalError).
    timestamp = int(datetime.now().timestamp())

    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                "POST",
                f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
                timeout=100,
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    timestamp = int(datetime.now().timestamp())
                    if line:
                        content = line.strip()
                        # Handle special cases if necessary
                        yield f"data: {json.dumps(create_chat_completion_data(content, request.model, timestamp))}\n\n"

            yield f"data: {json.dumps(create_chat_completion_data('', request.model, timestamp, 'stop'))}\n\n"
            yield "data: [DONE]\n\n"
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))


async def process_non_streaming_response(request: ChatRequest):
    """Collect the full upstream response and return one OpenAI-style
    ``chat.completion`` object.

    Raises HTTPException on upstream HTTP/network errors.
    """
    chat_id = generate_chat_id()
    referer_url = get_referer_url(chat_id, request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {request.model} - URL: {referer_url}")

    headers_api_chat = get_headers_api_chat(referer_url)
    # NOTE(review): the original also built an unused `headers_chat` via
    # get_headers_chat(...); it was dead code and has been removed.
    json_data = _build_json_data(request, chat_id)

    full_response = ""
    async with httpx.AsyncClient() as client:
        try:
            async with client.stream(
                method="POST",
                url=f"{BASE_URL}/api/chat",
                headers=headers_api_chat,
                json=json_data,
            ) as response:
                response.raise_for_status()
                async for chunk in response.aiter_text():
                    full_response += chunk.strip()
        except httpx.HTTPStatusError as e:
            logger.error(f"HTTP error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=e.response.status_code, detail=str(e))
        except httpx.RequestError as e:
            logger.error(f"Error occurred during request for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e))

    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": request.model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": full_response},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }