from datetime import datetime
import json
import uuid
import asyncio
import random
import string
from typing import Any, Dict, Optional, AsyncGenerator

import httpx
from fastapi import HTTPException

from api.config import (
    models,
    model_aliases,
    ALLOWED_MODELS,
    MODEL_MAPPING,
    get_headers_api_chat,
    BASE_URL,
)
from api.models import ChatRequest, Message
from api.logger import setup_logger

logger = setup_logger(__name__)

# Editee API endpoint
EDITE_API_ENDPOINT = "https://editee.com/submit/chatgptfree"


def generate_chat_id(length: int = 7) -> str:
    """Return a random alphanumeric chat ID of the given length.

    NOTE(review): uses `random`, which is fine for non-security request
    identifiers; switch to `secrets` if these IDs ever become
    security-sensitive.
    """
    characters = string.ascii_letters + string.digits
    return "".join(random.choices(characters, k=length))


def create_chat_completion_data(
    content: str, model: str, timestamp: int, finish_reason: Optional[str] = None
) -> Dict[str, Any]:
    """Build a single OpenAI-style ``chat.completion.chunk`` payload.

    Args:
        content: The delta text for this chunk.
        model: Model name echoed back to the client.
        timestamp: Unix timestamp for the ``created`` field.
        finish_reason: ``"stop"`` on the final chunk, ``None`` otherwise.
    """
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": timestamp,
        "model": model,
        "choices": [
            {
                "index": 0,
                "delta": {"content": content, "role": "assistant"},
                "finish_reason": finish_reason,
            }
        ],
        "usage": None,
    }


def message_to_dict(message: Message) -> Dict[str, Any]:
    """Convert a Message into the dict shape expected by the upstream API.

    Plain-string content passes through; the multimodal list form takes its
    text from element 0 and, when a two-element list carries an "image_url"
    in element 1, attaches the base64 image under "data".
    """
    content = ""
    if isinstance(message.content, str):
        content = message.content
    elif (
        isinstance(message.content, list)
        and message.content  # FIX: empty list previously raised IndexError
        and isinstance(message.content[0], dict)
        and "text" in message.content[0]
    ):
        content = message.content[0]["text"]

    if (
        isinstance(message.content, list)
        and len(message.content) == 2
        # FIX: require a dict — `"image_url" in str` would do a substring match
        and isinstance(message.content[1], dict)
        and "image_url" in message.content[1]
    ):
        # Ensure base64 images are always included for all models
        return {
            "role": message.role,
            "content": content,
            "data": {
                "imageBase64": message.content[1]["image_url"]["url"],
                "fileText": "",
                "title": "snapshot",
            },
        }
    return {"role": message.role, "content": content}

# strip_model_prefix / get_referer_url were removed together with the
# MODEL_PREFIXES / MODEL_REFERERS config (not needed for the Editee backend).
def resolve_model(model: str) -> str:
    """Map an incoming model name to one Editee understands.

    Known names pass through, aliases resolve via ``model_aliases``, and
    anything unrecognized falls back to the default model ``"claude"``.
    """
    if model in MODEL_MAPPING:
        return model
    if model in model_aliases:
        return model_aliases[model]
    # FIX: the original f-string referenced an undefined `default_model`
    # name here, raising NameError on the fallback path.
    logger.warning("Model '%s' not recognized. Using default model 'claude'.", model)
    return "claude"


def _build_editee_payload(request: ChatRequest):
    """Build the per-request state shared by both response paths.

    Returns a ``(chat_id, resolved_model, headers, data)`` tuple.
    """
    chat_id = generate_chat_id()
    resolved_model = resolve_model(request.model)
    logger.info(f"Generated Chat ID: {chat_id} - Model: {resolved_model}")
    headers_api_chat = get_headers_api_chat(BASE_URL)  # BASE_URL doubles as referer
    data = {
        "user_input": format_prompt(request.messages),
        "context": " ",
        "template_id": "",
        "selected_model": resolved_model,
    }
    return chat_id, resolved_model, headers_api_chat, data


async def _post_to_editee(chat_id: str, headers: Dict[str, Any], data: Dict[str, Any]) -> str:
    """POST the payload to Editee and return the response's 'text' field.

    Raises:
        HTTPException: mirroring the upstream status on HTTP errors, or a
            500 on transport-level errors.
    """
    async with httpx.AsyncClient() as client:
        try:
            response = await client.post(
                EDITE_API_ENDPOINT,
                headers=headers,
                json=data,
                timeout=100,
            )
            response.raise_for_status()
        except httpx.HTTPStatusError as e:
            logger.error(
                f"HTTP error occurred for Chat ID {chat_id}: "
                f"{e.response.status_code} - {e.response.text}"
            )
            raise HTTPException(status_code=e.response.status_code, detail=str(e)) from e
        except httpx.RequestError as e:
            logger.error(f"Request error occurred for Chat ID {chat_id}: {e}")
            raise HTTPException(status_code=500, detail=str(e)) from e
    return response.json().get("text", "")


async def process_streaming_response(request: ChatRequest) -> AsyncGenerator[str, None]:
    """Stream an Editee completion as OpenAI-style SSE chunks.

    Editee returns the whole completion in a single response, so this emits
    one content chunk (if any text came back), then a 'stop' chunk, then the
    [DONE] marker.
    """
    chat_id, resolved_model, headers, data = _build_editee_payload(request)
    text = await _post_to_editee(chat_id, headers, data)
    timestamp = int(datetime.now().timestamp())
    if text:
        yield f"data: {json.dumps(create_chat_completion_data(text, resolved_model, timestamp))}\n\n"
    # Indicate completion
    yield f"data: {json.dumps(create_chat_completion_data('', resolved_model, timestamp, 'stop'))}\n\n"
    yield "data: [DONE]\n\n"


async def process_non_streaming_response(request: ChatRequest) -> Dict[str, Any]:
    """Return a complete OpenAI-style ``chat.completion`` object from Editee."""
    chat_id, resolved_model, headers, data = _build_editee_payload(request)
    text = await _post_to_editee(chat_id, headers, data)
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "created": int(datetime.now().timestamp()),
        "model": resolved_model,
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": text},
                "finish_reason": "stop",
            }
        ],
        "usage": None,
    }


def format_prompt(messages: list[Message]) -> str:
    """Join message contents into one newline-separated prompt string.

    Handles both plain-string content and the multimodal list form, where
    the text lives under the first element's "text" key.
    """
    formatted_messages = []
    for msg in messages:
        if isinstance(msg.content, str):
            formatted_messages.append(msg.content)
        elif isinstance(msg.content, list) and msg.content:
            # FIX: guard empty lists (IndexError) and non-dict first
            # elements (AttributeError) that crashed the original.
            first = msg.content[0]
            if isinstance(first, dict):
                formatted_messages.append(first.get("text", ""))
    return "\n".join(formatted_messages)