File size: 3,725 Bytes
4295e27 23e063f 4295e27 23e063f e19211a 23e063f 4295e27 e19211a 23e063f e19211a 4295e27 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 |
import time
import uuid
from typing import Dict, List, Optional

from aiohttp import ClientSession
from fastapi import HTTPException

from api.config import EDITEA_API_ENDPOINT, EDITEA_HEADERS, MODEL_MAPPING
from api.helper import format_prompt
from api.logger import logger
from api.models import ChatRequest
# Editee class is now integrated within utils.py
class Editee:
    """Async client for the Editee (editee.com) chat-completion endpoint.

    Exposes a small provider interface: capability flags, the set of
    supported backend models (plus aliases), and an async generator that
    streams or returns the completion text.
    """

    label = "Editee"
    url = "https://editee.com"
    api_endpoint = EDITEA_API_ENDPOINT
    working = True
    supports_stream = True
    supports_system_message = True
    supports_message_history = True

    default_model = 'claude'
    models = ['claude', 'gpt4', 'gemini', 'mistrallarge']
    model_aliases = {
        "claude-3.5-sonnet": "claude",
        "gpt-4o": "gpt4",
        "gemini-pro": "gemini",
        "mistral-large": "mistrallarge",
    }

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* to a name Editee accepts.

        Known names pass through, known aliases are mapped, and anything
        else falls back to ``default_model``.
        """
        if model in cls.models:
            return model
        # dict.get covers both the alias and the unknown-model fallback.
        return cls.model_aliases.get(model, cls.default_model)

    @classmethod
    async def create_async_generator(
        cls,
        model: str,
        messages: List[Dict[str, str]],
        proxy: Optional[str] = None,  # fixed annotation: default is None, so Optional
        **kwargs
    ):
        """Yield completion text for *messages* from the Editee API.

        Args:
            model: Requested model name (resolved via ``get_model``).
            messages: Chat history as ``{"role", "content"}`` dicts;
                flattened into a single prompt by ``format_prompt``.
            proxy: Optional proxy URL passed through to aiohttp.

        Yields:
            Decoded SSE lines when the server streams
            (``text/event-stream``), otherwise the single ``text`` field
            of the JSON response.

        Raises:
            HTTPException: 500 on any transport, HTTP-status, or
                decoding error.
        """
        model = cls.get_model(model)
        async with ClientSession(headers=EDITEA_HEADERS) as session:
            payload = {
                "user_input": format_prompt(messages),
                "context": " ",
                "template_id": "",
                "selected_model": model,
            }
            try:
                async with session.post(cls.api_endpoint, json=payload, proxy=proxy) as response:
                    response.raise_for_status()
                    if response.content_type == 'text/event-stream':
                        async for line in response.content:
                            yield line.decode('utf-8')
                    else:
                        response_data = await response.json()
                        yield response_data['text']
            except Exception as e:
                # Normalize every upstream failure to a 500 for the API layer.
                logger.error(f"Error in Editee API call: {e}")
                raise HTTPException(status_code=500, detail="Error in Editee API call")
# Function to process the response
async def process_response(request: ChatRequest, stream: bool = False):
    """Run *request* through the Editee provider.

    Args:
        request: Incoming chat request; ``request.model`` is remapped via
            MODEL_MAPPING and ``request.messages`` converted to plain dicts.
        stream: When True, return an async generator of SSE-framed
            ``data: ...`` chunks; otherwise collect the full completion.

    Returns:
        An async generator (stream mode) or an OpenAI-style
        ``chat.completion`` dict.

    Raises:
        HTTPException: propagated unchanged from the provider, or a new
            500 for any other failure.
    """
    try:
        model = MODEL_MAPPING.get(request.model, request.model)
        messages = [
            {"role": message.role, "content": message.content}
            for message in request.messages
        ]
        generator = Editee.create_async_generator(
            model=model,
            messages=messages,
            proxy=None  # Add proxy if needed
        )
        if stream:
            async def event_generator():
                async for chunk in generator:
                    yield f"data: {chunk}\n\n"
            return event_generator()

        full_response = "".join([chunk async for chunk in generator])
        return {
            "id": f"chatcmpl-{uuid.uuid4()}",
            "object": "chat.completion",
            # BUG FIX: uuid1().time is 100-ns intervals since 1582-10-15,
            # not a Unix epoch timestamp; "created" must be epoch seconds.
            "created": int(time.time()),
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {"role": "assistant", "content": full_response},
                    "finish_reason": "stop",
                }
            ],
            "usage": None,
        }
    except HTTPException:
        # Preserve the provider's status code and detail instead of
        # re-wrapping it in a generic 500.
        raise
    except Exception as e:
        logger.error(f"Error processing response: {e}")
        raise HTTPException(status_code=500, detail=str(e))
|