# File size: 1,969 Bytes
# Commit: 628f747
import random
import re
import string
import time
from typing import Any, Dict, List

from aiohttp import ClientSession
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
# Mocks of the ImageResponse class and the to_data_uri helper function
class ImageResponse:
    """Lightweight container pairing a generated image URL with its alt text."""

    def __init__(self, url: str, alt: str):
        # Store both fields in a single tuple assignment.
        self.url, self.alt = url, alt
def to_data_uri(image: Any) -> str:
    """Return *image* encoded as a data URI.

    Placeholder implementation: always returns a stub PNG data URI
    regardless of the input. Replace with real base64 encoding.
    """
    placeholder = "data:image/png;base64,..."
    return placeholder
class AsyncGeneratorProvider:
    """Marker base class for providers created via ``create_async_generator``."""
class ProviderModelMixin:
    """Marker mixin for providers that expose a model list and a default model."""
class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
    """Mock provider mimicking the Blackbox.ai chat API."""

    url = "https://www.blackbox.ai"
    api_endpoint = "https://www.blackbox.ai/api/chat"
    default_model = 'blackbox'
    models = ['blackbox', 'gemini-1.5-flash', "llama-3.1-8b"]

    @classmethod
    def get_model(cls, model: str) -> str:
        """Resolve *model* to a supported one, falling back to the default.

        The original implementation ignored the requested model entirely;
        this normalizes unknown names instead of silently accepting them.
        """
        return model if model in cls.models else cls.default_model

    @classmethod
    async def create_async_generator(cls, model: str, messages: List[Dict[str, str]]) -> Any:
        """Return a mock completion payload for *messages*.

        NOTE: despite the name (kept for caller compatibility) this returns
        a plain dict, not an async generator.
        """
        model = cls.get_model(model)  # unknown models fall back to the default
        # Mock response for demonstration; a real implementation would POST
        # `messages` to cls.api_endpoint via an aiohttp ClientSession.
        return {"content": "This is a mock response from the model."}
# ASGI application instance; routes below are registered against it.
app = FastAPI()
class Message(BaseModel):
    """One chat message in the OpenAI-style request body."""
    # Speaker role, e.g. "user", "assistant", "system" — not validated here.
    role: str
    # The message text.
    content: str
class ChatRequest(BaseModel):
    """Request body for POST /v1/chat/completions."""
    # Requested model name; passed through to the provider.
    model: str
    # Conversation history, oldest first.
    messages: List[Message]
@app.post("/v1/chat/completions")
async def chat_completions(request: ChatRequest):
    """OpenAI-compatible chat-completions endpoint backed by Blackbox.

    Returns a single non-streamed completion in the OpenAI response shape.
    Raises HTTPException(502) if the provider payload is malformed.
    """
    # Convert pydantic models to the plain dicts the provider expects.
    messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]
    response = await Blackbox.create_async_generator(
        model=request.model,
        messages=messages
    )
    # Guard against a malformed provider payload instead of letting a
    # KeyError surface as an opaque 500.
    if not isinstance(response, dict) or "content" not in response:
        raise HTTPException(status_code=502, detail="Provider returned an invalid response")
    # Per-request completion id with OpenAI's "chatcmpl-" prefix
    # (was hardcoded to "chatcmpl-1234").
    completion_id = "chatcmpl-" + "".join(
        random.choices(string.ascii_letters + string.digits, k=24)
    )
    return {
        "id": completion_id,
        "object": "chat.completion",
        "created": int(time.time()),  # actual Unix timestamp (was hardcoded)
        "model": request.model,
        "choices": [
            {
                "message": {
                    "role": "assistant",
                    "content": response["content"]
                },
                "finish_reason": "stop",
                "index": 0
            }
        ]
    }