Update main.py
main.py CHANGED
@@ -12,6 +12,13 @@ from typing import List, Dict, Any, Optional
 from datetime import datetime
 from fastapi.responses import StreamingResponse
 
+# Custom exception for model not working
+class ModelNotWorkingException(Exception):
+    def __init__(self, model: str):
+        self.model = model
+        self.message = f"The model '{model}' is currently not working. Please wait for NiansuhAI to fix this. Thank you for your patience."
+        super().__init__(self.message)
+
 # Mock implementations for ImageResponse and to_data_uri
 class ImageResponse:
     def __init__(self, url: str, alt: str):
@@ -19,7 +26,6 @@ class ImageResponse:
         self.alt = alt
 
 def to_data_uri(image: Any) -> str:
-    # Placeholder for actual image encoding
     return "data:image/png;base64,..." # Replace with actual base64 data
 
 class AsyncGeneratorProvider:
@@ -94,7 +100,11 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         **kwargs
     ) -> Any:
         model = cls.get_model(model)
-
+
+        # Check if the model is working
+        if not cls.working or model not in cls.models:
+            raise ModelNotWorkingException(model)
+
         headers = {
             "accept": "*/*",
             "accept-language": "en-US,en;q=0.9",
@@ -212,18 +222,20 @@ async def chat_completions(request: ChatRequest):
 
     messages = [{"role": msg.role, "content": msg.content} for msg in request.messages]
 
-    async_generator = Blackbox.create_async_generator(
-        model=request.model,
-        messages=messages,
-        image=None,
-        image_name=None
-    )
+    try:
+        async_generator = Blackbox.create_async_generator(
+            model=request.model,
+            messages=messages,
+            image=None,
+            image_name=None
+        )
+    except ModelNotWorkingException as e:
+        raise HTTPException(status_code=503, detail=str(e))
 
     if request.stream:
         async def generate():
             async for chunk in async_generator:
                 if isinstance(chunk, ImageResponse):
-                    # Format the response as a Markdown image
                     image_markdown = f"![{chunk.alt}]({chunk.url})"
                     yield f"data: {json.dumps(create_response(image_markdown, request.model))}\n\n"
                 else:
@@ -235,10 +247,9 @@ async def chat_completions(request: ChatRequest):
         response_content = ""
         async for chunk in async_generator:
            if isinstance(chunk, ImageResponse):
-                # Add Markdown image to the response
                 response_content += f"![{chunk.alt}]({chunk.url})\n"
             else:
-                response_content += chunk
+                response_content += chunk
 
         return {
             "id": f"chatcmpl-{uuid.uuid4()}",
@@ -258,7 +269,6 @@ async def chat_completions(request: ChatRequest):
         "usage": None,
     }
 
-
 @app.get("/niansuhai/v1/models")
 async def get_models():
-    return {"models": Blackbox.models}
+    return {"models": Blackbox.models}