Update main.py
main.py
@@ -47,7 +47,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'blackbox',
         'gemini-1.5-flash',
         "llama-3.1-8b",
-        'llama-3.1-70b',
+        'llama-3.1-70b',  # Example of a non-working model
         'llama-3.1-405b',
         'ImageGenerationLV45LJp',
         'gpt-4o',
@@ -55,6 +55,19 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         'claude-sonnet-3.5',
     ]
 
+    # Define the working status of models
+    model_status = {
+        'blackbox': True,
+        'gemini-1.5-flash': True,
+        'llama-3.1-8b': True,
+        'llama-3.1-70b': False,  # Non-working model
+        'llama-3.1-405b': True,
+        'ImageGenerationLV45LJp': True,
+        'gpt-4o': True,
+        'gemini-pro': True,
+        'claude-sonnet-3.5': True,
+    }
+
     agentMode = {
         'ImageGenerationLV45LJp': {'mode': True, 'id': "ImageGenerationLV45LJp", 'name': "Image Generation"},
     }
@@ -102,7 +115,7 @@ class Blackbox(AsyncGeneratorProvider, ProviderModelMixin):
         model = cls.get_model(model)
 
         # Check if the model is working
-        if not cls.
+        if not cls.model_status.get(model, False):
             raise ModelNotWorkingException(model)
 
         headers = {
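The new gate means every request is checked against the class-level availability map before any network call, and dict.get(model, False) treats any model missing from the map as not working. Below is a minimal standalone sketch of that pattern, with the provider class reduced to just what the check needs; the helper name ensure_working and the exception message are illustrative, not from main.py.

class ModelNotWorkingException(Exception):
    def __init__(self, model: str):
        super().__init__(f"The model '{model}' is currently not working")
        self.model = model


class Blackbox:
    # Availability map, as introduced in the diff
    model_status = {
        'gpt-4o': True,
        'llama-3.1-70b': False,  # marked non-working
    }

    @classmethod
    def ensure_working(cls, model: str) -> None:
        # Unknown models default to False, so a typo in the
        # model name fails loudly instead of silently
        if not cls.model_status.get(model, False):
            raise ModelNotWorkingException(model)


Blackbox.ensure_working('gpt-4o')           # passes silently
# Blackbox.ensure_working('llama-3.1-70b')  # raises ModelNotWorkingException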
@@ -226,8 +239,8 @@ async def chat_completions(request: ChatRequest):
         async_generator = Blackbox.create_async_generator(
             model=request.model,
             messages=messages,
-            image=None,
-            image_name=None
+            image=None,  # Pass the image if required
+            image_name=None  # Pass image name if required
         )
     except ModelNotWorkingException as e:
         raise HTTPException(status_code=503, detail=str(e))
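Catching the provider's exception at the endpoint and re-raising it as an HTTPException is what surfaces an unavailable model to the client as a 503 rather than a stack trace. A reduced sketch of that translation, assuming a FastAPI app; the route path and the endpoint body are stand-ins for the real generator call, only the except clause mirrors the diff.

from fastapi import FastAPI, HTTPException

app = FastAPI()


class ModelNotWorkingException(Exception):
    pass


@app.post("/v1/chat/completions")  # assumed route path
async def chat_completions():
    try:
        # Stand-in for the Blackbox.create_async_generator(...) call
        raise ModelNotWorkingException("llama-3.1-70b is currently not working")
    except ModelNotWorkingException as e:
        # The client receives HTTP 503 with body {"detail": "..."}
        raise HTTPException(status_code=503, detail=str(e))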
@@ -236,6 +249,7 @@ async def chat_completions(request: ChatRequest):
     async def generate():
         async for chunk in async_generator:
             if isinstance(chunk, ImageResponse):
+                # Format the response as a Markdown image
                 image_markdown = f""
                 yield f"data: {json.dumps(create_response(image_markdown, request.model))}\n\n"
             else:
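Each streamed chunk is framed as a server-sent event: a "data: " prefix, one JSON document, and a blank line terminating the event, which is exactly what the yielded f-string produces. A small sketch of that framing follows; create_response here is a hypothetical stand-in shaped like an OpenAI chat.completion.chunk, not the helper defined in main.py.

import json
import time
import uuid


def create_response(content: str, model: str) -> dict:
    # Hypothetical stand-in for the create_response helper used in the diff
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion.chunk",
        "created": int(time.time()),
        "model": model,
        "choices": [{"index": 0, "delta": {"content": content}, "finish_reason": None}],
    }


def sse_frame(content: str, model: str) -> str:
    # "data: <json>\n\n" -- the trailing blank line ends one SSE event
    return f"data: {json.dumps(create_response(content, model))}\n\n"


print(sse_frame("Hello", "gpt-4o"), end="")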
@@ -249,7 +263,7 @@ async def chat_completions(request: ChatRequest):
         if isinstance(chunk, ImageResponse):
             response_content += f"\n"
         else:
-            response_content += chunk
+            response_content += chunk  # Concatenate text responses
 
     return {
         "id": f"chatcmpl-{uuid.uuid4()}",
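The non-streaming branch drains the generator first: image chunks are rendered into the response text while plain text chunks are concatenated, and the aggregate is wrapped in a single OpenAI-style completion object. A sketch of that aggregation; the Markdown image form and the minimal ImageResponse stand-in are assumptions, not the types defined in main.py.

import uuid


class ImageResponse:
    # Minimal stand-in for the provider's ImageResponse type
    def __init__(self, url: str, alt: str = "image"):
        self.url = url
        self.alt = alt


def aggregate(chunks, model: str) -> dict:
    response_content = ""
    for chunk in chunks:
        if isinstance(chunk, ImageResponse):
            # Assumed Markdown image form; the real formatting lives in main.py
            response_content += f"![{chunk.alt}]({chunk.url})\n"
        else:
            response_content += chunk  # concatenate text deltas
    return {
        "id": f"chatcmpl-{uuid.uuid4()}",
        "object": "chat.completion",
        "model": model,
        "choices": [{
            "index": 0,
            "message": {"role": "assistant", "content": response_content},
            "finish_reason": "stop",
        }],
    }


print(aggregate(["Hello, ", "world!"], "gpt-4o")["choices"][0]["message"]["content"])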