Spaces:
Running
Running
Update chatbot.py
Browse files- chatbot.py +10 -9
chatbot.py
CHANGED
@@ -212,7 +212,7 @@ client_mixtral = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
|
|
212 |
client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
|
213 |
client_mistral_nemo = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
|
214 |
|
215 |
-
@spaces.CPU(duration=60, queue=False)
|
216 |
def model_inference( user_prompt, chat_history):
|
217 |
if user_prompt["files"]:
|
218 |
inputs = llava(user_prompt, chat_history)
|
@@ -305,14 +305,15 @@ def model_inference( user_prompt, chat_history):
|
|
305 |
query = json_data["arguments"]["query"]
|
306 |
gr.Info("Generating Image, Please wait 10 sec...")
|
307 |
yield "Generating Image, Please wait 10 sec..."
|
308 |
-
try:
|
309 |
-
image = image_gen(f"{str(query)}")
|
310 |
-
yield gr.Image(image[1])
|
311 |
-
except:
|
312 |
-
client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
|
313 |
-
seed = random.randint(0,999999)
|
314 |
-
image = client_sd3.text_to_image(query, negative_prompt=f"{seed}")
|
315 |
-
yield gr.Image(image)
|
|
|
316 |
|
317 |
|
318 |
elif json_data["name"] == "video_generation":
|
|
|
212 |
client_llama = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
|
213 |
client_mistral_nemo = InferenceClient("mistralai/Mistral-Nemo-Instruct-2407")
|
214 |
|
215 |
+
# @spaces.CPU(duration=60, queue=False)
|
216 |
def model_inference( user_prompt, chat_history):
|
217 |
if user_prompt["files"]:
|
218 |
inputs = llava(user_prompt, chat_history)
|
|
|
305 |
query = json_data["arguments"]["query"]
|
306 |
gr.Info("Generating Image, Please wait 10 sec...")
|
307 |
yield "Generating Image, Please wait 10 sec..."
|
308 |
+
# try:
|
309 |
+
# image = image_gen(f"{str(query)}")
|
310 |
+
# yield gr.Image(image[1])
|
311 |
+
# except:
|
312 |
+
client_sd3 = InferenceClient("stabilityai/stable-diffusion-3-medium-diffusers")
|
313 |
+
seed = random.randint(0,999999)
|
314 |
+
image = client_sd3.text_to_image(query, negative_prompt=f"{seed}")
|
315 |
+
gr.Info("Using Stability diffusion 3")
|
316 |
+
yield gr.Image(image)
|
317 |
|
318 |
|
319 |
elif json_data["name"] == "video_generation":
|