Update app.py
app.py CHANGED
@@ -84,6 +84,8 @@ def image_generation(query):
     return "output.jpg"
 
 # Function to handle different input types and choose the right tool
+from llama_index.core.chat_engine.types import AgentChatResponse
+
 def handle_input(user_prompt, image=None, audio=None, websearch=False):
     if audio:
         if isinstance(audio, str):
@@ -111,6 +113,7 @@ def handle_input(user_prompt, image=None, audio=None, websearch=False):
         messages = [{"role": "user", "content": [image, user_prompt]}]
         response = vqa_model.chat(image=None, msgs=messages, tokenizer=tokenizer)
     else:
+        # Modify this part to check if a tool is required or if a direct answer suffices
        response = agent.chat(user_prompt)
 
     # Extract the content from AgentChatResponse to return as a string
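The comment added inside the else branch points at a follow-up change: deciding whether the prompt actually needs the tool-using agent or can be answered directly, then normalizing whatever object comes back into a plain string (the "Extract the content from AgentChatResponse" step). A minimal sketch of that routing is below. The needs_tool heuristic, the TOOL_HINTS keywords, and the llm fallback are assumptions for illustration, not this Space's actual code; the API facts relied on are that llama_index's AgentChatResponse keeps its text in the .response attribute and that an LLM's complete() returns an object with .text.

# Sketch only: illustrates the routing and response extraction the diff's
# comments describe. needs_tool is a hypothetical helper; agent and llm
# stand in for the Space's own objects.
from llama_index.core.chat_engine.types import AgentChatResponse

# Assumed keyword hints; a real router could use an LLM-based classifier instead.
TOOL_HINTS = ("search", "generate an image", "draw", "look up")

def needs_tool(prompt: str) -> bool:
    """Cheap heuristic: route to the agent only when the prompt hints at a tool."""
    lowered = prompt.lower()
    return any(hint in lowered for hint in TOOL_HINTS)

def answer(user_prompt: str, agent, llm) -> str:
    if needs_tool(user_prompt):
        response = agent.chat(user_prompt)    # may invoke tools
    else:
        response = llm.complete(user_prompt)  # direct answer, no tool dispatch
    # AgentChatResponse stores its text in .response; CompletionResponse in .text.
    if isinstance(response, AgentChatResponse):
        return response.response
    return getattr(response, "text", str(response))

Funneling both paths through one extraction point keeps handle_input returning a plain string, which is what the UI layer (a Gradio output component in a typical Space) expects regardless of which branch produced the answer.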