# ScouterAI — app.py
# HuggingFace Space by stevenbucaille (revision b19dcac, 4.16 kB).
from typing import List
import gradio as gr
import PIL
from gradio import ChatMessage
from smolagents.gradio_ui import stream_to_gradio
from agents.all_agents import get_master_agent
from llm import get_default_model
# Serve files under "images/" as static assets so the UI can display them.
gr.set_static_paths(paths=["images/"])

# Single module-level agent instance, shared across all chat sessions.
master_agent = get_master_agent(get_default_model())
print(master_agent)
def resize_image(image, max_width=1200, max_height=800):
    """Downscale ``image`` to fit within ``max_width`` x ``max_height``.

    Aspect ratio is preserved by applying a single scale factor. Images that
    already fit are returned unchanged (the same object, no copy).

    Args:
        image: a PIL image (anything exposing ``size`` and ``resize``).
        max_width: maximum allowed width in pixels (default 1200).
        max_height: maximum allowed height in pixels (default 800).

    Returns:
        The original image if it fits, otherwise a LANCZOS-resampled copy.
    """
    width, height = image.size
    if width <= max_width and height <= max_height:
        return image
    # One common ratio keeps the aspect ratio intact while satisfying both bounds.
    ratio = min(max_width / width, max_height / height)
    new_size = (int(width * ratio), int(height * ratio))
    return image.resize(new_size, PIL.Image.Resampling.LANCZOS)
def chat_interface_fn(input_request, history: List[ChatMessage], gallery):
    """Chat handler: forward the user's message (and images) to the master agent.

    Streams intermediate agent messages into the chat history as they arrive,
    then emits a final update that also carries any images the agent stored in
    its "final_images" variable.

    Args:
        input_request: gr.MultimodalTextbox payload, a dict with "text" (str)
            and "files" (list of local file paths).
        history: accumulated ChatMessages for this session (mutated in place).
        gallery: current gr.Gallery value; None, or a list of (image, caption)
            pairs — presumably as produced by gr.Gallery (TODO confirm shape).

    Yields:
        ``(history, gallery)`` tuples: ``gallery`` is None while streaming and
        the updated image list on the final yield.
    """
    # Normalize the gallery to a plain list of images.
    if gallery is None:
        gallery = []
    else:
        gallery = [value[0] for value in gallery]
    message = input_request["text"]
    image_paths = input_request["files"]
    prompt = f"""
You are given the following message from the user:
{message}
"""
    if len(image_paths) > 0:
        prompt += """
The user also provided the additional images that you can find in "images" variable
"""
    if len(history) > 0:
        prompt += "This request follows a previous request, you can use the previous request to help you answer the current request."
    prompt += """
Before your final answer, if you have any images to show, store them in the "final_images" variable.
Always return a text of what you did.
"""
    # Newly uploaded files plus everything already in the gallery are all
    # handed to the agent.
    images = [PIL.Image.open(image_path) for image_path in image_paths]
    if len(gallery) > 0:
        images.extend(gallery)
    # Resized copies become the task images; the full-resolution originals are
    # exposed to the agent's executor through the "images" variable.
    resized_images = [resize_image(image) for image in images]
    # NOTE(review): the loop variable shadows the earlier `message` string;
    # harmless here because `prompt` is already built, but worth renaming.
    for message in stream_to_gradio(
        master_agent,
        task=prompt,
        task_images=resized_images,
        additional_args={"images": images},
        reset_agent_memory=False,  # keep agent memory across turns
    ):
        history.append(message)
        yield history, None
    # Collect any images the agent left in its executor state and append them
    # to the gallery for display.
    final_images = master_agent.python_executor.state.get("final_images", [])
    gallery.extend(final_images)
    yield history, gallery
def example_selected(example):
    """Convert a selected Dataset row into a MultimodalTextbox payload.

    Args:
        example: ``[text, image_url]`` row from the examples Dataset.

    Returns:
        A dict with "text" and a single-entry "files" list — the value shape
        a gr.MultimodalTextbox accepts.
    """
    # NOTE: the original also assigned textbox.value / image_box.value here,
    # but mutating component attributes inside an event handler has no effect
    # in Gradio — the returned payload (wired to `textbox` in dataset.select)
    # is what actually updates the UI, so those assignments were dropped.
    text, image_url = example[0], example[1]
    return {
        "text": text,
        "files": [
            {
                "url": image_url,
                "path": image_url,
                "name": image_url,
            }
        ],
    }
# --- UI layout ---------------------------------------------------------------
with gr.Blocks() as demo:
    # Header: logo plus a short description of the Space.
    gr.Markdown(
        """
# ScouterAI
![image/png](https://cdn-uploads.huggingface.co/production/uploads/632885ba1558dac67c440aa8/j7fUk65sQsQ3o7fdfG5TH.png){ width="800" height="600" style="display: block; margin: 0 auto" }
Welcome to ScouterAI, the Agent that is capable of detecting over 9000 entities in images using the best models of the HuggingFace Hub.
""")
    # Gallery that accumulates every image the agent produces across turns.
    output_gallery = gr.Gallery(label="Output Gallery", type="pil", format="png")
    # Multimodal input: free text plus optional image uploads.
    textbox = gr.MultimodalTextbox()
    # The gallery is threaded through as BOTH an extra input and extra output,
    # so chat_interface_fn can read the current images and extend them.
    gr.ChatInterface(
        chat_interface_fn,
        type="messages",
        multimodal=True,
        textbox=textbox,
        additional_inputs=[output_gallery],
        additional_outputs=[output_gallery],
    )
    # Hidden components that define the column types of the examples Dataset.
    text_box = gr.Textbox(label="Text", visible=False)
    image_box = gr.Image(label="Image", visible=False)
    dataset = gr.Dataset(
        samples=[
            [
                "I would like to detect all the cars in the image",
                "https://upload.wikimedia.org/wikipedia/commons/5/51/Crossing_the_Hudson_River_on_the_George_Washington_Bridge_from_Fort_Lee%2C_New_Jersey_to_Manhattan%2C_New_York_%287237796950%29.jpg",
            ],
            [
                "Find vegetables in the image and annotate the image with their masks",
                "https://media.istockphoto.com/id/1203599923/fr/photo/fond-de-nourriture-avec-lassortiment-des-l%C3%A9gumes-organiques-frais.jpg?s=612x612&w=0&k=20&c=Yu8nfOYI9YZ0UTpb7iFqX8OHp9wfvd9keMQ0BZIzhWs=",
            ],
        ],
        components=[text_box, image_box],
        label="Examples",
    )
    # Clicking an example fills the multimodal textbox via example_selected.
    dataset.select(example_selected, [dataset], [textbox])

demo.launch()