from typing import List
import gradio as gr
import PIL.Image
from gradio import ChatMessage
from smolagents.gradio_ui import stream_to_gradio
from agents.all_agents import get_master_agent
from llm import ANTHROPIC_MODEL_IDS, get_anthropic_model
gr.set_static_paths(paths=["images/"])
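
# agents.all_agents and llm are local modules not shown in this file. As a
# hedged sketch of the contract assumed here (inferred from how they are used
# below, not confirmed): ANTHROPIC_MODEL_IDS is a list of Anthropic model-ID
# strings for the dropdown, get_anthropic_model(model_id, api_key) returns a
# smolagents-compatible chat model, and get_master_agent(model) returns a
# code-executing agent whose steps stream_to_gradio can stream and whose
# python_executor.state is readable after a run.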

def resize_image(image):
    """Downscale an image to fit within 1200x800, preserving aspect ratio."""
    width, height = image.size
    if width > 1200 or height > 800:
        ratio = min(1200 / width, 800 / height)
        new_width = int(width * ratio)
        new_height = int(height * ratio)
        resized_image = image.resize((new_width, new_height), PIL.Image.Resampling.LANCZOS)
        return resized_image
    return image
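
# A quick sanity check of resize_image with hypothetical sizes (not taken from
# the app): a 2400x1600 image scales by min(1200/2400, 800/1600) = 0.5 down to
# 1200x800, while a 640x480 image passes through unchanged.
#
#   img = PIL.Image.new("RGB", (2400, 1600))
#   assert resize_image(img).size == (1200, 800)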

def chat_interface_fn(input_request, history: List[ChatMessage], gallery, anthropic_api_key, anthropic_model_id):
    model = get_anthropic_model(anthropic_model_id, anthropic_api_key)
    agent = get_master_agent(model)
    # The gallery holds (image, caption) tuples; keep only the images.
    if gallery is None:
        gallery = []
    else:
        gallery = [value[0] for value in gallery]
    message = input_request["text"]
    image_paths = input_request["files"]
    prompt = f"""
You are given the following message from the user:
{message}
"""
    if len(image_paths) > 0:
        prompt += """
The user also provided additional images that you can find in the "images" variable.
"""
    if len(history) > 0:
        prompt += "This request follows a previous request; you can use the previous request to help you answer the current one."
    prompt += """
Before your final answer, if you have any images to show, store them in the "final_images" variable.
Always return a text description of what you did.
Never assume an invented model name; always use the model name provided by the task_model_retriever tool.
"""
    images = [PIL.Image.open(image_path) for image_path in image_paths]
    if len(gallery) > 0:
        images.extend(gallery)
    resized_images = [resize_image(image) for image in images]
    # Stream the agent's intermediate steps into the chat as they are produced.
    for agent_message in stream_to_gradio(
        agent,
        task=prompt,
        task_images=resized_images,
        additional_args={"images": images},
        reset_agent_memory=False,
    ):
        history.append(agent_message)
        yield history, None
    # Collect any images the agent stored in "final_images" and show them.
    final_images = agent.python_executor.state.get("final_images", [])
    gallery.extend(final_images)
    yield history, gallery
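
# chat_interface_fn is a generator: gr.ChatInterface renders each yielded
# (history, gallery) pair as a streaming update, so intermediate agent steps
# appear in the chat before the final answer; the gallery output is only
# filled in on the final yield. Full-resolution images are passed through
# additional_args so the agent's code executor sees them as the "images"
# variable referenced in the prompt, and "final_images" is read back from the
# same executor state once the run completes.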

def example_selected(example):
    # Build a MultimodalTextbox value from the selected [text, image] sample;
    # the returned dict is what populates the textbox.
    example = {
        "text": example[0],
        "files": [
            {
                "url": example[1],
                "path": example[1],
                "name": example[1],
            }
        ],
    }
    return example
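
# Note: when a gr.Dataset is an input to its own select event (see the
# dataset.select(...) wiring at the bottom of this file), Gradio passes the
# selected sample, here a [text, image_url] pair, which is why
# example_selected indexes into it positionally.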

with gr.Blocks() as demo:
    gr.Markdown(
        """
        # ScouterAI
        """
    )
    gr.HTML(
        """
        <div style="display: flex; align-items: center; gap: 20px; margin: 20px 0;">
            <img src="https://cdn-uploads.huggingface.co/production/uploads/632885ba1558dac67c440aa8/KpMuW4Qvrh5N-FMcVKKqG.png"
                 alt="Picture"
                 style="max-height: 350px; flex-shrink: 0;" />
            <div style="flex-grow: 1; font-size: 1.1em;">
                <p style="font-size: 1.8em; margin-bottom: 10px; font-weight: bold">Welcome to ScouterAI</p>
                <p style="font-size: 1.2em;">The agent capable of identifying the best
                model on the entire Hugging Face Hub to use for your needs.</p>
                This Space focuses on using agentic reasoning to plan the use of multiple models to perform vision tasks.
                <br>
                To answer your request, the agent will use the following models from the Hub:
                <br>
                <ul>
                    <li><a href="https://huggingface.co/models?pipeline_tag=object-detection&library=transformers&sort=trending">Object detection</a></li>
                    <li><a href="https://huggingface.co/models?pipeline_tag=image-segmentation&library=transformers&sort=trending">Image segmentation</a></li>
                    <li><a href="https://huggingface.co/models?pipeline_tag=image-classification&library=transformers&sort=trending">Image classification</a></li>
                </ul>
                The agent can resize and crop images, as well as annotate them with bounding boxes, masks, and labels.
                <br>
                <br>
                Type your request and add images in the textbox below, or click on one of the examples to see how <strong style="font-size: 1.5em;">powerful</strong> it is.
            </div>
        </div>
        """,
    )
    gr.Markdown(
        """
        ## Update 17/06/2025
        This Space was originally a hackathon submission, funded with free Anthropic credits.<br>
        Due to the Space's popularity, I unfortunately can no longer fund the credits personally.<br>
        I have therefore added the ability below to provide your own Anthropic API key and select the model to use.<br>
        """
    )
    anthropic_api_key = gr.Textbox(label="Anthropic API Key", type="password")
    anthropic_model_id = gr.Dropdown(label="Anthropic Model", choices=ANTHROPIC_MODEL_IDS)
    gr.Markdown(
        """
        ## Future plans
        I plan to continue developing this Space in a more personal space here: https://huggingface.co/spaces/stevenbucaille/ScouterAI <br>
        That Space will be powered by ZeroGPU and offer more LLM options.<br>
        Don't hesitate to like the other Space or reach out to me on <a href="https://www.linkedin.com/in/sbucaille/">LinkedIn</a> if you have any questions or feedback!<br>
        Stay tuned!
        <br>
        """
    )
    output_gallery = gr.Gallery(label="Images generated by the agent (do not upload images here)", type="pil", format="png")
    textbox = gr.MultimodalTextbox()
    gr.ChatInterface(
        chat_interface_fn,
        type="messages",
        multimodal=True,
        textbox=textbox,
        additional_inputs=[output_gallery, anthropic_api_key, anthropic_model_id],
        additional_outputs=[output_gallery],
    )
    # Hidden components that define the schema of the example dataset below.
    text_box = gr.Textbox(label="Text", visible=False)
    image_box = gr.Image(label="Image", visible=False)
    dataset = gr.Dataset(
        samples=[
            [
                "I would like to detect all the cars in the image",
                "https://upload.wikimedia.org/wikipedia/commons/5/51/Crossing_the_Hudson_River_on_the_George_Washington_Bridge_from_Fort_Lee%2C_New_Jersey_to_Manhattan%2C_New_York_%287237796950%29.jpg",
            ],
            [
                "Find vegetables in the image and annotate the image with their masks",
                "https://media.istockphoto.com/id/1203599923/fr/photo/fond-de-nourriture-avec-lassortiment-des-l%C3%A9gumes-organiques-frais.jpg?s=612x612&w=0&k=20&c=Yu8nfOYI9YZ0UTpb7iFqX8OHp9wfvd9keMQ0BZIzhWs=",
            ],
            [
                "Detect each dog in the image and identify its breed, then provide a crop of each dog and annotate the original image with a bounding box and a label",
                "https://images.pexels.com/photos/10094979/pexels-photo-10094979.jpeg",
            ],
        ],
        components=[text_box, image_box],
        label="Examples",
    )
    dataset.select(example_selected, [dataset], [textbox])

demo.launch()