Spaces:
Running
Running
File size: 7,378 Bytes
111afa2 3014996 111afa2 3014996 33eaa22 111afa2 3014996 111afa2 33eaa22 111afa2 3014996 111afa2 3014996 111afa2 3014996 111afa2 3014996 518d841 3014996 111afa2 3014996 3f0e775 3014996 3f0e775 111afa2 3014996 b19dcac 518d841 b19dcac 8aeea34 618ff49 518d841 333aa09 518d841 333aa09 518d841 618ff49 33eaa22 518d841 111afa2 3014996 111afa2 33eaa22 3014996 111afa2 518d841 bf34015 518d841 111afa2 3014996 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 |
from typing import List
import gradio as gr
import PIL
from gradio import ChatMessage
from smolagents.gradio_ui import stream_to_gradio
from agents.all_agents import get_master_agent
from llm import ANTHROPIC_MODEL_IDS, get_anthropic_model
# Allow Gradio to serve files under the local "images/" directory as static assets.
gr.set_static_paths(paths=["images/"])
def resize_image(image, max_width=1200, max_height=800):
    """Downscale a PIL image so it fits inside a bounding box, keeping aspect ratio.

    Args:
        image: PIL image (anything exposing ``.size`` and ``.resize``).
        max_width: Maximum allowed width in pixels (default 1200, the original limit).
        max_height: Maximum allowed height in pixels (default 800, the original limit).

    Returns:
        A resized copy (LANCZOS resampling) when the image exceeds the bounding
        box in either dimension; otherwise the original image object unchanged.
    """
    width, height = image.size
    # Guard clause: image already fits — return it untouched (no copy).
    if width <= max_width and height <= max_height:
        return image
    # Scale by the tighter of the two constraints so both dimensions fit.
    ratio = min(max_width / width, max_height / height)
    new_size = (int(width * ratio), int(height * ratio))
    return image.resize(new_size, PIL.Image.Resampling.LANCZOS)
def chat_interface_fn(input_request, history: List[ChatMessage], gallery, anthropic_api_key, anthropic_model_id):
    """Gradio ChatInterface callback: stream the agent's answer for one chat turn.

    Args:
        input_request: MultimodalTextbox payload, a dict with "text" and "files" keys.
        history: Prior chat messages; appended to in place as the agent streams.
        gallery: Current gr.Gallery value — a list of (image, caption) tuples — or None.
        anthropic_api_key: User-supplied Anthropic API key.
        anthropic_model_id: Selected Anthropic model identifier.

    Yields:
        (history, gallery_or_None) tuples; the gallery is only updated on the
        final yield, once the agent run has finished.
    """
    model = get_anthropic_model(anthropic_model_id, anthropic_api_key)
    agent = get_master_agent(model)
    # Gallery entries arrive as (image, caption) pairs; keep only the images.
    if gallery is None:
        gallery = []
    else:
        gallery = [item[0] for item in gallery]
    # Renamed from `message` — the original name was shadowed by the streaming
    # loop variable below, which made the function harder to follow.
    user_text = input_request["text"]
    image_paths = input_request["files"]
    prompt = f"""
You are given the following message from the user:
{user_text}
"""
    if image_paths:
        prompt += """
The user also provided the additional images that you can find in "images" variable
"""
    if history:
        prompt += "This request follows a previous request, you can use the previous request to help you answer the current request."
    prompt += """
Before your final answer, if you have any images to show, store them in the "final_images" variable.
Always return a text of what you did.
Never assume an invented model name, always use the model name provided by the task_model_retriever tool.
"""
    images = [PIL.Image.open(image_path) for image_path in image_paths]
    if gallery:
        images.extend(gallery)
    # Resized copies go to the LLM (bounded payload); the agent's Python
    # environment still receives the full-resolution originals.
    resized_images = [resize_image(image) for image in images]
    for chat_message in stream_to_gradio(
        agent,
        task=prompt,
        task_images=resized_images,
        additional_args={"images": images},
        reset_agent_memory=False,
    ):
        history.append(chat_message)
        yield history, None
    # Images the agent stored in its executor state become new gallery entries.
    final_images = agent.python_executor.state.get("final_images", [])
    gallery.extend(final_images)
    yield history, gallery
def example_selected(example):
    """Translate a selected Dataset row into a MultimodalTextbox payload.

    Args:
        example: Dataset row as [text, image_url].

    Returns:
        Dict with "text" and "files" keys in the shape MultimodalTextbox expects.
    """
    text = example[0]
    image_url = example[1]
    # Mirror the selection onto the hidden preview components.
    textbox.value = text
    image_box.value = image_url
    file_entry = {
        "url": image_url,
        "path": image_url,
        "name": image_url,
    }
    return {"text": text, "files": [file_entry]}
# UI layout: header, usage notice, credential inputs, chat interface, and examples.
with gr.Blocks() as demo:
    # Page title.
    gr.Markdown(
        """
# ScouterAI
"""
    )
    # Hero section: banner image plus a blurb describing the agent's capabilities
    # and linking to the Hub model categories it draws from.
    gr.HTML(
        """
<div style="display: flex; align-items: center; gap: 20px; margin: 20px 0;">
<img src="https://cdn-uploads.huggingface.co/production/uploads/632885ba1558dac67c440aa8/KpMuW4Qvrh5N-FMcVKKqG.png"
alt="Picture"
style="max-height: 350px; flex-shrink: 0;" />
<div style="flex-grow: 1;">
<p style="margin: 0; font-size: 1.1em;">
<p style="font-size: 1.8em; margin-bottom: 10px; font-weight: bold">Welcome to ScouterAI</p>
<p style="font-size: 1.2em;">The agent capable of identifying the best
model among the entire HuggingFace Hub to use for your needs.</p>
This Space focuses on using agentic reasoning to plan the use of multiple models to perform vision tasks.
<br>
To answer your request, the agent will use the following models from the hub:
<br>
<ul>
<li><a href="https://huggingface.co/models?pipeline_tag=object-detection&library=transformers&sort=trending">Object detection</a></li>
<li><a href="https://huggingface.co/models?pipeline_tag=image-segmentation&library=transformers&sort=trending">Image segmentation</a></li>
<li><a href="https://huggingface.co/models?pipeline_tag=image-classification&library=transformers&sort=trending">Image classification</a></li>
</ul>
The agent can resize and crop images as well as annotating it with bounding boxes, masks and labels.
<br>
<br>
Type your request and add images to the textbox below or click on one of the examples to see how <strong style="font-size: 1.5em;">powerful</strong> it is.
</p>
</div>
</div>
""",
    )
    # Funding notice: users must now bring their own Anthropic credentials.
    gr.Markdown(
        """
## Update 17/06/2025
This Space was originally a Hackathon submission, funded with Anthropic Free Credits.<br>
Due to the high popularity of the Space, unfortunately I can't fund personally the credits anymore.<br>
I have added below the ability to add your own Anthropic API Key and select the model to use.<br>
"""
    )
    # Credentials and model choice, forwarded to chat_interface_fn as extra inputs.
    anthropic_api_key = gr.Textbox(label="Anthropic API Key")
    anthropic_model_id = gr.Dropdown(label="Anthropic Model", choices=ANTHROPIC_MODEL_IDS)
    gr.Markdown(
        """
## Future plans
I plan to continue developing this Space on a more personal space here : https://huggingface.co/spaces/stevenbucaille/ScouterAI <br>
This Space will be powered with ZeroGPU and have more LLM options.<br>
Don't hesitate to like this other Space or reach out to me on <a href="https://www.linkedin.com/in/sbucaille/">LinkedIn</a> if you have any questions or feedback!<br>
Stay tuned!
<br>
"""
    )
    # Gallery is wired as both an extra input (prior images fed back to the
    # agent) and an extra output (images the agent produced during the run).
    output_gallery = gr.Gallery(label="Images generated by the agent (do not put images)", type="pil", format="png")
    textbox = gr.MultimodalTextbox()
    gr.ChatInterface(
        chat_interface_fn,
        type="messages",
        multimodal=True,
        textbox=textbox,
        additional_inputs=[output_gallery, anthropic_api_key, anthropic_model_id],
        additional_outputs=[output_gallery],
    )
    # Hidden components that define the Dataset's row schema (text + image).
    text_box = gr.Textbox(label="Text", visible=False)
    image_box = gr.Image(label="Image", visible=False)
    # Clickable example requests; selecting one pre-fills the multimodal textbox.
    dataset = gr.Dataset(
        samples=[
            [
                "I would like to detect all the cars in the image",
                "https://upload.wikimedia.org/wikipedia/commons/5/51/Crossing_the_Hudson_River_on_the_George_Washington_Bridge_from_Fort_Lee%2C_New_Jersey_to_Manhattan%2C_New_York_%287237796950%29.jpg",
            ],
            [
                "Find vegetables in the image and annotate the image with their masks",
                "https://media.istockphoto.com/id/1203599923/fr/photo/fond-de-nourriture-avec-lassortiment-des-l%C3%A9gumes-organiques-frais.jpg?s=612x612&w=0&k=20&c=Yu8nfOYI9YZ0UTpb7iFqX8OHp9wfvd9keMQ0BZIzhWs=",
            ],
            [
                "Detect each dog in the image and identify its breed, then provide a crop of each dog and annotate the original image with a bounding box and a label",
                "https://images.pexels.com/photos/10094979/pexels-photo-10094979.jpeg",
            ],
        ],
        components=[text_box, image_box],
        label="Examples",
    )
    dataset.select(example_selected, [dataset], [textbox])
demo.launch()
|