import glob
import gradio as gr
from huggingface_hub import get_token
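
# Project-local helpers: image/model utilities are defined in utils.py and the
# custom flagging callback in flagging.py.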
from utils import (
    load_model,
    load_image_from_url,
    inference,
    load_badges,
    count_flagged_images_from_csv,
)
from flagging import myHuggingFaceDatasetSaver
TITLE = """
RGB Detection Demo
Give it a try! Upload an image or enter a URL to an image and click
Submit
.
"""
NOTICE = """
See something off? Your feedback makes a difference! Let us know by
flagging any outcomes that don't seem right. Just click on `Flag`
to submit the image for review. Note that by clicking `Flag`, you
agree to the use of your image for A.I. improvement purposes.
"""
css = """
h1 {
text-align: center;
display: block;
}
"""
model = load_model("SEA-AI/yolov5n6-RGB", img_size=1280)
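# Detection settings applied to the loaded model: confidence threshold,
# NMS IoU threshold, maximum detections per image, and class-agnostic NMS.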
model.conf = 0.2
model.iou = 0.4
model.max_det = 100
model.agnostic = True # NMS class-agnostic
# Flagging
dataset_name = "SEA-AI/crowdsourced-sea-images"
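# Custom flagging callback (defined in flagging.py) that writes flagged
# samples to the dataset above using the current Hugging Face token.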
hf_writer = myHuggingFaceDatasetSaver(get_token(), dataset_name)


def get_flagged_count():
    """Count flagged images in dataset."""
    return count_flagged_images_from_csv(dataset_name)

theme = gr.themes.Default(primary_hue=gr.themes.colors.indigo)

with gr.Blocks(theme=theme, css=css) as demo:
    badges = gr.HTML(load_badges(get_flagged_count()))
    title = gr.HTML(TITLE)

    with gr.Row():
        with gr.Column():
            img_input = gr.Image(label="input", interactive=True)
            img_url = gr.Textbox(
                lines=1,
                placeholder="or enter URL to image here",
                label="input_url",
                show_label=False,
            )
            with gr.Row():
                clear = gr.ClearButton()
                submit = gr.Button("Submit", variant="primary")
        with gr.Column():
            img_output = gr.Image(
                label="output", interactive=False, show_share_button=True
            )
            flag = gr.Button("Flag", visible=False)
            notice = gr.Markdown(value=NOTICE, visible=False)
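
    # Clicking an example runs inference on the bundled sample images; results
    # are precomputed and cached (cache_examples=True) so examples respond instantly.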
    gr.Examples(
        examples=glob.glob("examples/*.jpg"),
        inputs=img_input,
        outputs=img_output,
        fn=lambda image: inference(model, image),
        cache_examples=True,
    )

    # add components to clear when clear button is clicked
    clear.add([img_input, img_url, img_output])

    # event listeners
    img_url.change(load_image_from_url, [img_url], img_input)
    submit.click(
        lambda image: inference(model, image),
        [img_input],
        img_output,
        api_name="inference",
    )

    # event listeners with decorators
    @img_output.change(inputs=[img_output], outputs=[flag, notice], show_api=False)
    def show_hide(_img_output):
        visible = _img_output is not None
        return {
            flag: gr.Button("Flag", visible=visible, interactive=True),
            notice: gr.Markdown(value=NOTICE, visible=visible),
        }

    # This needs to be called prior to the first call to hf_writer.flag()
    hf_writer.setup([img_input], "flagged")

    # When the flag button is clicked: thank the user, disable the button,
    # persist the flagged sample, then refresh the badge count.
    flag.click(lambda: gr.Info("Thank you for contributing!"), show_api=False).then(
        lambda: {flag: gr.Button("Flag", interactive=False)}, [], [flag], show_api=False
    ).then(
        lambda *args: hf_writer.flag(args),
        [img_input, flag],
        [],
        preprocess=False,
        show_api=False,
    ).then(
        lambda: load_badges(get_flagged_count()), [], badges, show_api=False
    )

    # called during initial load in browser
    demo.load(lambda: load_badges(get_flagged_count()), [], badges, show_api=False)


if __name__ == "__main__":
    demo.queue().launch()  # show_api=False