"""
Main application for RGB detection demo.
Any new model should implement the following functions:
- load_model(model_path, img_size=640)
- inference(model, image)
"""
import os
import glob
import gradio as gr
from huggingface_hub import get_token
from utils import (
    check_image,
    load_image_from_url,
    load_badges,
    FlaggedCounter,
)
from flagging import HuggingFaceDatasetSaver
from model_yolov5 import load_model, inference
TITLE = """
<h1> SEA.AI's Machine Vision Demo ✨ </h1>
<p align="center">
Ahoy! Explore our object detection technology!
Upload a maritime scene image and click <code>Submit</code>
to see the results.
</p>
"""
FLAG_TXT = "Report Mis-detection"
NOTICE = f"""
🚩 See something off? Your feedback makes a difference! Let us know by
flagging any outcomes that don't seem right. Click the `{FLAG_TXT}` button
to submit the image for review.
"""
css = """
h1 {
    text-align: center;
    display: block;
}
"""
model = load_model("SEA-AI/yolov5n6-RGB", img_size=1280)
model.conf = 0.10
model.iou = 0.4
model.max_det = 100
model.agnostic = True # NMS class-agnostic
# Flagging
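# Flagged images are pushed to the Hugging Face dataset below via the local
# HuggingFaceDatasetSaver wrapper; FlaggedCounter reads the same dataset so the
# badge can show how many images have been contributed.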
dataset_name = "SEA-AI/crowdsourced-sea-images"
hf_writer = HuggingFaceDatasetSaver(get_token(), dataset_name)
flagged_counter = FlaggedCounter(dataset_name)
theme = gr.themes.Default(primary_hue=gr.themes.colors.indigo)
with gr.Blocks(theme=theme, css=css, title="SEA.AI Vision Demo") as demo:
    badges = gr.HTML(load_badges(flagged_counter.count()))
    title = gr.HTML(TITLE)

    with gr.Row():
        with gr.Column():
            img_input = gr.Image(
                label="input", interactive=True, sources=["upload", "clipboard"]
            )
            img_url = gr.Textbox(
                lines=1,
                placeholder="or enter URL to image here",
                label="input_url",
                show_label=False,
            )
            with gr.Row():
                clear = gr.ClearButton()
                submit = gr.Button("Submit", variant="primary")
        with gr.Column():
            img_output = gr.Image(label="output", interactive=False)
            flag = gr.Button(FLAG_TXT, visible=False)
            notice = gr.Markdown(value=NOTICE, visible=False)
    examples = gr.Examples(
        examples=glob.glob("examples/*.jpg"),
        inputs=img_input,
        outputs=img_output,
        fn=lambda image: inference(model, image),
        cache_examples=True,
    )
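    # Example outputs are pre-computed once at startup (cache_examples=True),
    # so clicking an example image does not trigger a live model call.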
    # add components to clear when clear button is clicked
    clear.add([img_input, img_url, img_output])

    # event listeners
    img_url.change(load_image_from_url, [img_url], img_input)
    submit.click(check_image, [img_input], show_api=False).success(
        lambda image: inference(model, image),
        [img_input],
        img_output,
        api_name="inference",
    )
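    # check_image validates the upload first; inference only runs if that step
    # succeeds (.success) and is exposed in the Space's API as "inference".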
    # event listeners with decorators
    @img_output.change(
        inputs=[img_input, img_output],
        outputs=[flag, notice],
        show_api=False,
        preprocess=False,
        show_progress="hidden",
    )
    def _show_hide_flagging(_img_input, _img_output):
        visible = _img_output and _img_input["orig_name"] not in os.listdir("examples")
        return {
            flag: gr.Button(FLAG_TXT, interactive=True, visible=visible),
            notice: gr.Markdown(value=NOTICE, visible=visible),
        }
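    # The flag button and notice are only revealed for user-supplied images;
    # the bundled example images (matched by original file name) are excluded.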
    # This needs to be called prior to the first call to callback.flag()
    hf_writer.setup([img_input], "flagged")

    # Sequential logic when flag button is clicked
    flag.click(lambda: gr.Info("Thank you for contributing!"), show_api=False).then(
        lambda: {flag: gr.Button(FLAG_TXT, interactive=False)},
        [],
        [flag],
        show_api=False,
    ).then(
        lambda *args: hf_writer.flag(args),
        [img_input, flag],
        [],
        preprocess=False,
        show_api=False,
    ).then(
        lambda: load_badges(flagged_counter.count()), [], badges, show_api=False
    )
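    # Each .then() step runs after the previous one finishes: thank the user,
    # disable the button, persist the image via hf_writer, then refresh the
    # contribution badge.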
    # called during initial load in browser
    demo.load(lambda: load_badges(flagged_counter.count()), [], badges, show_api=False)

if __name__ == "__main__":
    demo.queue().launch()