"""
Main application for RGB detection demo.
Any new model should implement the following functions:
- load_model(model_path, img_size=640)
- inference(model, image)
"""
import os
import glob
# import spaces
import gradio as gr
from huggingface_hub import get_token
from utils import (
check_image,
load_image_from_url,
load_badges,
FlaggedCounter,
)
from flagging import HuggingFaceDatasetSaver
import install_private_repos # noqa: F401
from seavision import load_model, imread
TITLE = """
<h1> π SEA.AI's Vision Demo β¨ </h1>
<p align="center">
Ahoy! Explore our object detection technology!
Upload a maritime scene image and click <code>Submit</code>
to see the results.
</p>
"""
FLAG_TXT = "Report Mis-detection"
NOTICE = f"""
π© See something off? Your feedback makes a difference! Let us know by
flagging any outcomes that don't seem right. Click the `{FLAG_TXT}` button
to submit the image for review.
"""
css = """
h1 {
text-align: center;
display: block;
}
"""
model = load_model("experimental/ahoy6-MIX-1280-b1.onnx")
model.det_conf_thresh = 0.1
model.hor_conf_thresh = 0.1
# @spaces.GPU
def inference(image):
    """Run the detection model on *image* and return it with results drawn on."""
    detections = model(image)
    annotated = detections.draw(image)
    return annotated
def flag_img_input(
    image: gr.Image,
    flag_option: str = "misdetection",
    username: str = "anonymous",
):
    """Forward a flagged image to the HuggingFace dataset writer.

    Thin wrapper so Gradio event chains can submit *image* (plus the flag
    metadata) through ``hf_writer``.
    """
    print(f"{image=}, {flag_option=}, {username=}")
    hf_writer.flag([image], flag_option=flag_option, username=username)
# Flagging
# Flagged images are appended to this HuggingFace dataset; the counter reads
# back how many have been collected so far (shown in the badges).
dataset_name = "SEA-AI/crowdsourced-sea-images"
hf_writer = HuggingFaceDatasetSaver(get_token(), dataset_name)
flagged_counter = FlaggedCounter(dataset_name)
# Gradio theme with the default palette shifted to indigo.
theme = gr.themes.Default(primary_hue=gr.themes.colors.indigo)
with gr.Blocks(theme=theme, css=css, title="SEA.AI Vision Demo") as demo:
    # Badge strip (includes the flagged-image count) and page title.
    badges = gr.HTML(load_badges(flagged_counter.count()))
    title = gr.HTML(TITLE)
    with gr.Row():
        with gr.Column():
            # Left column: image input (upload/clipboard) plus a URL field.
            img_input = gr.Image(
                label="input",
                interactive=True,
                sources=["upload", "clipboard"],
            )
            img_url = gr.Textbox(
                lines=1,
                placeholder="or enter URL to image here",
                label="input_url",
                show_label=False,
            )
            with gr.Row():
                clear = gr.ClearButton()
                submit = gr.Button("Submit", variant="primary")
        with gr.Column():
            # Right column: annotated output, plus flag button and notice that
            # stay hidden until a non-example image has been processed.
            img_output = gr.Image(label="output", interactive=False)
            flag = gr.Button(FLAG_TXT, visible=False)
            notice = gr.Markdown(value=NOTICE, visible=False)
    # Example gallery; results are pre-computed and cached at build time.
    examples = gr.Examples(
        examples=glob.glob("examples/*.jpg"),
        inputs=img_input,
        outputs=img_output,
        fn=inference,
        cache_examples=True,
    )
    # add components to clear when clear button is clicked
    clear.add([img_input, img_url, img_output])
    # event listeners
    # Entering a URL loads that image into the input component.
    img_url.change(load_image_from_url, [img_url], img_input)
    # Validate the input first; run inference only if validation succeeds.
    submit.click(check_image, [img_input], None, show_api=False).success(
        inference,
        [img_input],
        img_output,
        api_name="inference",
    )
    # event listeners with decorators
    @img_output.change(
        inputs=[img_input, img_output],
        outputs=[flag, notice],
        show_api=False,
        preprocess=False,
        show_progress="hidden",
    )
    def _show_hide_flagging(_img_input, _img_output):
        """Show the flag button/notice only for user-provided (non-example) images."""
        # NOTE(review): with preprocess=False this assumes _img_input is a dict
        # containing "orig_name"; a None input or missing key would raise here
        # — confirm against the Gradio payload format in use.
        visible = _img_output and _img_input["orig_name"] not in os.listdir("examples")
        return {
            flag: gr.Button(FLAG_TXT, interactive=True, visible=visible),
            notice: gr.Markdown(value=NOTICE, visible=visible),
        }
    # This needs to be called prior to the first call to callback.flag()
    hf_writer.setup([img_input], "flagged")
    # Sequential logic when flag button is clicked:
    # thank the user -> disable the button -> upload the image -> refresh badges.
    flag.click(lambda: gr.Info("Thank you for contributing!"), show_api=False).then(
        lambda: {flag: gr.Button(FLAG_TXT, interactive=False)},
        [],
        [flag],
        show_api=False,
    ).then(
        flag_img_input,
        [img_input],
        [],
        preprocess=False,
        show_api=True,
        api_name="flag_misdetection",
    ).then(
        lambda: load_badges(flagged_counter.count()),
        [],
        badges,
        show_api=False,
    )
    # called during initial load in browser
    demo.load(lambda: load_badges(flagged_counter.count()), [], badges, show_api=False)
# Entry point: enable the request queue and start the Gradio server.
if __name__ == "__main__":
    demo.queue().launch()