Spaces:
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -18,11 +18,11 @@ import gradio as gr
|
|
18 |
|
19 |
|
20 |
def image_fn(
|
21 |
-
image: gr.Image = None,
|
22 |
-
model_path: gr.Dropdown = None,
|
23 |
-
image_size: gr.Slider = 640,
|
24 |
-
conf_threshold: gr.Slider = 0.25,
|
25 |
-
iou_threshold: gr.Slider = 0.45,
|
26 |
):
|
27 |
"""
|
28 |
YOLOv7 inference function
|
@@ -93,21 +93,22 @@ def video_fn(model_path, video_file, conf_thres, iou_thres, start_sec, duration)
|
|
93 |
image_interface = gr.Interface(
|
94 |
fn=image_fn,
|
95 |
inputs=[
|
96 |
-
gr.Image(type="pil", label="Input Image"),
|
97 |
-
gr.Dropdown(
|
98 |
choices=[
|
99 |
-
"
|
100 |
#"kadirnar/yolov7-v0.1",
|
101 |
],
|
|
|
102 |
label="Model",
|
103 |
)
|
104 |
#gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size")
|
105 |
#gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
|
106 |
#gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold")
|
107 |
],
|
108 |
-
outputs=gr.Image(type="filepath", label="Output Image"),
|
109 |
title="Smart Environmental Eye (SEE)",
|
110 |
-
examples=[['image1.jpg', '
|
111 |
cache_examples=True,
|
112 |
theme='huggingface',
|
113 |
)
|
@@ -116,12 +117,13 @@ image_interface = gr.Interface(
|
|
116 |
video_interface = gr.Interface(
|
117 |
fn=video_fn,
|
118 |
inputs=[
|
119 |
-
gr.Video(source = "upload", type = "mp4", label = "Input Video"),
|
120 |
-
gr.Dropdown(
|
121 |
choices=[
|
122 |
-
"
|
123 |
#"kadirnar/yolov7-v0.1",
|
124 |
],
|
|
|
125 |
label="Model",
|
126 |
),
|
127 |
],
|
|
|
18 |
|
19 |
|
20 |
def image_fn(
|
21 |
+
image: gr.inputs.Image = None,
|
22 |
+
model_path: gr.inputs.Dropdown = None,
|
23 |
+
image_size: gr.inputs.Slider = 640,
|
24 |
+
conf_threshold: gr.inputs.Slider = 0.25,
|
25 |
+
iou_threshold: gr.inputs.Slider = 0.45,
|
26 |
):
|
27 |
"""
|
28 |
YOLOv7 inference function
|
|
|
93 |
image_interface = gr.Interface(
|
94 |
fn=image_fn,
|
95 |
inputs=[
|
96 |
+
gr.inputs.Image(type="pil", label="Input Image"),
|
97 |
+
gr.inputs.Dropdown(
|
98 |
choices=[
|
99 |
+
"alshimaa/SEE_model_yolo7",
|
100 |
#"kadirnar/yolov7-v0.1",
|
101 |
],
|
102 |
+
default="alshimaa/SEE_model_yolo7",
|
103 |
label="Model",
|
104 |
)
|
105 |
#gr.inputs.Slider(minimum=320, maximum=1280, default=640, step=32, label="Image Size")
|
106 |
#gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.25, step=0.05, label="Confidence Threshold"),
|
107 |
#gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.45, step=0.05, label="IOU Threshold")
|
108 |
],
|
109 |
+
outputs=gr.outputs.Image(type="filepath", label="Output Image"),
|
110 |
title="Smart Environmental Eye (SEE)",
|
111 |
+
examples=[['image1.jpg', 'alshimaa/SEE_model_yolo7', 640, 0.25, 0.45]],
|
112 |
cache_examples=True,
|
113 |
theme='huggingface',
|
114 |
)
|
|
|
117 |
video_interface = gr.Interface(
|
118 |
fn=video_fn,
|
119 |
inputs=[
|
120 |
+
gr.inputs.Video(source = "upload", type = "mp4", label = "Input Video"),
|
121 |
+
gr.inputs.Dropdown(
|
122 |
choices=[
|
123 |
+
"alshimaa/SEE_model_yolo7",
|
124 |
#"kadirnar/yolov7-v0.1",
|
125 |
],
|
126 |
+
default="alshimaa/SEE_model_yolo7",
|
127 |
label="Model",
|
128 |
),
|
129 |
],
|