Commit 2c46c5a
Parent(s): 7ce11a0
Update app.py

app.py CHANGED
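In this commit, the old standalone input/output definitions (image_input, the inputs list, and the two sliders) are commented out by wrapping them in a triple-quoted string, and the UI is rebuilt as an explicit gr.Blocks layout: an image input, confidence and IOU sliders, a Run button wired to yolov8_inference via button.click, a gr.Examples block, and an HTML description rendered below the layout with gr.Markdown.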
@@ -40,7 +40,7 @@ def yolov8_inference(
 
 
     return annotated_image
-
+'''
 image_input = gr.inputs.Image() # Adjust the shape according to your requirements
 
 inputs = [
@@ -50,7 +50,7 @@ inputs = [
     ),
     gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold"),
 ]
-
+'''
 outputs = gr.Image(type="filepath", label="Output Image")
 title = "Fire Smoke Demo"
 import os
@@ -58,12 +58,84 @@ examples = [
     ["f2.jpg", 0.25, 0.45],
     ["f3.jpg", 0.25, 0.45],
 ]
-
-
-
-
-
-
-
-
-
+outputs_images = [
+    ["1.jpg"],  # Output image for the first fire-smoke example
+    ["2.jpg"],  # Output image for the second fire-smoke example
+    ["3.jpg"]
+]
+
+readme_html = """
+<html>
+<head>
+    <style>
+        .description {
+            margin: 20px;
+            padding: 10px;
+            border: 1px solid #ccc;
+        }
+    </style>
+</head>
+<body>
+<div class="description">
+    <p><strong>More details:</strong></p>
+    <p>We present a demo for object segmentation, built by training YOLOv8-seg on a fire-and-smoke dataset. The model was trained on 141 training images and validated on 40 images.</p>
+    <p><strong>Usage:</strong></p>
+    <p>You can upload fire-smoke images, and the demo will return the segmented image.</p>
+    <p><strong>Dataset:</strong></p>
+    <p>This dataset comprises a total of 201 images, divided into three distinct sets:</p>
+    <ul>
+        <li><strong>Training Set:</strong> 141 images used to train the model.</li>
+        <li><strong>Validation Set:</strong> 40 images used to tune model parameters during development.</li>
+        <li><strong>Test Set:</strong> 20 images kept as a separate evaluation set to assess the performance of trained models.</li>
+    </ul>
+    <p><strong>License:</strong> This dataset is made available under the Creative Commons Attribution 4.0 International License (CC BY 4.0).</p>
+    <p>To access and download this dataset, please follow this link: <a href="https://universe.roboflow.com/roboflow-universe-projects/fire-and-smoke-segmentation" target="_blank">Dataset Download</a></p>
+</div>
+
+</body>
+</html>
+"""
+with gr.Blocks() as demo:
+    gr.Markdown(
+        """
+        <div style="text-align: center;">
+        <h1>Fire Smoke Demo</h1>
+        Powered by <a href="https://Tuba.ai">Tuba</a>
+        </div>
+        """
+    )
+
+
+    # Define the input component and add it to the layout
+    with gr.Row():
+        image_input = gr.inputs.Image()
+
+
+    outputs = gr.Image(type="filepath", label="Output Image")
+
+    # Define the threshold sliders and add them to the layout
+    with gr.Row():
+        conf_slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.25, step=0.05, label="Confidence Threshold")
+    with gr.Row():
+        IOU_Slider = gr.Slider(minimum=0.0, maximum=1.0, value=0.45, step=0.05, label="IOU Threshold")
+
+
+
+
+    button = gr.Button("Run")
+
+
+    # Define the event listener that connects the input and output components and triggers the function
+    button.click(fn=yolov8_inference, inputs=[image_input, conf_slider, IOU_Slider], outputs=outputs, api_name="yolov8_inference")
+
+    gr.Examples(
+        fn=yolov8_inference,
+        examples=examples,
+        inputs=[image_input, conf_slider, IOU_Slider],
+        outputs=[outputs]
+    )
+    # gr.Examples(inputs=examples, outputs=outputs_images)
+    # Add the description below the layout
+    gr.Markdown(readme_html)
+# Launch the app
+demo.launch(share=False)
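The hunks above only show the tail of yolov8_inference (its return statement), so the new Blocks wiring assumes a function that takes the uploaded image plus the two threshold values and returns something gr.Image(type="filepath") can display. The sketch below is not the code from this Space: the parameter names, the ultralytics calls, the best.pt checkpoint name, and the output path are all assumptions made for illustration.

from ultralytics import YOLO
import cv2

# Assumed checkpoint name; the Space may load a different YOLOv8-seg weight file.
model = YOLO("best.pt")

def yolov8_inference(image, conf_threshold=0.25, iou_threshold=0.45):
    # Run YOLOv8 segmentation with the thresholds set by the UI sliders.
    results = model.predict(source=image, conf=conf_threshold, iou=iou_threshold)
    # plot() draws the predicted masks and boxes and returns a BGR numpy array.
    annotated = results[0].plot()
    # The output component is gr.Image(type="filepath"), so write the annotated
    # image to disk and return its path.
    output_path = "annotated.jpg"
    cv2.imwrite(output_path, annotated)
    annotated_image = output_path
    return annotated_image

Note also that gr.inputs.Image() is the legacy Gradio input namespace: it was removed in Gradio 4.x, so this layout presumably relies on an older, pinned Gradio release where it still resolves (on newer versions gr.Image() would be used instead).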