Spaces:
Running
Running
fcakyon
committed on
Commit
·
14787cc
1
Parent(s):
958dc22
randomly direct sahi related urls during spinner
Browse files
app.py
CHANGED
@@ -4,6 +4,7 @@ import sahi.model
|
|
4 |
import sahi.predict
|
5 |
from PIL import Image
|
6 |
import numpy
|
|
|
7 |
|
8 |
|
9 |
MMDET_YOLACT_MODEL_URL = "https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth"
|
@@ -51,8 +52,8 @@ def get_mmdet_model(model_name: str):
|
|
51 |
config_path = sahi.utils.mmdet.download_mmdet_config(
|
52 |
model_name="yolox", config_file_name="yolox_tiny_8x8_300e_coco.py"
|
53 |
)
|
54 |
-
elif model_name == "
|
55 |
-
model_path = "
|
56 |
sahi.utils.file.download_from_url(
|
57 |
MMDET_FASTERRCNN_MODEL_URL,
|
58 |
model_path,
|
@@ -130,7 +131,7 @@ st.markdown(
|
|
130 |
unsafe_allow_html=True,
|
131 |
)
|
132 |
st.markdown(
|
133 |
-
"<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://
|
134 |
unsafe_allow_html=True,
|
135 |
)
|
136 |
|
@@ -162,7 +163,7 @@ with col1:
|
|
162 |
st.image(image, caption=slider, width=300)
|
163 |
with col3:
|
164 |
model_name = st.selectbox(
|
165 |
-
"Select MMDetection model:", ("
|
166 |
)
|
167 |
slice_size = st.number_input("slice_size", 256, value=512, step=256)
|
168 |
overlap_ratio = st.number_input("overlap_ratio", 0.0, 0.6, value=0.2, step=0.2)
|
@@ -186,21 +187,58 @@ if image_file is not None:
|
|
186 |
else:
|
187 |
image = Image.open(slider)
|
188 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
189 |
if submit:
|
190 |
# perform prediction
|
191 |
st.markdown(
|
192 |
"<h3 style='text-align: center'> Results: </h1>",
|
193 |
unsafe_allow_html=True,
|
194 |
)
|
195 |
-
|
|
|
|
|
|
|
|
|
196 |
detection_model = get_mmdet_model(model_name)
|
|
|
197 |
if model_name == "yolox":
|
198 |
image_size = 416
|
199 |
else:
|
200 |
image_size = 640
|
201 |
|
202 |
with st.spinner(
|
203 |
-
text="Performing prediction..
|
204 |
):
|
205 |
output_1, output_2 = sahi_mmdet_inference(
|
206 |
image,
|
|
|
4 |
import sahi.predict
|
5 |
from PIL import Image
|
6 |
import numpy
|
7 |
+
import random
|
8 |
|
9 |
|
10 |
MMDET_YOLACT_MODEL_URL = "https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth"
|
|
|
52 |
config_path = sahi.utils.mmdet.download_mmdet_config(
|
53 |
model_name="yolox", config_file_name="yolox_tiny_8x8_300e_coco.py"
|
54 |
)
|
55 |
+
elif model_name == "faster_rcnn":
|
56 |
+
model_path = "faster_rcnn.pt"
|
57 |
sahi.utils.file.download_from_url(
|
58 |
MMDET_FASTERRCNN_MODEL_URL,
|
59 |
model_path,
|
|
|
131 |
unsafe_allow_html=True,
|
132 |
)
|
133 |
st.markdown(
|
134 |
+
"<p style='text-align: center'>SAHI is a lightweight vision library for performing large scale object detection/ instance segmentation.. <a href='https://github.com/obss/sahi'>SAHI Github</a> | <a href='https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80'>SAHI Blog</a> | <a href='https://huggingface.co/spaces/fcakyon/sahi-yolov5'>SAHI+YOLOv5 Demo</a> </p>",
|
135 |
unsafe_allow_html=True,
|
136 |
)
|
137 |
|
|
|
163 |
st.image(image, caption=slider, width=300)
|
164 |
with col3:
|
165 |
model_name = st.selectbox(
|
166 |
+
"Select MMDetection model:", ("faster_rcnn", "yolact", "yolox"), index=2
|
167 |
)
|
168 |
slice_size = st.number_input("slice_size", 256, value=512, step=256)
|
169 |
overlap_ratio = st.number_input("overlap_ratio", 0.0, 0.6, value=0.2, step=0.2)
|
|
|
187 |
else:
|
188 |
image = Image.open(slider)
|
189 |
|
190 |
+
|
191 |
+
class SpinnerTexts:
    """Rotating pool of SAHI-related markdown links to show under a spinner.

    Remembers the indices of the last few picks so the same link is not
    shown again within the recent-history window.
    """

    # How many recent picks to remember; with 8 distinct texts this always
    # leaves at least 2 eligible candidates, so get() cannot loop forever.
    _HISTORY_SIZE = 6

    def __init__(self):
        # Indices of the most recently returned texts, oldest first.
        self.ind_history_list = []
        # NOTE: the original list contained the "easy installation" entry
        # twice, which let the same link appear twice inside the no-repeat
        # window; the duplicate is removed here.
        self.text_list = [
            "Meanwhile check out [MMDetection Colab notebook of SAHI](https://colab.research.google.com/github/obss/sahi/blob/main/demo/inference_for_mmdetection.ipynb)!",
            "Meanwhile check out [aerial object detection with SAHI](https://blog.ml6.eu/how-to-detect-small-objects-in-very-large-images-70234bab0f98?gi=b434299595d4)!",
            "Meanwhile check out [COCO Utilities of SAHI](https://github.com/obss/sahi/blob/main/docs/COCO.md)!",
            "Meanwhile check out [FiftyOne utilities of SAHI](https://github.com/obss/sahi#fiftyone-utilities)!",
            "Meanwhile check out [easy installation of SAHI](https://github.com/obss/sahi#getting-started)!",
            "Meanwhile check out [give a Github star to SAHI](https://github.com/obss/sahi/stargazers)!",
            "Meanwhile check out [Medium blogpost of SAHI](https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80)!",
            "Meanwhile check out [YOLOv5 HF Spaces demo of SAHI](https://huggingface.co/spaces/fcakyon/sahi-yolov5)!",
        ]

    def _store(self, ind):
        """Record *ind* as the latest pick, evicting the oldest entry once
        the history window is full."""
        if len(self.ind_history_list) == self._HISTORY_SIZE:
            self.ind_history_list.pop(0)
        self.ind_history_list.append(ind)

    def get(self):
        """Return a random text not returned within the last few calls.

        Returns:
            str: one markdown-formatted entry from ``text_list``.
        """
        # Draw BEFORE testing membership. The original started with
        # ``ind = 0`` and only re-rolled while the index was in the history,
        # which made the very first message deterministic (always index 0)
        # and biased subsequent picks toward index 0.
        ind = random.randint(0, len(self.text_list) - 1)
        while ind in self.ind_history_list:
            ind = random.randint(0, len(self.text_list) - 1)
        self._store(ind)
        return self.text_list[ind]
|
217 |
+
|
218 |
+
|
219 |
+
if "last_spinner_texts" not in st.session_state:
|
220 |
+
st.session_state["last_spinner_texts"] = SpinnerTexts()
|
221 |
+
|
222 |
if submit:
|
223 |
# perform prediction
|
224 |
st.markdown(
|
225 |
"<h3 style='text-align: center'> Results: </h1>",
|
226 |
unsafe_allow_html=True,
|
227 |
)
|
228 |
+
|
229 |
+
with st.spinner(
|
230 |
+
text="Downloading model weight.. "
|
231 |
+
+ st.session_state["last_spinner_texts"].get()
|
232 |
+
):
|
233 |
detection_model = get_mmdet_model(model_name)
|
234 |
+
|
235 |
if model_name == "yolox":
|
236 |
image_size = 416
|
237 |
else:
|
238 |
image_size = 640
|
239 |
|
240 |
with st.spinner(
|
241 |
+
text="Performing prediction.. " + st.session_state["last_spinner_texts"].get()
|
242 |
):
|
243 |
output_1, output_2 = sahi_mmdet_inference(
|
244 |
image,
|