fcakyon committed
Commit · 161c582
Parent(s): af56243

update dependencies

Files changed:
- app.py +1 -1
- requirements.txt +3 -2
- utils.py +0 -127
app.py
CHANGED
@@ -3,8 +3,8 @@ import sahi.utils.mmdet
 import sahi.model
 from PIL import Image
 import random
-from utils import image_comparison
 from utils import sahi_mmdet_inference
+from streamlit_image_comparison import image_comparison
 
 MMDET_YOLOX_MODEL_URL = "https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20210806_234250-4ff3b67e.pth"
 
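Note: after this change the app pulls image_comparison from the streamlit-image-comparison package instead of the local utils module. Below is a minimal usage sketch of the packaged component, assuming two PIL images are already available; the file names and labels are illustrative and not taken from this Space.

import streamlit as st
from PIL import Image
from streamlit_image_comparison import image_comparison

st.title("Image comparison demo")  # illustrative title, not the Space's

# Illustrative inputs; the Space feeds its prediction visuals here instead.
img_before = Image.open("before.jpg")
img_after = Image.open("after.jpg")

# Renders a juxtapose-style before/after slider between the two images,
# mirroring the local helper that this commit removes from utils.py.
image_comparison(
    img1=img_before,
    img2=img_after,
    label1="before",
    label2="after",
)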
requirements.txt
CHANGED
@@ -2,7 +2,8 @@
 -f https://download.openmmlab.com/mmcv/dist/cpu/torch1.8.0/index.html
 torch==1.8.1+cpu
 torchvision==0.9.1+cpu
-
+sahi==0.8.11
 mmdet==2.18.1
 mmcv-full==1.3.17
-streamlit
+streamlit
+streamlit-image-comparison==0.0.1
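A quick way to confirm that the pinned versions above resolve after installation (a minimal sketch; importlib.metadata is used because not every package exposes a __version__ attribute):

from importlib.metadata import version

# Distribution names as pinned in requirements.txt above.
for pkg in (
    "torch",
    "torchvision",
    "sahi",
    "mmdet",
    "mmcv-full",
    "streamlit",
    "streamlit-image-comparison",
):
    print(pkg, version(pkg))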
utils.py
CHANGED
@@ -1,12 +1,7 @@
-import streamlit.components.v1 as components
 import numpy
 import sahi.predict
 import sahi.utils
 from PIL import Image
-import base64
-import io
-import os
-import uuid
 
 TEMP_DIR = "temp"
 
@@ -57,125 +52,3 @@ def sahi_mmdet_inference(
     output_2 = Image.fromarray(visual_result_2["image"])
 
     return output_1, output_2
-
-
-def pillow_to_base64(image: Image.Image):
-    in_mem_file = io.BytesIO()
-    image.save(in_mem_file, format="JPEG", subsampling=0, quality=100)
-    img_bytes = in_mem_file.getvalue()  # bytes
-    image_str = base64.b64encode(img_bytes).decode("utf-8")
-    base64_src = f"data:image/jpg;base64,{image_str}"
-    return base64_src
-
-
-def local_file_to_base64(image_path: str):
-    file_ = open(image_path, "rb")
-    img_bytes = file_.read()
-    image_str = base64.b64encode(img_bytes).decode("utf-8")
-    file_.close()
-    base64_src = f"data:image/jpg;base64,{image_str}"
-    return base64_src
-
-
-def pillow_local_file_to_base64(image: Image.Image):
-    # pillow to local file
-    img_path = TEMP_DIR + "/" + str(uuid.uuid4()) + ".jpg"
-    image.save(img_path, subsampling=0, quality=100)
-    # local file base64 str
-    base64_src = local_file_to_base64(img_path)
-    return base64_src
-
-
-def image_comparison(
-    img1: str,
-    img2: str,
-    label1: str = "1",
-    label2: str = "2",
-    width: int = 700,
-    show_labels: bool = True,
-    starting_position: int = 50,
-    make_responsive: bool = True,
-    in_memory=False,
-):
-    """Create a new juxtapose component.
-    Parameters
-    ----------
-    img1: str, PosixPath, PIL.Image or URL
-        Input image to compare
-    img2: str, PosixPath, PIL.Image or URL
-        Input image to compare
-    label1: str or None
-        Label for image 1
-    label2: str or None
-        Label for image 2
-    width: int or None
-        Width of the component in px
-    show_labels: bool or None
-        Show given labels on images
-    starting_position: int or None
-        Starting position of the slider as percent (0-100)
-    make_responsive: bool or None
-        Enable responsive mode
-    in_memory: bool or None
-        Handle pillow to base64 conversion in memory without saving to local
-    Returns
-    -------
-    static_component: Boolean
-        Returns a static component with a timeline
-    """
-    # prepare images
-    img_width, img_height = img1.size
-    h_to_w = img_height / img_width
-    height = (width * h_to_w) * 0.95
-
-    img1_pillow = sahi.utils.cv.read_image_as_pil(img1)
-    img2_pillow = sahi.utils.cv.read_image_as_pil(img2)
-
-    if in_memory:
-        # create base64 str from pillow images
-        img1 = pillow_to_base64(img1_pillow)
-        img2 = pillow_to_base64(img2_pillow)
-    else:
-        # clean temp dir
-        os.makedirs(TEMP_DIR, exist_ok=True)
-        for file_ in os.listdir(TEMP_DIR):
-            if file_.endswith(".jpg"):
-                os.remove(TEMP_DIR + "/" + file_)
-        # create base64 str from pillow images
-        img1 = pillow_local_file_to_base64(img1_pillow)
-        img2 = pillow_local_file_to_base64(img2_pillow)
-
-    # load css + js
-    cdn_path = "https://cdn.knightlab.com/libs/juxtapose/latest"
-    css_block = f'<link rel="stylesheet" href="{cdn_path}/css/juxtapose.css">'
-    js_block = f'<script src="{cdn_path}/js/juxtapose.min.js"></script>'
-
-    # write html block
-    htmlcode = f"""
-        {css_block}
-        {js_block}
-        <div id="foo" style="height: {height}; width: {width or '%100'};"></div>
-        <script>
-        slider = new juxtapose.JXSlider('#foo',
-            [
-                {{
-                    src: '{img1}',
-                    label: '{label1}',
-                }},
-                {{
-                    src: '{img2}',
-                    label: '{label2}',
-                }}
-            ],
-            {{
-                animate: true,
-                showLabels: {'true' if show_labels else 'false'},
-                showCredits: true,
-                startingPosition: "{starting_position}%",
-                makeResponsive: {'true' if make_responsive else 'false'},
-            }});
-        </script>
-        """
-    static_component = components.html(htmlcode, height=height, width=width)
-
-    return static_component