import csv
import os.path
import time
import cv2
import gdown
import numpy as np
import streamlit as st
import torch

def load_classes(csv_reader):
    """
    Load the class-name to class-id mapping from a CSV file.
    :param csv_reader: csv reader over rows of the form 'class_name,class_id'
    :return: dict mapping class_name to integer class_id
    """
    result = {}
    for line, row in enumerate(csv_reader, start=1):
        try:
            class_name, class_id = row
        except ValueError:
            raise ValueError("line {}: format should be 'class_name,class_id'".format(line))
        class_id = int(class_id)
        if class_name in result:
            raise ValueError("line {}: duplicate class name: '{}'".format(line, class_name))
        result[class_name] = class_id
    return result

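# For reference, load_classes expects each CSV row to be 'class_name,class_id'.
# A hypothetical dataset/labels.csv for the three classes used in this app might
# look like the lines below (the actual ids are an assumption and depend on how
# the dataset was exported):
#   with_mask,0
#   without_mask,1
#   mask_weared_incorrect,2
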
def draw_caption(image, box, caption):
    """
    Draw a caption above a bounding box on the image (in place).
    :param image: image to draw on
    :param box: bounding box as (x1, y1, x2, y2)
    :param caption: caption text
    :return:
    """
    b = np.array(box).astype(int)
    # Draw the text twice (thick black stroke, then thin white) so it stays readable on any background
    cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
    cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)

@st.cache
def load_labels():
    """
    Load the classes from dataset/labels.csv and invert the mapping.
    :return: dict mapping class_id to class_name
    """
    with open("dataset/labels.csv", 'r') as f:
        classes = load_classes(csv.reader(f, delimiter=','))
    labels = {}
    for key, value in classes.items():
        labels[value] = key
    return labels

def download_models(ids):
    """
    Download all models that are not already stored locally.
    :param ids: dict mapping model name to its Google Drive file id
    :return:
    """
    # Download model from drive if not stored locally
    with st.spinner('Downloading models, this may take a minute...'):
        for key in ids:
            if not os.path.isfile(f"model/{key}.pt"):
                url = f"https://drive.google.com/uc?id={ids[key]}"
                gdown.download(url=url, output=f"model/{key}.pt")

@st.cache(suppress_st_warning=True)
def load_model(model_path, prefix: str = 'model/'):
    """
    Load an inference model from disk, on GPU if available.
    :param model_path: name of the inference model (without the '.pt' extension)
    :param prefix: directory prefix where models are stored
    :return: model in evaluation mode
    """
    # Load model
    if torch.cuda.is_available():
        model = torch.load(f"{prefix}{model_path}.pt").to('cuda')
    else:
        model = torch.load(f"{prefix}{model_path}.pt", map_location=torch.device('cpu'))
        model = model.module.cpu()
    model.training = False
    model.eval()
    return model

def process_img(model, image, labels, caption: bool = True):
    """
    Run the detection model on an image and draw the predictions on it.
    :param model: inference model
    :param image: image to process
    :param labels: dict mapping class_id to class_name
    :param caption: whether to draw captions or not
    :return: copy of the original image with boxes (and optionally captions) drawn on it
    """
    image_orig = image.copy()
    rows, cols, cns = image.shape
    smallest_side = min(rows, cols)
    # Rescale the image
    min_side = 608
    max_side = 1024
    scale = min_side / smallest_side
    # Check if the largest side is now greater than max_side
    largest_side = max(rows, cols)
    if largest_side * scale > max_side:
        scale = max_side / largest_side
    # Resize the image with the computed scale
    image = cv2.resize(image, (int(round(cols * scale)), int(round(rows * scale))))
    rows, cols, cns = image.shape
    # Pad height and width so both are multiples of 32
    pad_w = 32 - rows % 32
    pad_h = 32 - cols % 32
    new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
    new_image[:rows, :cols, :] = image.astype(np.float32)
    image = new_image.astype(np.float32)
    # Normalize with ImageNet mean and std, then move channels first and add a batch dimension
    image /= 255
    image -= [0.485, 0.456, 0.406]
    image /= [0.229, 0.224, 0.225]
    image = np.expand_dims(image, 0)
    image = np.transpose(image, (0, 3, 1, 2))
    with torch.no_grad():
        image = torch.from_numpy(image)
        if torch.cuda.is_available():
            image = image.cuda()
        start = time.time()
        scores, classification, transformed_anchors = model(image.float())
        elapsed_time = time.time() - start
        # Keep only detections with a confidence score above 0.5
        idxs = np.where(scores.cpu() > 0.5)
        colors = {
            'with_mask': (0, 255, 0),
            'without_mask': (255, 0, 0),
            'mask_weared_incorrect': (190, 100, 20)
        }
        for j in range(idxs[0].shape[0]):
            # Rescale the box back to the original image size
            bbox = transformed_anchors[idxs[0][j], :]
            x1 = int(bbox[0] / scale)
            y1 = int(bbox[1] / scale)
            x2 = int(bbox[2] / scale)
            y2 = int(bbox[3] / scale)
            label_name = labels[int(classification[idxs[0][j]])]
            cap = label_name if caption else ''
            draw_caption(image_orig, (x1, y1, x2, y2), cap)
            cv2.rectangle(image_orig, (x1, y1), (x2, y2), color=colors[label_name], thickness=2)
        # Display the inference speed (FPS) and whether CUDA was used
        cv2.putText(image_orig,
                    f"{1 / elapsed_time:.1f} cuda:{str(torch.cuda.is_available()).lower()}",
                    fontScale=1, fontFace=cv2.FONT_HERSHEY_PLAIN, org=(10, 20), color=(0, 255, 0))
    return image_orig

# Page config
st.set_page_config(layout="centered")
st.sidebar.title("Face Mask Detection")
# Models drive ids
ids = {
'resnet50_20': st.secrets['resnet50'],
# 'resnet50_29': '1E_IOIuE5OpO4tQgTbXjdAmXR-9BCxxmT',
'resnet152_20': st.secrets['resnet152'],
}
# Download all models from drive
download_models(ids)
# Model selection
labels = load_labels()
model_path = st.selectbox('Choose a model', options=list(ids), index=0)
model = load_model(model_path=model_path) if model_path != '' else None
# Content
st.title('Face Mask Detection')
st.write('ResNet[18~152] trained for Face Mask Detection. ')
st.markdown(f"__Labels:__ with_mask, without_mask, mask_weared_incorrect")
# Display example selection
index = st.number_input('', min_value=0, max_value=852, value=495, help='Choose an image. ')
left, right = st.columns([3, 1])
# Get corresponding image and transform it
image = cv2.imread(f'dataset/validation/image/maksssksksss{index}.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Process img
with st.spinner('Please wait while the image is being processed... This may take a while. '):
image = process_img(model, image, labels, caption=False)
left.image(image)
# Write labels dict and device on right
right.write({
'green': 'with_mask',
'orange': 'mask_weared_incorrect',
'red': 'without_mask'
})
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
right.write(device)
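# A minimal sketch of how to run this app locally, assuming the script is saved as
# app.py and that dataset/, model/ and .streamlit/secrets.toml (holding the 'resnet50'
# and 'resnet152' Drive ids used above) sit next to it:
#   streamlit run app.py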