from fastai.vision.all import *
from io import BytesIO
import requests
import streamlit as st

import numpy as np
import torch
import time
import cv2
from numpy import random

from models.experimental import attempt_load
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box

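# Note: models/ and utils/ are not pip-installable packages here; they are assumed to come
# from a local checkout of the YOLOv7 repository (https://github.com/WongKinYiu/yolov7),
# with this script sitting at the repository root so the imports above resolve.
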
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad an image to new_shape while keeping its aspect ratio (YOLO-style letterboxing).
    shape = img.shape[:2]  # current shape: (height, width)
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old); only scale down unless scaleup is allowed
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # width/height padding
    if auto:  # pad only up to the nearest stride multiple instead of the full target size
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
    elif scaleFill:  # stretch to fill new_shape exactly, no padding
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]

    # Split the padding evenly between the two sides
    dw /= 2
    dh /= 2

    if shape[::-1] != new_unpad:  # resize if needed
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)

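# A quick sanity check of letterbox (not part of the app flow; assumes a dummy 480x640 frame):
#   dummy = np.zeros((480, 640, 3), dtype=np.uint8)
#   padded, ratio, (dw, dh) = letterbox(dummy, new_shape=640, stride=32)
#   padded.shape  # -> (480, 640, 3): both spatial dimensions are multiples of the stride
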
def detect_modify(img0, model, conf=0.4, imgsz=640, conf_thres=0.25, iou_thres=0.45):
    st.image(img0, caption="Your image", use_column_width=True)

    # Make sure the inference size is a multiple of the model stride
    stride = int(model.stride.max())
    imgsz = check_img_size(imgsz, s=stride)

    # PIL gives RGB; OpenCV drawing works in BGR
    img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_RGB2BGR)
    img = letterbox(img0, imgsz, stride=stride)[0]

    # BGR -> RGB, HWC -> CHW, contiguous for torch.from_numpy
    img = img[:, :, ::-1].transpose(2, 0, 1)
    img = np.ascontiguousarray(img)

    # Class names and one random colour per class for the boxes
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    old_img_w = old_img_h = imgsz  # not used in this single-image path
    old_img_b = 1

    t0 = time.time()
    img = torch.from_numpy(img).to(device)  # module-level device, defined below
    img = img / 255.0  # 0-255 -> 0.0-1.0 (float tensor)
    if img.ndimension() == 3:
        img = img.unsqueeze(0)  # add batch dimension

    # Inference
    with torch.no_grad():
        pred = model(img)[0]

    # Non-maximum suppression: one tensor per image, rows are (x1, y1, x2, y2, conf, cls)
    pred = non_max_suppression(pred, conf_thres, iou_thres)

    gn = torch.tensor(img0.shape)[[1, 0, 1, 0]]  # normalization gain (w, h, w, h); not used here

    det = pred[0]
    if len(det):
        # Rescale boxes from the letterboxed size back to the original image size
        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()

        # Build a short per-class summary string, e.g. "2 faces, "
        s = ''
        for c in det[:, -1].unique():
            n = (det[:, -1] == c).sum()  # detections per class
            s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "

        # Draw the boxes on the original image
        for *xyxy, conf, cls in reversed(det):
            label = f'{names[int(cls)]} {conf:.2f}'
            plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=1)

    st.markdown("### Prediction result:")  # heading shown above the annotated image
    img0 = cv2.cvtColor(np.asarray(img0), cv2.COLOR_BGR2RGB)  # back to RGB for display
    st.image(img0, caption="Prediction Result", use_column_width=True)

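# Global settings for the app. The weights path below is an assumption about the deployment:
# yolov7.pt is expected to sit next to this script and, given the description rendered below,
# to be a YOLOv7 checkpoint trained for face detection.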
weight_path = './yolov7.pt'
imgsz = 640
conf = 0.4
conf_thres = 0.25
iou_thres = 0.45
device = torch.device("cpu")  # detect_modify reads this module-level device
path = "./"

model = attempt_load(weight_path, map_location=torch.device('cpu'))

"""
# YOLOv7
This is an object detection model for [Face].
"""
option = st.radio("", ["Upload Image", "Image URL"])

if option == "Upload Image":
    uploaded_file = st.file_uploader("Please upload an image.")
    if uploaded_file is not None:
        img = PILImage.create(uploaded_file)
        detect_modify(img, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
else:
    url = st.text_input("Please input a url.")
    if url != "":
        try:
            response = requests.get(url)
            pil_img = PILImage.create(BytesIO(response.content))
            detect_modify(pil_img, model, conf=conf, imgsz=imgsz, conf_thres=conf_thres, iou_thres=iou_thres)
        except Exception:
            st.text(f"Problem reading image from {url}")
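
# To launch the app locally (the filename is an assumption):
#   streamlit run app.py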
|