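"""Streamlit front end for YOLOv5-based obstacle detection.

Runs inference on an uploaded image or on an image from the bundled test set,
optionally downloading the trained weights (best.pt) from a remote URL first.
"""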
import streamlit as st
import torch
from detect import *
from PIL import Image
from io import *
import glob
from datetime import datetime
import os
import wget
import time
## CFG
cfg_model_path = "best.pt"
cfg_enable_url_download = True
if cfg_enable_url_download:
    url = "https://archive.org/download/best_20230416/best.pt"  # configure this if cfg_enable_url_download is True
    cfg_model_path = f"models/{url.split('/')[-1]}"  # derive the local model path from the URL's file name
## END OF CFG
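
# imageInput: runs image inference on either an uploaded file or an image
# picked from the bundled test set, and shows input and output side by side.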
def imageInput(device, src):
    if src == 'Upload your own data.':
        image_file = st.file_uploader("Upload An Image", type=['png', 'jpeg', 'jpg'])
        col1, col2 = st.columns(2)
        if image_file is not None:
            img = Image.open(image_file)
            with col1:
                st.image(img, caption='Uploaded Image', use_column_width='always')
            ts = datetime.timestamp(datetime.now())
            imgpath = os.path.join('data/uploads', str(ts) + image_file.name)
            outputpath = os.path.join('data/outputs', os.path.basename(imgpath))
            with open(imgpath, mode="wb") as f:
                f.write(image_file.getbuffer())
            # -- call model prediction
            model = torch.hub.load('ultralytics/yolov5', 'custom', path=cfg_model_path, force_reload=True)
            model.to(device)
            pred = model(imgpath)
            pred.render()  # draw bounding boxes on the image
            for im in pred.ims:
                im_base64 = Image.fromarray(im)
                im_base64.save(outputpath)
            # -- display prediction
            img_ = Image.open(outputpath)
            with col2:
                st.image(img_, caption='Model Prediction(s)', use_column_width='always')
    elif src == 'From test set.':
        # Image selector slider
        imgpath = glob.glob('data/images/*')
        imgsel = st.slider('Select an image from the test set.', min_value=1, max_value=len(imgpath), step=1)
        image_file = imgpath[imgsel - 1]
        submit = st.button("Predict!")
        col1, col2 = st.columns(2)
        with col1:
            img = Image.open(image_file)
            st.image(img, caption='Selected Image', use_column_width='always')
        with col2:
            if image_file is not None and submit:
                # -- call model prediction
                model = torch.hub.load('ultralytics/yolov5', 'custom', path=cfg_model_path, force_reload=True)
                pred = model(image_file)
                pred.render()  # draw bounding boxes on the image
                for im in pred.ims:
                    im_base64 = Image.fromarray(im)
                    im_base64.save(os.path.join('data/outputs', os.path.basename(image_file)))
                # -- display prediction
                img_ = Image.open(os.path.join('data/outputs', os.path.basename(image_file)))
                st.image(img_, caption='Model Prediction(s)')
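
# main: builds the sidebar controls (data source, input type, compute device)
# and dispatches to imageInput. A 'Video' input type is offered in the sidebar
# but is not handled yet.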
def main():
    # -- Sidebar
    st.sidebar.title('⚙️ Options')
    datasrc = st.sidebar.radio("Select input source.", ['From test set.', 'Upload your own data.'])
    option = st.sidebar.radio("Select input type.", ['Image', 'Video'])
    if torch.cuda.is_available():
        deviceoption = st.sidebar.radio("Select compute device.", ['cpu', 'cuda'], disabled=False, index=1)
    else:
        deviceoption = st.sidebar.radio("Select compute device.", ['cpu', 'cuda'], disabled=True, index=0)
    # -- End of Sidebar
    st.header('📦 Obstacle Detection')
    st.subheader('👈🏽 Select options from the left-hand menu bar.')
    st.sidebar.markdown("https://github.com/thepbordin/Obstacle-Detection-for-Blind-people-Deployment")
    if option == "Image":
        imageInput(deviceoption, datasrc)
# Download the model from the URL (cached so the download runs only once per session).
@st.cache
def loadModel():
    start_dl = time.time()
    model_file = wget.download(url, out="models/")
    finished_dl = time.time()
    print(f"Model downloaded, download time: {finished_dl - start_dl:.2f}s")

# Fetch the weights before any inference is attempted.
if cfg_enable_url_download:
    loadModel()

if __name__ == '__main__':
    main()
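
# Run locally with: streamlit run app.py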