updated app
- app.py +16 -13
- requirements.txt +1 -1
app.py CHANGED
@@ -5,7 +5,7 @@ from PIL import Image
 from io import BytesIO
 import base64
 
-
+# ---------------------------------------------------------------------------------------
 # The session_state function allows us to initialize and save variables across for across
 # session states. This is a valuable feature that enables us to take different actions
 # depending on the state of selected variables in the code. If this is not done then
@@ -16,18 +16,19 @@ import base64
 # previous value (ss.file_uploaded_id) and if they are the same then we know we
 # don't need to call the face detection model again. We just simply need to process
 # the previous set of detections.
-
+# ---------------------------------------------------------------------------------------
 
 # Create application title and file uploader widget.
 st.title("OpenCV Deep Learning based Face Detection")
-img_file_buffer = st.file_uploader("Choose a file", type=[…
+img_file_buffer = st.file_uploader("Choose a file", type=["jpg", "jpeg", "png"])
 
 # Initialize session state variables
-if …
+if "file_uploaded_name" not in st.session_state:
     st.session_state.file_uploaded_name = None
-if …
+if "detections" not in st.session_state:
     st.session_state.detections = None
 
+
 # Function for detecting faces in an image.
 def detectFaceOpenCVDnn(net, frame):
     # Create a blob from the image and apply some pre-processing.
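For reference, a minimal sketch of the caching pattern the comments above describe, runnable on its own with streamlit run: the uploaded file's name and the raw detections are kept in st.session_state, and the model is called only when the name changes. The run_model helper below is a placeholder for the app's detectFaceOpenCVDnn, not code from this commit.

    import streamlit as st

    def run_model(uploaded_file):
        # Placeholder for the real detector (detectFaceOpenCVDnn in app.py).
        return [f"detections for {uploaded_file.name}"]

    # Initialize the session-state keys once per browser session.
    if "file_uploaded_name" not in st.session_state:
        st.session_state.file_uploaded_name = None
    if "detections" not in st.session_state:
        st.session_state.detections = None

    uploaded = st.file_uploader("Choose a file", type=["jpg", "jpeg", "png"])
    if uploaded is not None:
        if uploaded.name != st.session_state.file_uploaded_name:
            # New file: run the expensive model once and cache the result.
            st.session_state.file_uploaded_name = uploaded.name
            st.session_state.detections = run_model(uploaded)
        # Reruns triggered by other widgets reuse the cached detections.
        st.write(st.session_state.detections)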
@@ -38,6 +39,7 @@ def detectFaceOpenCVDnn(net, frame):
     detections = net.forward()
     return detections
 
+
 # Function for annotating the image with bounding boxes for each detected face.
 def process_detections(frame, detections, conf_threshold=0.5):
     bboxes = []
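The body of detectFaceOpenCVDnn sits mostly outside this hunk. As a rough sketch, the usual pre-processing for a Caffe SSD face detector looks like the following; the blob size, mean values, and model file names are the ones commonly used with the res10 model and are assumptions here, not lines from the diff.

    import cv2

    def detect_faces(net, frame):
        # Resize to the network's 300x300 input and subtract the per-channel mean
        # (values assumed; they are the ones commonly used with this model).
        blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), (104.0, 117.0, 123.0), swapRB=False, crop=False)
        net.setInput(blob)
        # The SSD output has shape (1, 1, N, 7):
        # [image_id, class_id, confidence, x1, y1, x2, y2] per detection.
        return net.forward()

    # Example usage (model file names assumed):
    # net = cv2.dnn.readNetFromCaffe("deploy.prototxt", "res10_300x300_ssd_iter_140000.caffemodel")
    # detections = detect_faces(net, cv2.imread("photo.jpg"))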
@@ -57,6 +59,7 @@ def process_detections(frame, detections, conf_threshold=0.5):
             cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), bb_line_thickness, cv2.LINE_8)
     return frame, bboxes
 
+
 # Function to load the DNN model.
 @st.cache_resource()
 def load_model():
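Only the rectangle call from process_detections appears in this hunk. The sketch below shows how SSD detections are typically filtered by confidence and scaled from normalized coordinates to pixels; the thickness heuristic and any names beyond those visible in the diff are assumptions.

    import cv2

    def draw_detections(frame, detections, conf_threshold=0.5):
        # SSD coordinates are normalized to [0, 1]; scale them by the frame size.
        frame_h, frame_w = frame.shape[:2]
        bboxes = []
        for i in range(detections.shape[2]):
            confidence = detections[0, 0, i, 2]
            if confidence > conf_threshold:
                x1 = int(detections[0, 0, i, 3] * frame_w)
                y1 = int(detections[0, 0, i, 4] * frame_h)
                x2 = int(detections[0, 0, i, 5] * frame_h)
                y2 = int(detections[0, 0, i, 6] * frame_h)
                bboxes.append((x1, y1, x2, y2))
                bb_line_thickness = max(1, round(frame_h / 200))  # assumed heuristic
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), bb_line_thickness, cv2.LINE_8)
        return frame, bboxes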
@@ -65,13 +68,13 @@ def load_model():
     net = cv2.dnn.readNetFromCaffe(configFile, modelFile)
     return net
 
+
 # Function to generate a download link for output file.
-def get_image_download_link(img, …
+def get_image_download_link(img, label, filename):
     buffered = BytesIO()
     img.save(buffered, format="JPEG")
-    …
-    …
-    return href
+    st.download_button(label, data=buffered.getvalue(), file_name=filename, mime="image/png")
+
 
 net = load_model()
 
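This hunk replaces the old base64 href helper with st.download_button, which takes the encoded bytes directly. A self-contained version of the new helper might look like this; the type hints and the image/jpeg MIME type (matched to the JPEG encoding) are editorial choices, not part of the commit.

    from io import BytesIO

    import streamlit as st
    from PIL import Image

    def image_download_button(img: Image.Image, label: str, filename: str) -> None:
        # Encode the PIL image to an in-memory JPEG and hand the bytes to Streamlit.
        buffered = BytesIO()
        img.save(buffered, format="JPEG")
        st.download_button(label, data=buffered.getvalue(), file_name=filename, mime="image/jpeg")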
@@ -85,11 +88,11 @@ if img_file_buffer is not None:
     # Create placeholders to display input and output images.
     placeholders = st.columns(2)
     # Display Input image in the first placeholder.
-    placeholders[0].image(image, channels=…
+    placeholders[0].image(image, channels="BGR")
     placeholders[0].text("Input Image")
 
     # Create a Slider and get the threshold from the slider.
-    conf_threshold = st.slider("SET Confidence Threshold", min_value=0.0, max_value=1.0, step…
+    conf_threshold = st.slider("SET Confidence Threshold", min_value=0.0, max_value=1.0, step=0.01, value=0.5)
 
     # Check if the loaded image is "new", if so call the face detection model function.
     if file_name != st.session_state.file_uploaded_name:
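OpenCV decodes images as BGR, so channels="BGR" tells st.image not to interpret the array as RGB, and the slider value feeds conf_threshold on every rerun, which is what lets the cached detections be re-filtered without another forward pass. A tiny standalone illustration (the solid-color image is made up for the demo):

    import numpy as np
    import streamlit as st

    bgr_image = np.zeros((200, 300, 3), dtype=np.uint8)
    bgr_image[:, :, 0] = 255  # blue channel in BGR; would render red if treated as RGB

    st.image(bgr_image, channels="BGR")  # displayed as blue
    conf_threshold = st.slider("SET Confidence Threshold", min_value=0.0, max_value=1.0, step=0.01, value=0.5)
    st.write(f"Current threshold: {conf_threshold}")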
@@ -105,10 +108,10 @@ if img_file_buffer is not None:
     out_image, _ = process_detections(image, st.session_state.detections, conf_threshold=conf_threshold)
 
     # Display Detected faces.
-    placeholders[1].image(out_image, channels=…
+    placeholders[1].image(out_image, channels="BGR")
     placeholders[1].text("Output Image")
 
     # Convert OpenCV image to PIL.
     out_image = Image.fromarray(out_image[:, :, ::-1])
     # Create a link for downloading the output file.
-    …
+    get_image_download_link(out_image, "Download Output Image", "output.jpg")
requirements.txt CHANGED
@@ -1,4 +1,4 @@
 numpy
-streamlit
+streamlit==1.44.1
 opencv-python-headless
 pillow