alexrods committed
Commit 29a05b7 • 1 Parent: de532ae
add documentation in app.py
app.py CHANGED
@@ -1,3 +1,11 @@
+'''
+This Python script is a web application that performs human body part segmentation
+using a pre-trained deep learning model called DeepLabv3+.
+The application is built using the Streamlit library and uses the Hugging Face Hub
+to download the pre-trained model.
+'''
+
+# import libraries
 import numpy as np
 import tensorflow as tf
 import streamlit as st
@@ -5,25 +13,34 @@ from PIL import Image
 from huggingface_hub import from_pretrained_keras
 import cv2
 
-
+# The model used is the DeepLabv3+ model with a ResNet50 backbone.
 model = from_pretrained_keras("keras-io/deeplabv3p-resnet50")
 
+# A colormap is defined to map the predicted segmentation masks to colors for better visualization
 colormap = np.array([[0,0,0], [31,119,180], [44,160,44], [44, 127, 125], [52, 225, 143],
                      [217, 222, 163], [254, 128, 37], [130, 162, 128], [121, 7, 166], [136, 183, 248],
                      [85, 1, 76], [22, 23, 62], [159, 50, 15], [101, 93, 152], [252, 229, 92],
                      [167, 173, 17], [218, 252, 252], [238, 126, 197], [116, 157, 140], [214, 220, 252]], dtype=np.uint8)
-
+
+# size of the input image is defined as 512x512 pixels
 img_size = 512
 
 def read_image(image):
+    '''
+    read_image: reads in the input image and preprocesses it
+    by resizing it to the defined size and normalizing it to values between -1 and 1
+    '''
     image = tf.convert_to_tensor(image)
     image.set_shape([None, None, 3])
     image = tf.image.resize(images=image, size=[img_size, img_size])
-    image = image /
+    image = image / 255
     return image
 
 
 def infer(model, image_tensor):
+    '''
+    infer: performs inference using the pre-trained model and returns the predicted segmentation mask.
+    '''
    predictions = model.predict(np.expand_dims((image_tensor), axis=0))
    predictions = np.squeeze(predictions)
    predictions = np.argmax(predictions, axis=2)
@@ -31,6 +48,10 @@ def infer(model, image_tensor):
 
 
 def decode_segmentation_masks(mask, colormap, n_classes):
+    '''
+    decode_segmentation_masks: maps the predicted segmentation mask to the defined colormap
+    to produce a colored mask.
+    '''
     r = np.zeros_like(mask).astype(np.uint8)
     g = np.zeros_like(mask).astype(np.uint8)
     b = np.zeros_like(mask).astype(np.uint8)
@@ -44,6 +65,9 @@ def decode_segmentation_masks(mask, colormap, n_classes):
 
 
 def get_overlay(image, colored_mask):
+    '''
+    get_overlay: overlays the colored mask on the original image for visualization
+    '''
     image = tf.keras.preprocessing.image.array_to_img(image)
     image = np.array(image).astype(np.uint8)
     overlay = cv2.addWeighted(image, 0.35, colored_mask, 0.65, 0)
@@ -51,6 +75,14 @@ def get_overlay(image, colored_mask):
 
 
 def segmentation(input_image):
+    '''
+    segmentation:
+    returns,
+    - prediction_colormap: function is used to convert the prediction mask into a colored mask,
+    where each class is assigned a unique color from a predefined color map.
+
+    - overlay: used to create an overlay image by blending the original input image with the colored mask
+    '''
     image_tensor = read_image(input_image)
     prediction_mask = infer(image_tensor=image_tensor, model=model)
     prediction_colormap = decode_segmentation_masks(prediction_mask, colormap, 20)
@@ -58,7 +90,10 @@ def segmentation(input_image):
     return (overlay, prediction_colormap)
 
 
+## Streamlit interface
+
 st.header("Segmentación de partes del cuerpo humano")
+
 st.subheader("Demo de Spaces usando Streamlit y segmentacion de imagenes [Space original](https://huggingface.co/spaces/PKaushik/Human-Part-Segmentation)")
 
 st.markdown("Sube una imagen o selecciona un ejemplo para segmentar las distintas partes del cuerpo humano")
@@ -102,4 +137,7 @@ if file_imagen is not None:
     with col2:
         st.subheader("Mask: ")
         st.image(output[1], width=425)
-
+else:
+    st.write("Ocurrió un error al segmentar la imagen, intenta con otra imagen")
+
+
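For reference, the functions documented in this commit compose into one pipeline, which is what segmentation() wraps: read_image resizes and scales the input, infer produces a per-pixel class mask, decode_segmentation_masks colors it, and get_overlay blends it with the input. Below is a minimal sketch of driving that pipeline outside Streamlit; it assumes app.py (above) is importable from the working directory, and the sample file name person.jpg is hypothetical.

# Illustrative sketch only -- not part of the commit. Assumes app.py is
# importable; "person.jpg" is a hypothetical local sample image.
import numpy as np
from PIL import Image

from app import read_image, infer, decode_segmentation_masks, get_overlay, model, colormap

image = np.array(Image.open("person.jpg").convert("RGB"))     # H x W x 3, uint8
image_tensor = read_image(image)                              # resized to 512 x 512 and scaled
mask = infer(model=model, image_tensor=image_tensor)          # per-pixel class indices
colored_mask = decode_segmentation_masks(mask, colormap, 20)  # class indices -> RGB colors
overlay = get_overlay(image_tensor, colored_mask)             # blend of input and colored mask

Image.fromarray(colored_mask).save("mask.png")
Image.fromarray(overlay).save("overlay.png")

The Space itself runs these same steps through streamlit run app.py, with the uploaded file taking the place of the hypothetical person.jpg.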
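One note on the decode_segmentation_masks docstring added in this commit: colormap is a (20, 3) uint8 array and the prediction mask holds class indices in the range 0-19, so the mapping it describes is what NumPy fancy indexing does in a single step. A small self-contained sketch with a toy 3-class mask (not the app's code) to illustrate:

import numpy as np

# Toy 2x3 "prediction mask" of class indices and a tiny 3-class colormap
mask = np.array([[0, 1, 2],
                 [2, 1, 0]])
colormap = np.array([[0, 0, 0],        # class 0 -> black
                     [31, 119, 180],   # class 1 -> blue
                     [44, 160, 44]],   # class 2 -> green
                    dtype=np.uint8)

# Fancy indexing maps every class index to its RGB color in one step
colored_mask = colormap[mask]          # shape (2, 3, 3), dtype uint8
print(colored_mask[0, 1])              # [ 31 119 180], the color of class 1

app.py instead builds the r, g and b channels separately (the lines visible in the diff), which reads naturally next to the colormap definition; either way the result is an (H, W, 3) uint8 image that get_overlay can blend with cv2.addWeighted.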