naman1011 committed
Commit f823189 · 1 Parent(s): 8fa6a1e

Update model and draw boundary

Files changed (2)
  1. app.py +101 -29
  2. models/bmi.h5 +3 -0
app.py CHANGED
@@ -1,45 +1,117 @@
  import cv2
  import streamlit as st
- from keras_vggface.vggface import VGGFace
- import tensorflow as tf
- from tensorflow.keras.applications import ResNet50
- from tensorflow.keras.preprocessing import image
- from tensorflow.keras.models import Model
- from tensorflow.keras.layers import GlobalAveragePooling2D
+ from PIL import Image
  import numpy as np
- import pickle
-
- pickle_file_path = 'models/svm_model.pkl'
+ from tensorflow.keras.preprocessing import image
+ from tensorflow.keras.applications.vgg16 import preprocess_input
+ import tensorflow as tf
+ from base64 import b64decode, b64encode
+ import PIL
+ import io
+ import html
+ import time

- with open(pickle_file_path, 'rb') as file:
-     svm_model = pickle.load(file)

- base_model = VGGFace(model='vgg16', include_top=False, input_shape=(224, 224, 3))
- x = base_model.output
- x = GlobalAveragePooling2D()(x)
- model = Model(inputs=base_model.input, outputs=x)
+ model_file_path = 'models/bmi.h5'
+ model = tf.keras.models.load_model(model_file_path)

- # Function to preprocess the image
- def preprocess_image(img):
-     img = cv2.resize(img, (224, 224))
+ # Preprocess the images for VGG16
+ def preprocess_image(img_path):
+     img = image.load_img(img_path, target_size = (224, 224))
      img = image.img_to_array(img)
-     img = np.expand_dims(img, axis=0)
-     img = img[0] # Remove the extra dimension
+     img = np.expand_dims(img, axis = 0)
+     img = preprocess_input(img)
      return img
+
+
+ # function to convert OpenCV Rectangle bounding box image into base64 byte string to be overlayed on video stream
+ def bbox_to_bytes(bbox_array):
+     """
+     Params:
+         bbox_array: Numpy array (pixels) containing rectangle to overlay on video stream.
+     Returns:
+         bytes: Base64 image byte string
+     """
+     # convert array into PIL image
+     bbox_PIL = PIL.Image.fromarray(bbox_array, 'RGBA')
+     iobuf = io.BytesIO()
+     # format bbox into png for return
+     bbox_PIL.save(iobuf, format='png')
+     # format return string
+     bbox_bytes = 'data:image/png;base64,{}'.format((str(b64encode(iobuf.getvalue()), 'utf-8')))
+
+     return bbox_bytes
+
+ # base_model = VGGFace(model='vgg16', include_top=False, input_shape=(224, 224, 3))
+ # x = base_model.output
+ # x = GlobalAveragePooling2D()(x)
+ # model = Model(inputs=base_model.input, outputs=x)
+
+ # # Function to preprocess the image
+ # def preprocess_image(img):
+ #     img = cv2.resize(img, (224, 224))
+ #     img = image.img_to_array(img)
+ #     img = np.expand_dims(img, axis=0)
+ #     img = img[0] # Remove the extra dimension
+ #     return img

- def extract_features(image_array):
-     # img = np.squeeze(image_array, axis=0)
-     img = np.expand_dims(image_array, axis=0)
-     img = tf.keras.applications.resnet50.preprocess_input(img)
-     features = model.predict(img,verbose=0)
-     return features.flatten()
+ # def extract_features(image_array):
+ #     # img = np.squeeze(image_array, axis=0)
+ #     img = np.expand_dims(image_array, axis=0)
+ #     img = tf.keras.applications.resnet50.preprocess_input(img)
+ #     features = model.predict(img,verbose=0)
+ #     return features.flatten()

  # Function to predict BMI
+
+ def draw_boundary(img):
+     # initialize the Haar Cascade face detection model
+     face_cascade = cv2.CascadeClassifier(cv2.samples.findFile(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml'))
+
+     # initialze bounding box to empty
+     bbox = ''
+     count = 0
+     while True:
+
+         # create transparent overlay for bounding box
+         bbox_array = np.zeros([480,640,4], dtype=np.uint8)
+
+         # grayscale image for face detection
+         gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
+
+         # get face region coordinates
+         faces = face_cascade.detectMultiScale(gray)
+         # get face bounding box for overlay
+         for (x, y, w, h) in faces:
+             # Extract the face region from the frame
+             face = img[y:y+h, x:x+w]
+
+             # Preprocess the face image
+             face = cv2.resize(face, (224, 224))
+             face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+             face = preprocess_input(face)/255.
+             face = np.expand_dims(face, axis=0)
+
+             # Predict BMI using the pre-trained model
+             bmi = model.predict(face)[0][0]
+
+             # Draw the predicted BMI on the frame
+             bbox_array = cv2.putText(bbox_array, f'BMI: {bmi:.2f}', (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (0, 255, 0), 2)
+
+             # Draw a rectangle around the face
+             bbox_array = cv2.rectangle(bbox_array, (x, y), (x+w, y+h), (255, 0, 0), 2)
+
+         bbox_array[:,:,3] = (bbox_array.max(axis = 2) > 0 ).astype(int) * 255
+         # convert overlay of bbox into bytes
+         bbox_bytes = bbox_to_bytes(bbox_array)
+         # update bbox so next frame gets new overlay
+         bbox = bbox_bytes
+
+     return img
+
  def predict_bmi(img):
      pre_img = preprocess_image(img)
-     features = extract_features(pre_img)
-     features = features.reshape(1,-1)
-     pred = svm_model.predict(features)
+     pred = draw_boundary(pre_img)
      return pred

  def main():
models/bmi.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:72043cba1fe1ab39ff72bf8581ed4547a9506376f2befdbd680745984684403f
+ size 136023928
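
Note: a minimal sketch of how the updated single-image path could be exercised outside the Streamlit app, mirroring the per-face preprocessing in draw_boundary above. The input path 'face.jpg' is a placeholder, and the snippet assumes the LFS-tracked models/bmi.h5 has been pulled locally; it is illustrative only, not part of the commit.

import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import preprocess_input

# Load the committed regression model (run `git lfs pull` first so models/bmi.h5 holds the real weights).
model = tf.keras.models.load_model('models/bmi.h5')

# 'face.jpg' is a hypothetical input; any BGR image readable by OpenCV works here.
img = cv2.imread('face.jpg')

# Same preprocessing steps as the per-face branch of draw_boundary().
face = cv2.resize(img, (224, 224))
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = preprocess_input(face) / 255.
face = np.expand_dims(face, axis=0)

# The model returns a single BMI value for the face crop.
bmi = model.predict(face)[0][0]
print(f'Predicted BMI: {bmi:.2f}')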