Spaces:
Sleeping
Sleeping
File size: 2,509 Bytes
c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc af18329 c2b7cdc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 |
import cv2
import streamlit as st
import tempfile
import torch
from torchvision import transforms
from mtcnn import MTCNN
from skimage.feature import hog
import joblib
import numpy as np
# Preprocessing for Siamese Model
def preprocess_image_siamese(img):
    """Prepare a BGR face crop for the Siamese network.

    Parameters:
        img: HxWx3 uint8 numpy array in OpenCV BGR channel order.

    Returns:
        A float tensor of shape (3, 224, 224) with values in [0, 1].
    """
    # BUG FIX: ToTensor must run before Resize. transforms.Resize does not
    # accept a raw numpy array (it requires a PIL Image or a Tensor), so the
    # original order (Resize first) raised a TypeError at call time.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((224, 224)),
    ])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # model presumably trained on RGB
    return transform(img)
# Preprocessing for SVM model (converting to grayscale)
def preprocess_image_svm(img):
    """Resize a BGR image to 224x224 and convert it to grayscale.

    Order matters: resizing happens before the color conversion, matching
    how the SVM's training images were prepared.
    """
    resized = cv2.resize(img, (224, 224))
    return cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
# Extract HOG Features
def extract_hog_features(img):
    """Compute the HOG descriptor of a grayscale image.

    Uses 9 orientations, 16x16-pixel cells and 4x4-cell blocks — these must
    match the parameters used when the SVM/PCA models were trained.
    """
    return hog(
        img,
        orientations=9,
        pixels_per_cell=(16, 16),
        cells_per_block=(4, 4),
    )
# Detect faces using MTCNN
def get_face(img):
    """Return the crop of the first face MTCNN finds in *img*, or None.

    Only the top detection is used; its bounding box is clamped with abs()
    because MTCNN can report slightly negative coordinates near the edges.
    """
    detections = MTCNN().detect_faces(img)
    if not detections:
        return None
    x, y, w, h = detections[0]['box']
    x, y = abs(x), abs(y)
    return img[y:y + h, x:x + w]
# Function to verify face (either HOG-SVM or Siamese model)
def verify(image, model, person):
    """Verify the uploaded camera shot against *person*'s stored model.

    Parameters:
        image:  a Streamlit UploadedFile (file-like) containing a JPEG/PNG.
        model:  "HOG-SVM" or "Siamese" (from the selectbox in main()).
        person: person name; lowercased to locate ./svm_<p>.pkl / ./pca_<p>.pkl.

    Writes the verdict to the Streamlit page; returns None.
    """
    # BUG FIX: decode the upload in memory instead of round-tripping through
    # a NamedTemporaryFile(delete=False) that was never removed (file leak).
    buf = np.frombuffer(image.read(), dtype=np.uint8)
    frame = cv2.imdecode(buf, cv2.IMREAD_COLOR)
    face = get_face(frame)
    if face is None:
        st.write("Face not detected in one or both images")
        return
    # BUG FIX: original compared the undefined name `model_type` (NameError);
    # the parameter is called `model`.
    if model == "HOG-SVM":
        # BUG FIX: `lower(person)` is not a builtin — use str.lower().
        with open(f'./svm_{person.lower()}.pkl', 'rb') as f:
            svm = joblib.load(f)
        with open(f'./pca_{person.lower()}.pkl', 'rb') as f:
            pca = joblib.load(f)
        face = preprocess_image_svm(face)
        # Renamed from `hog`: that name shadowed the imported skimage hog().
        features = extract_hog_features(face)
        features_pca = pca.transform([features])
        pred = svm.predict(features_pca)
        st.write("Match" if pred == 1 else "Not Match")
    else:
        # The Siamese branch was silently a no-op in the original; surface
        # that instead of leaving the user with a blank page.
        st.write(f"{model} verification is not implemented yet")
# Main function to handle Streamlit interaction
def main():
    """Streamlit entry point: choose a model and person, then verify a shot."""
    st.title("Real-time Face Verification App")
    chosen_model = st.selectbox("Select Model", ["Siamese", "HOG-SVM"])
    chosen_person = st.selectbox("Select Person", ["Theo"])
    camera_enabled = st.checkbox("Enable camera")
    # Camera input stays disabled until the user opts in via the checkbox.
    shot = st.camera_input("Take a picture", disabled=not camera_enabled)
    if shot:
        verify(shot, chosen_model, chosen_person)


if __name__ == "__main__":
    main()
|