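"""Streamlit face verification app.

Detects a face in each of two uploaded images with MTCNN and compares them using
either a Siamese network (currently disabled; see the commented-out block in
verify()) or a HOG + PCA + SVM pipeline loaded from svm.pkl and pca.pkl.

Run locally with: streamlit run <path to this file>
"""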
import pickle
import tempfile

import cv2
import streamlit as st
import torch
import torch.nn as nn                     # kept for the (disabled) Siamese branch
from mtcnn import MTCNN
from skimage.feature import hog
from torchvision import transforms
from torchvision.models import resnet50   # kept for the (disabled) Siamese branch

def preprocess_image_siamese(img):
    """Convert a BGR face crop to a 224x224 RGB tensor for the Siamese model."""
    transform = transforms.Compose([
        transforms.ToPILImage(),          # Resize expects a PIL image or tensor, not a raw ndarray
        transforms.Resize((224, 224)),
        transforms.ToTensor()
    ])
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return transform(img)

def preprocess_image_svm(img):
    """Resize a face crop to 224x224 and convert it to grayscale for HOG."""
    img = cv2.resize(img, (224, 224))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img

def extract_hog_features(img):
    """Compute a HOG descriptor for a grayscale face crop."""
    return hog(img, orientations=9,
               pixels_per_cell=(8, 8),
               cells_per_block=(2, 2))

def get_face(img):
    """Detect the first face with MTCNN and return its crop, or None if no face is found."""
    detector = MTCNN()
    faces = detector.detect_faces(img)
    if not faces:
        return None
    x1, y1, w, h = faces[0]['box']
    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + w, y1 + h
    return img[y1:y2, x1:x2]

def verify(img1, img2, model_type):
    """Detect, display, and compare the faces in two uploaded images."""
    # Persist the uploads to temporary files so OpenCV can read them from disk
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_img1:
        temp_img1.write(img1.read())
        temp_img1_path = temp_img1.name
    with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_img2:
        temp_img2.write(img2.read())
        temp_img2_path = temp_img2.name

    img1p = cv2.imread(temp_img1_path)
    img2p = cv2.imread(temp_img2_path)

    face1 = get_face(img1p)
    face2 = get_face(img2p)

    # get_face returns None when MTCNN finds no face
    if face1 is not None and face2 is not None:
        # The crops are OpenCV BGR arrays, so tell Streamlit how to interpret them
        st.image([face1, face2], caption=["Image 1", "Image 2"],
                 width=200, channels="BGR")
        # if model_type == "Siamese":
        #     face1_tensor = preprocess_image_siamese(face1)
        #     face2_tensor = preprocess_image_siamese(face2)
        #     # Get predictions
        #     with torch.no_grad():
        #         prediction = model(face1_tensor, face2_tensor)
        #     # Threshold decision
        #     if prediction.item() > 0.5:
        #         st.write("Matched")
        #     else:
        #         st.write("Not Matched")
        #     st.write(f"Confidence: {prediction.item():.4f}")
        if model_type == "HOG-SVM":
            # Load the trained SVM classifier and the fitted PCA transformer
            with open('svm.pkl', 'rb') as f:
                svm = pickle.load(f)
            with open('pca.pkl', 'rb') as f:
                pca = pickle.load(f)

            face1 = preprocess_image_svm(face1)
            face2 = preprocess_image_svm(face2)

            hog1 = extract_hog_features(face1)
            hog2 = extract_hog_features(face2)

            hog1_pca = pca.transform([hog1])
            hog2_pca = pca.transform([hog2])

            pred1 = svm.predict(hog1_pca)
            pred2 = svm.predict(hog2_pca)

            if pred1[0] == 1 and pred2[0] == 1:
                st.write("Matched")
            else:
                st.write("Not Matched")
    else:
        st.write("Face not detected in one or both images")

def main():
    st.title("Face Verification App")

    model_type = st.selectbox("Select Model", ["Siamese", "HOG-SVM"])

    uploaded_img1 = st.file_uploader("Upload Image 1", type=["jpg", "png"])
    uploaded_img2 = st.file_uploader("Upload Image 2", type=["jpg", "png"])

    if uploaded_img1 and uploaded_img2:
        if st.button("Verify Faces"):
            verify(uploaded_img1, uploaded_img2, model_type)


if __name__ == "__main__":
    main()