from . import face_recognition_model
import numpy as np
import cv2
from matplotlib import pyplot as plt
import torch
import torch.nn.functional as F  # F.pairwise_distance is used in get_similarity below
# In the line below, remove the leading '.' when working on your local system.
# When uploading to the server, make sure the '.' is present before
# face_recognition_model; do not remove it there.
from .face_recognition_model import *
from PIL import Image
import base64
import io
import os
import joblib
import pickle
from joblib import load
# Add more imports if required



###########################################################################################################################################
#         Caution: Don't change any of the filenames, function names, or definitions                                                      #
#         Always use current_path + file_name when referring to any files; without it we cannot access files on the server                #
###########################################################################################################################################

# current_path stores the absolute path of the directory containing this file.
current_path = os.path.dirname(os.path.abspath(__file__))

#1) The function below detects faces in the given image.
#2) It returns the single face crop with the maximum area among all detected faces in the photo.
#3) If no face is detected, it returns zero (0).

def detected_face(image):
    eye_haar = current_path + '/haarcascade_eye.xml'
    face_haar = current_path + '/haarcascade_frontalface_default.xml'
    face_cascade = cv2.CascadeClassifier(face_haar)
    eye_cascade = cv2.CascadeClassifier(eye_haar)  # loaded for completeness; not used below
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    face_areas = []
    images = []
    required_image = 0
    for (x, y, w, h) in faces:
        face_cropped = gray[y:y+h, x:x+w]
        face_areas.append(w * h)
        images.append(face_cropped)
    # Pick the largest detected face once, after the loop, instead of
    # recomputing the argmax on every iteration
    if face_areas:
        required_image = Image.fromarray(images[np.argmax(face_areas)])
    return required_image
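

# The API-facing functions below receive images that arrive base64-encoded.
# This module does not ship a decoder, so the helper below is a minimal
# sketch, assuming the payload is a plain base64 string of an image file.
# The name decode_base64_image is illustrative, not part of the original API.
def decode_base64_image(b64_string):
    img_bytes = base64.b64decode(b64_string)                    # base64 -> raw bytes
    pil_img = Image.open(io.BytesIO(img_bytes)).convert('RGB')  # bytes -> PIL image
    return cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)   # RGB -> OpenCV BGR array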


#1) The images captured from mobile are passed as parameters to the function below in the API call. It returns a similarity measure between the given images.
#2) Each image is passed to the function in base64 encoding; decode it before use (one illustrative sketch is given in decode_base64_image above).
#3) Define an object for your Siamese network inside the function, load the weights from the trained network, and set it to evaluation mode.
#4) Get the features for both faces from the network and return a similarity measure; Euclidean distance, cosine similarity, etc. are all options. Choose the relevant measure.
#5) For loading your model, use current_path + '<your model file name>'; a detailed example is given in the comments inside the function.
#Caution: Don't change the function name or definition. For loading the model use current_path; an example is given in the comments inside the function.
def get_similarity(img1, img2):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    
    det_img1 = detected_face(img1)
    det_img2 = detected_face(img2)
    # Fall back to the full grayscale image for whichever photo has no
    # detectable face, instead of discarding both detections
    if det_img1 == 0:
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
    if det_img2 == 0:
        det_img2 = Image.fromarray(cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY))
    face1 = trnscm(det_img1).unsqueeze(0)
    face2 = trnscm(det_img2).unsqueeze(0)
    ##########################################################################################
    ## Example of loading a model from a weight state dictionary:                           ##
    ## feature_net = light_cnn()  # example network                                         ##
    ## model = torch.load(current_path + '/siamese_model.t7', map_location=device)          ##
    ## feature_net.load_state_dict(model['net_dict'])                                       ##
    ##                                                                                      ##
    ## current_path + '/<network_definition>' is the path of the saved model when it sits   ##
    ## in the same directory as this file; we recommend keeping it there.                   ##
    ##########################################################################################

    feature_net = Siamese().to(device)
    ckpt = torch.load(current_path + '/siamese_model.t7', map_location=device)
    feature_net.load_state_dict(ckpt['net_dict'])
    feature_net.eval()  # evaluation mode, as required by the instructions above

    with torch.no_grad():
        output1, output2 = feature_net(face1.to(device), face2.to(device))
        # Euclidean distance between the two embeddings: smaller means more similar
        euclidean_distance = F.pairwise_distance(output1, output2)

    return euclidean_distance.item()

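
# A raw Euclidean distance is unbounded (0 means identical, larger means more
# different). If a caller expects a bounded similarity score instead, the
# helper below shows one common mapping; it is an illustrative assumption,
# not part of the original API.
def distance_to_similarity(distance):
    # Maps a distance in [0, inf) to a similarity in (0, 1]; 1.0 = identical
    return 1.0 / (1.0 + distance)
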
    
#1) The image captured from mobile is passed as a parameter to this function in the API call. It returns the face class as a string, e.g. "Person1".
#2) The image is passed to the function in base64 encoding; decode it before use (see the decoding sketch above).
#3) Define an object for your network inside the function, load the weights from the trained network, and set it to evaluation mode.
#4) Perform the necessary transformations on the input (the face detected by the function above).
#5) Along with the Siamese network you also need the classifier, fine-tuned on the faces you are training on.
##Caution: Don't change the function name or definition. For loading the model use current_path; an example is given in the comments inside get_similarity.
def get_face_class(img1):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    det_img1 = detected_face(img1)
    # Fall back to the full grayscale image if no face was detected
    if det_img1 == 0:
        det_img1 = Image.fromarray(cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY))
    # The Siamese network acts as a feature extractor; its embedding is fed to
    # a classifier fine-tuned on the enrolled faces (the Siamese experiment is
    # covered in one of the labs)
    face1 = trnscm(det_img1).unsqueeze(0)

    feature_net = Siamese().to(device)
    ckpt = torch.load(current_path + '/siamese_model.t7', map_location=device)
    feature_net.load_state_dict(ckpt['net_dict'])
    feature_net.eval()

    with torch.no_grad():
        # Passing the same face through both branches yields its embedding
        output1, _ = feature_net(face1.to(device), face1.to(device))
    embedding = output1.cpu().numpy()

    clf_model = load(current_path + '/clf_model.joblib')
    label = clf_model.predict(embedding)
    # predict() returns an array; the API expects a plain string such as "Person1"
    return str(label[0])
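

# For reference: clf_model.joblib above is assumed to hold a scikit-learn
# classifier fitted on Siamese embeddings of the enrolled faces. The sketch
# below shows one plausible way such a file could be produced offline; SVC
# and the parameter names are assumptions, not mandated by this setup.
def _train_face_classifier(embeddings, labels, out_path=current_path + '/clf_model.joblib'):
    # embeddings: (n_samples, feature_dim) array of Siamese outputs
    # labels: list of class strings such as ["Person1", "Person2", ...]
    from sklearn.svm import SVC
    clf = SVC(kernel='linear', probability=True)
    clf.fit(embeddings, labels)
    joblib.dump(clf, out_path)
    return clf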