File size: 2,572 Bytes
9067733
03d287b
9067733
 
 
03d287b
9067733
 
 
 
 
03d287b
 
 
 
9067733
03d287b
9067733
 
 
 
 
 
 
03d287b
9067733
 
 
 
 
 
 
 
 
 
 
 
 
 
 
03d287b
9067733
03d287b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9067733
 
 
 
 
 
03d287b
 
9067733
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import gradio as gr
from gradio.components import Dropdown
import cv2 as cv
import torch
from torchvision import transforms
from DeePixBiS.Model import DeePixBiS


# Display names for the two classes.
# NOTE(review): inference() below writes the strings 'Real'/'Spoof' directly;
# this list appears unused in the visible code — confirm intended wording.
labels = ['Live', 'Spoof']
# Decision threshold on the mean of the model's pixel-wise output map:
# mean scores below this value are classified as spoof in inference().
thresh = 0.45
# [image path, model name] pairs shown as clickable examples in the Gradio UI.
examples = [
    ['examples/1_1_21_2_33_scene_fake.jpg', "DeePixBiS"],
    ['examples/frame150_real.jpg', "DeePixBiS"],
    ['examples/1_2.avi_125_real.jpg', "DeePixBiS"],
    ['examples/1_3.avi_25_fake.jpg', "DeePixBiS"]]
# CPU-only inference. NOTE(review): `device` is never referenced below —
# the model is also never moved to it; confirm whether it is dead code.
device = torch.device("cpu")
# Haar-cascade face detector shipped with the project.
faceClassifier = cv.CascadeClassifier('./DeePixBiS/Classifiers/haarface.xml')
# Preprocessing pipeline: ndarray -> PIL -> 224x224 -> tensor normalized
# per channel with mean/std 0.5 (i.e. values mapped into [-1, 1]).
tfms = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Load the trained anti-spoofing weights and switch to eval mode
# (freezes dropout / batch-norm statistics for inference).
model = DeePixBiS(pretrained=False)
model.load_state_dict(torch.load('./DeePixBiS/DeePixBiS.pth'))
model.eval()


def find_largest_face(faces):
    """Return the detection with the largest pixel area, or None.

    Parameters
    ----------
    faces : iterable
        Rectangles as (x, y, w, h), e.g. the result of
        ``cv.CascadeClassifier.detectMultiScale``.

    Returns
    -------
    tuple or None
        The (x, y, w, h) of the biggest rectangle; None when `faces` is
        empty or every rectangle has zero area. Ties keep the first seen.
    """
    best, best_area = None, 0
    for rect in faces:
        x, y, w, h = rect
        area = w * h
        if area > best_area:
            best, best_area = (x, y, w, h), area
    return best


def inference(img, model_name):
    """Run face anti-spoofing on a single BGR frame and annotate it.

    Parameters
    ----------
    img : numpy.ndarray
        BGR image as delivered by OpenCV / the Gradio webcam component.
        Annotated in place (rectangle + label drawn on it).
    model_name : str
        Selected model; only 'DeePixBiS' is implemented in this branch.

    Returns
    -------
    tuple
        (annotated image, {class_name: confidence}); the dict is empty
        when no face is detected or the model name is unrecognized.
    """
    confidences = {}
    if model_name == 'DeePixBiS':
        grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
        faces = faceClassifier.detectMultiScale(
            grey, scaleFactor=1.1, minNeighbors=4)
        face = find_largest_face(faces)

        if face is not None:
            x, y, w, h = face
            faceRegion = img[y:y + h, x:x + w]
            # The preprocessing pipeline (tfms) expects RGB; OpenCV is BGR.
            faceRegion = cv.cvtColor(faceRegion, cv.COLOR_BGR2RGB)
            faceRegion = tfms(faceRegion)
            faceRegion = faceRegion.unsqueeze(0)  # add batch dimension
            # Inference only: no_grad skips building the autograd graph,
            # and calling the model (not .forward) runs registered hooks.
            with torch.no_grad():
                mask, binary = model(faceRegion)
            # Mean of the pixel-wise liveness map is the liveness score.
            res = torch.mean(mask).item()
            if res < thresh:
                cls = 'Spoof'
                color = (0, 0, 255)  # red box (BGR)
                res = 1 - res  # report confidence in the predicted class
            else:
                # NOTE(review): module-level `labels` says 'Live' — confirm
                # which wording is intended before unifying.
                cls = 'Real'
                color = (0, 255, 0)  # green box (BGR)
            label = f'{cls} {res:.2f}'
            cv.rectangle(img, (x, y), (x + w, y + h), color, 2)
            cv.putText(img, label, (x, y + h + 30),
                       cv.FONT_HERSHEY_COMPLEX, 1, color)
            # gr.Label expects class-name keys, not the annotated string.
            confidences = {cls: res}
    return img, confidences


if __name__ == '__main__':
    # Build the UI: webcam frame + model selector in, annotated frame
    # and class confidences out.
    webcam_input = gr.Image(source='webcam', shape=None, type='numpy')
    model_choice = Dropdown(["DeePixBiS", "DSDG"], value="DeePixBiS")
    app = gr.Interface(
        fn=inference,
        inputs=[webcam_input, model_choice],
        outputs=["image", gr.Label(num_top_classes=2)],
        examples=examples,
    )
    app.queue(concurrency_count=2).launch(share=False)