File size: 3,349 Bytes
9067733
03d287b
ddadf19
9067733
 
 
03d287b
9067733
ddadf19
 
 
 
 
 
 
 
 
 
 
9067733
 
 
 
03d287b
 
 
 
9067733
03d287b
9067733
 
 
 
 
 
 
03d287b
9067733
 
ddadf19
 
9067733
 
 
 
 
 
 
 
 
 
 
 
03d287b
9067733
ddadf19
 
 
 
 
 
 
 
 
 
 
 
 
03d287b
 
 
 
 
 
 
 
 
ddadf19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
03d287b
ddadf19
9067733
 
 
 
 
 
03d287b
 
9067733
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
import gradio as gr
from gradio.components import Dropdown

import cv2 as cv
import torch
from torchvision import transforms
from DeePixBiS.Model import DeePixBiS

import yaml
import numpy as np
import pandas as pd
from skimage.io import imread, imsave
# from tddfa.TDDFA import TDDFA
from tddfa.utils.depth import depth
from tddfa.TDDFA_ONNX import TDDFA_ONNX

import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
os.environ['OMP_NUM_THREADS'] = '4'

# Class names for the anti-spoofing decision.
# NOTE(review): `inference` below actually labels frames 'Real'/'Spoof' —
# confirm whether `labels` is still used anywhere or is dead config.
labels = ['Live', 'Spoof']
# Decision threshold on the mean DeePixBiS mask activation:
# below `thresh` the face is classified as a spoof.
thresh = 0.45
# (image path, model name) pairs shown as clickable examples in the UI.
examples = [
    ['examples/1_1_21_2_33_scene_fake.jpg', "DeePixBiS"],
    ['examples/frame150_real.jpg', "DeePixBiS"],
    ['examples/1_2.avi_125_real.jpg', "DeePixBiS"],
    ['examples/1_3.avi_25_fake.jpg', "DeePixBiS"]]
# CPU-only deployment; NOTE(review): `device` is never referenced later —
# the model is also never moved with .to(device). Verify it is intentional.
device = torch.device("cpu")
# Haar-cascade face detector used to locate the face crop.
faceClassifier = cv.CascadeClassifier('./DeePixBiS/Classifiers/haarface.xml')
# Preprocessing for the DeePixBiS network: 224x224 RGB, normalized to [-1, 1].
tfms = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
model = DeePixBiS(pretrained=False)
model.load_state_dict(torch.load('./DeePixBiS/DeePixBiS.pth'))
model.eval()

# Fix: the original `open(...)` leaked the config file handle; close it
# deterministically with a context manager.
with open('tddfa/configs/mb1_120x120.yml') as _cfg_file:
    cfg = yaml.load(_cfg_file, Loader=yaml.SafeLoader)
# ONNX runtime variant of 3DDFA used for the depth-map (DSDG) branch.
tddfa = TDDFA_ONNX(gpu_mode=False, **cfg)

def find_largest_face(faces):
    """Return the face rectangle with the largest area.

    Args:
        faces: iterable of ``(x, y, w, h)`` rectangles, e.g. the result of
            ``cv.CascadeClassifier.detectMultiScale``.

    Returns:
        The ``(x, y, w, h)`` tuple covering the most pixels, or ``None``
        when ``faces`` is empty (or every rectangle has zero area).
    """
    best, best_area = None, 0
    for x, y, w, h in faces:
        area = w * h
        # Strict '>' keeps the first rectangle on area ties.
        if area > best_area:
            best, best_area = (x, y, w, h), area
    return best

def inference(img, model_name):
    """Classify the largest detected face in `img` as live or spoofed.

    Args:
        img: BGR image as a numpy array (OpenCV / gradio 'numpy' format).
        model_name: 'DeePixBiS' for the pixel-wise binary supervision model;
            anything else falls through to the 3DDFA depth-map branch.

    Returns:
        (img, confidences): the input image annotated in place with a box
        and label, and a {label: score} dict (empty when no face is found).
    """
    confidences = {}
    # Haar cascades operate on grayscale input.
    grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    faces = faceClassifier.detectMultiScale(
        grey, scaleFactor=1.1, minNeighbors=4)
    face = find_largest_face(faces)

    if face is not None:
        x, y, w, h = face
        faceRegion = img[y:y + h, x:x + w]
        # Model expects RGB; OpenCV delivers BGR.
        faceRegion = cv.cvtColor(faceRegion, cv.COLOR_BGR2RGB)
        faceRegion = tfms(faceRegion)
        # Add the batch dimension: (C, H, W) -> (1, C, H, W).
        faceRegion = faceRegion.unsqueeze(0)

        if model_name == 'DeePixBiS':
            # Fix: run under no_grad — inference-only, so building the
            # autograd graph wastes memory; also call the module via
            # __call__ instead of .forward() so torch hooks still fire.
            with torch.no_grad():
                mask, binary = model(faceRegion)
            # Mean pixel-wise liveness score; low values indicate a spoof.
            res = torch.mean(mask).item()
            if res < thresh:
                cls = 'Spoof'
                color = (0, 0, 255)
                # Report confidence in the *predicted* (spoof) class.
                res = 1 - res
            else:
                cls = 'Real'
                color = (0, 255, 0)

        else:
            # DSDG/depth branch: reconstruct a dense 3D face and render its
            # depth map over the frame via 3DDFA.
            dense_flag = True
            boxes = list(face)
            boxes.append(1)  # 3DDFA expects [x, y, w, h, score].
            param_lst, roi_box_lst = tddfa(img, [boxes])

            ver_lst = tddfa.recon_vers(param_lst, roi_box_lst, dense_flag=dense_flag)
            img = depth(img, ver_lst, tddfa.tri, with_bg_flag=False)
            # No live/spoof decision on this branch; placeholder output.
            cls = 'Other'
            res = 0.5
            color = (0, 0, 255)

        label = f'{cls} {res:.2f}'
        confidences = {label: res}
        # Annotate the (possibly depth-rendered) frame in place.
        cv.rectangle(img, (x, y), (x + w, y + h), color, 2)
        cv.putText(img, label, (x, y + h + 30),
                    cv.FONT_HERSHEY_COMPLEX, 1, color)

    return img, confidences


if __name__ == '__main__':
    # Build the demo UI: webcam image + model selector in, annotated image
    # + confidence label out.
    # NOTE(review): gr.Image(source=..., shape=...) and
    # .queue(concurrency_count=...) are Gradio 3.x APIs, removed in 4.x —
    # confirm the pinned gradio version before upgrading.
    demo = gr.Interface(
        fn=inference,
        inputs=[gr.Image(source='webcam', shape=None, type='numpy'),
                Dropdown(["DeePixBiS", "DSDG"], value="DeePixBiS")],
        outputs=["image", gr.Label(num_top_classes=2)],
        examples=examples).queue(concurrency_count=2)
    # Local-only serving; no public share link.
    demo.launch(share=False)