#%%
import gradio as gr
from PIL import Image
from torchvision import transforms
from siamese_nn import Siamese_nn
import torch, os
import torch.nn.functional as F

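# Load the trained Siamese network weights on CPU and switch to inference mode.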
model = Siamese_nn()
weights = torch.load('trained_model', map_location=torch.device('cpu'))
model.load_state_dict(weights)
model.eval()

file_list = os.listdir("data")

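# Build every labelled pair from the example scans. The first two characters of
# each filename are taken as the user id; label 0 = same finger, 1 = different.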
examples = []
usersIndexes = []
for x in file_list:
    if x[0:2] not in usersIndexes:
        usersIndexes.append(x[0:2])
    
for user in usersIndexes:
    # Match on the filename prefix, not a bare substring, so user '01'
    # doesn't also match a file like 'x01_...'.
    usersImages = [x for x in file_list if x.startswith(user)]
    notUsersImages = [x for x in file_list if not x.startswith(user)]

    for userImage in usersImages:
        for userImageCopy in usersImages:
            examples.append([userImage, userImageCopy, 0])
        # Append each different-finger pair once; nesting this loop inside the
        # one above duplicated every pair len(usersImages) times.
        for notUser in notUsersImages:
            examples.append([userImage, notUser, 1])

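# Embed both scans with the Siamese network and threshold their pairwise
# distance; also return the ground-truth label so the UI can show what the pair really is.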
def predict(input1, input2, label=None):
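    # Load both scans and add a batch dimension so the model sees shape (1, C, H, W).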
    img1_PIL = Image.open(f'data/{input1}')
    img2_PIL = Image.open(f'data/{input2}')
    img1 = transforms.ToTensor()(img1_PIL).unsqueeze(0)
    img2 = transforms.ToTensor()(img2_PIL).unsqueeze(0)

    # Look up the ground-truth label for this pair (0 = same finger, 1 = different).
    for el in examples:
        if input1 == el[0] and input2 == el[1] and el[2] == 0:
            label = 'Scans of the same finger'
            break
        if input1 == el[0] and input2 == el[1] and el[2] == 1:
            label = 'Scans of different fingers'
            break
    
    with torch.no_grad():
        out1, out2 = model(img1, img2)
        pred = F.pairwise_distance(out1, out2)
        # The reported 'confidence' is the embedding distance: lower means more similar.
        if pred < 0.6:  # threshold found during model testing
            decision = f'Access granted, confidence: {pred.item():.4f}'
        else:
            decision = f'Access denied, confidence: {pred.item():.4f}'

    return img1_PIL, img2_PIL, decision, label
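#%%
# Optional sanity check, a minimal sketch: run one labelled pair through
# predict() and print the model's decision next to the ground-truth label.
if examples:
    _, _, decision, true_label = predict(*examples[0])
    print(f'{true_label} -> {decision}')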
#%%
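# Custom CSS: cap the container width, constrain image height, and style the result header.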
css = """
.gradio-container {
    height: 100vh;
    max-width: 1024px !important;
}

/* matches elem_classes=['my-img'] on the image components below */
.my-img {
    max-height: 288px !important;
    object-fit: cover !important;
}

.img-select div.secondary-wrap {
    position: relative;
}
.img-select div.icon-wrap {
    position: absolute;
    pointer-events: none;
    right: 0;
}
#res div h2 { color: #07ef03; }
"""

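# Client-side JS run after each prediction: color the result label green or red.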
js = """
() => {
    
    label = document.querySelector("#res div h2");
    txt = label.textContent.split(",")[0]
    if (txt === 'Access granted') {
        label.style.color = "#07ef03";
        }
    if (txt === 'Access denied') {
        label.style.color = "red";
        }
    }
"""

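# Turn the dropdown text inputs into buttons so their values can't be typed over.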
dropdowns = """
() => {
    const input_el = document.querySelectorAll(".img-select input");
    input_el[0].type = "button";
    input_el[1].type = "button";

    /*
    const svg = document.querySelectorAll(".img-select div.icon-wrap");
    const ul = document.querySelectorAll(".img-select ul.options");
    for (let i = 0; i < input_el.length; i++){
        input_el[i].addEventListener("click", () => {
            svg[i].style.transform = "rotate(180deg)";
        })
    }*/
}
"""
def refresh():
    image = Image.open(f'data/{file_list[0]}')
    return image, image

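# Assemble the UI: two dropdowns select the scans, two image panes preview them,
# and two labels show the ground-truth pairing and the model's decision.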
with gr.Blocks(css=css, js=dropdowns, elem_classes=['container']) as demo:
    md = gr.Markdown(value="""# Follow the steps
                - To check the model's performance, choose the first and second image from the available examples.
                - You can pair up images of the same or different fingerprints; the model's result is calculated automatically.
                - The displayed confidence reflects the similarity between the images: the closer to 0, the more similar the scans and the more confident the model.
                - Access is granted if the confidence value is below a threshold found during model testing.""")
    with gr.Row():
        
        with gr.Row():
            drop1 = gr.Dropdown(value=file_list[0],
                                choices=file_list,
                                label='Select first image',
                                scale=1,
                                elem_classes='img-select',
            )

            drop2 = gr.Dropdown(value=file_list[0],
                                choices=file_list,
                                label='Select second image',
                                scale=1,
                                elem_classes='img-select',
            )
            label = gr.Label(value='Scans of the same finger', show_label=False)

        with gr.Row():
            img1 = gr.Image(height=288, # height/width alone don't constrain the display reliably; the .my-img CSS enforces it
                            width=256, 
                            interactive=False, 
                            scale=1, 
                            label='first image', 
                            show_download_button=False,
                            show_share_button=False,
                            elem_classes=['my-img'])
             
            img2 = gr.Image(height=288, 
                            width=256, 
                            interactive=False, 
                            scale=1, 
                            label='second image',
                            show_download_button=False,
                            show_share_button=False,
                            elem_classes=['my-img'])
                     
    output = gr.Label(value=predict(*examples[0])[2], elem_id='res', show_label=False)

    drop1.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])
    drop2.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])
    output.change(fn=None, inputs=None, js=js)
    # initial img load workaround
    demo.load(fn=refresh, inputs=None, outputs=[img1, img2])
demo.launch()

# %%