#%%
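"""Gradio demo for fingerprint verification with a Siamese network.

Two fingerprint images from the 'data' directory are embedded by the trained
network; if the distance between their embeddings is below 0.6 the pair is
treated as a match ("Access granted"), otherwise as a mismatch ("Access denied").
"""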
import os

import gradio as gr
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

from siamese_nn import Siamese_nn

# Load the trained Siamese network on the CPU and switch it to inference mode.
model = Siamese_nn()
weights = torch.load('trained_model', map_location=torch.device('cpu'))
model.load_state_dict(weights)
model.eval()
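# The architecture of Siamese_nn is defined in siamese_nn.py and is not shown here.
# For orientation only: the calls below assume a two-branch forward pass that returns
# one embedding per input image, roughly along these lines (illustrative sketch, not
# the actual implementation):
#
#     class Siamese_nn(torch.nn.Module):
#         def __init__(self):
#             super().__init__()
#             self.encoder = torch.nn.Sequential(...)  # shared CNN encoder (assumed)
#
#         def forward(self, x1, x2):
#             # Both inputs go through the same weights (weight sharing).
#             return self.encoder(x1), self.encoder(x2)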
# Build ground-truth pairs from the files in 'data'. Filenames are assumed to start
# with a two-character user id; pairs of images from the same user get label 0
# (genuine pair), pairs from different users get label 1 (impostor pair).
file_list = os.listdir('data')
examples = []

user_indexes = []
for name in file_list:
    if name[0:2] not in user_indexes:
        user_indexes.append(name[0:2])

for user in user_indexes:
    user_images = [name for name in file_list if name.startswith(user)]
    other_images = [name for name in file_list if not name.startswith(user)]
    for user_image in user_images:
        for user_image_copy in user_images:
            examples.append([user_image, user_image_copy, 0])
        for other_image in other_images:
            examples.append([user_image, other_image, 1])
#%%
def predict(input1, input2, label=None):
    # Load both images and convert them to batched tensors.
    img1_PIL = Image.open(f'data/{input1}')
    img2_PIL = Image.open(f'data/{input2}')
    img1 = transforms.ToTensor()(img1_PIL).unsqueeze(0)
    img2 = transforms.ToTensor()(img2_PIL).unsqueeze(0)

    # Look up the ground-truth label for the pair (0 = same fingerprint, 1 = different).
    if input1 == input2:
        label = 0
    else:
        for el in examples:
            if input1 in el and input2 in el:
                label = el[2]
                break

    # Embed both images and measure the Euclidean distance between the embeddings;
    # a smaller distance means the fingerprints are more likely to match.
    with torch.no_grad():
        out1, out2 = model(img1, img2)
        pred = F.pairwise_distance(out1, out2)
    if pred < 0.6:
        decision = f'Access granted, distance: {pred.item():.4f}'
    else:
        decision = f'Access denied, distance: {pred.item():.4f}'
    return img1_PIL, img2_PIL, decision, label
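# Example usage (as in the UI below): predict(*examples[0]) compares the first
# ground-truth pair and returns (PIL image 1, PIL image 2, decision string, label).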
#%%
# Gradio UI: pick two fingerprint images, preview them, and show the model's decision
# together with the ground-truth pairing label.
with gr.Blocks() as demo:
    drop1 = gr.Dropdown(
        choices=file_list,
        label='First image',
        scale=0,
    )
    drop2 = gr.Dropdown(
        choices=file_list,
        label='Second image',
        scale=0,
    )
    with gr.Row():
        img1 = gr.Image(value=f'data/{examples[0][0]}', height=153, width=136,
                        interactive=False, scale=0, label='image1')
        img2 = gr.Image(value=f'data/{examples[0][1]}', height=153, width=136,
                        interactive=False, scale=0, label='image2')
    label = gr.Label(label='0 means images represent the same fingerprint')
    output = gr.Label(value=predict(*examples[0])[2], label='Prediction')
    # Re-run the prediction whenever either dropdown selection changes.
    drop1.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])
    drop2.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])

demo.launch()
# %%