#%%
import os

import gradio as gr
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

from siamese_nn import Siamese_nn

# Load the trained Siamese network on CPU and put it in inference mode.
model = Siamese_nn()
weights = torch.load('trained_model', map_location=torch.device('cpu'))
model.load_state_dict(weights)
model.eval()
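
# Siamese_nn itself is defined in siamese_nn.py; this script only assumes it is
# a twin-branch embedding network whose forward pass returns one embedding per
# input image. An illustrative sketch of that assumed interface (layer sizes
# are hypothetical, not the real architecture):
#
#   class Siamese_nn(torch.nn.Module):
#       def __init__(self):
#           super().__init__()
#           self.embed = torch.nn.Sequential(
#               torch.nn.Flatten(),
#               torch.nn.Linear(136 * 153, 128),  # hypothetical sizes
#           )
#
#       def forward(self, x1, x2):
#           # Both branches share the same weights.
#           return self.embed(x1), self.embed(x2)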
# Build (image1, image2, label) example pairs from the files in ./data.
# Filenames are assumed to start with a two-character user index.
file_list = os.listdir('data')
examples = []
usersIndexes = []
for x in file_list:
    if x[0:2] not in usersIndexes:
        usersIndexes.append(x[0:2])
for user in usersIndexes:
    # Match on the filename prefix, not anywhere in the name, so that
    # user '01' does not accidentally match a file such as '101_x.png'.
    usersImages = [x for x in file_list if x.startswith(user)]
    notUsersImages = [x for x in file_list if not x.startswith(user)]
    for userImage in usersImages:
        for userImageCopy in usersImages:
            examples.append([userImage, userImageCopy, 0])  # same user -> label 0
        for notUser in notUsersImages:
            examples.append([userImage, notUser, 1])  # different users -> label 1
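
# Illustrative shape of the resulting list (filenames are hypothetical):
#   examples == [['01_1.png', '01_1.png', 0],   # same user      -> label 0
#                ['01_1.png', '02_3.png', 1],   # different user -> label 1
#                ...]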
#%%
def predict(input1, input2, label=None):
    img1_PIL = Image.open(f'data/{input1}')
    img2_PIL = Image.open(f'data/{input2}')
    img1 = transforms.ToTensor()(img1_PIL).unsqueeze(0)
    img2 = transforms.ToTensor()(img2_PIL).unsqueeze(0)
    # Look up the ground-truth label for this pair: identical files are
    # trivially the same finger; otherwise search the prepared example pairs.
    if input1 == input2:
        label = 0
    else:
        for el in examples:
            if input1 in el and input2 in el:
                label = el[2]
                break
    # Embed both images; the pairwise (Euclidean) distance between the
    # embeddings is the match score -- smaller means more similar.
    with torch.no_grad():
        out1, out2 = model(img1, img2)
    pred = F.pairwise_distance(out1, out2)
    # 0.6 is the decision threshold on the embedding distance.
    if pred < 0.6:
        decision = f'Access granted, distance: {pred.item():.4f}'
    else:
        decision = f'Access denied, distance: {pred.item():.4f}'
    return img1_PIL, img2_PIL, decision, label
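
# Quick sanity check outside the UI: examples[0] pairs an image with itself,
# so the expected label is 0 and the embedding distance should be ~0.
_, _, _decision, _label = predict(*examples[0])
print(_decision, _label)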
#%%
with gr.Blocks() as demo:
    # Give both dropdowns an initial value so the change callbacks never
    # receive None before the user has made a selection.
    drop1 = gr.Dropdown(
        choices=file_list,
        value=examples[0][0],
        label='First image',
        scale=0,
    )
    drop2 = gr.Dropdown(
        choices=file_list,
        value=examples[0][1],
        label='Second image',
        scale=0,
    )
    with gr.Row():
        img1 = gr.Image(value=f'data/{examples[0][0]}', height=153, width=136, interactive=False, scale=0, label='image1')
        img2 = gr.Image(value=f'data/{examples[0][1]}', height=153, width=136, interactive=False, scale=0, label='image2')
    label = gr.Label(label='0 means images represent the same fingerprint')
    output = gr.Label(value=predict(*examples[0])[2], label='Prediction')
    # Re-run the prediction whenever either dropdown changes.
    drop1.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])
    drop2.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])

demo.launch()
# %%