#%%
import os

import gradio as gr
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms

from siamese_nn import Siamese_nn
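# Load the trained Siamese embedding network on CPU and switch it to
# inference mode; the weights file is expected next to this script.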
model = Siamese_nn()
weights = torch.load('trained_model', map_location=torch.device('cpu'))
model.load_state_dict(weights)
model.eval()
# Build the example pairs. The first two characters of each filename identify
# the user (see x[0:2] below): pairs with matching prefixes are labelled 0
# (same finger), all other pairs are labelled 1 (different fingers).
file_list = sorted(os.listdir("data"))  # sorted so the default selection is deterministic
examples = []
usersIndexes = []
for x in file_list:
    if x[0:2] not in usersIndexes:
        usersIndexes.append(x[0:2])
for user in usersIndexes:
    # startswith matches the two-character prefix explicitly; a plain
    # substring test could also match the index elsewhere in a filename.
    usersImages = [x for x in file_list if x.startswith(user)]
    notUsersImages = [x for x in file_list if not x.startswith(user)]
    for userImage in usersImages:
        for userImageCopy in usersImages:
            examples.append([userImage, userImageCopy, 0])
        for notUser in notUsersImages:
            examples.append([userImage, notUser, 1])
def predict(input1, input2, label=None):
    img1_PIL = Image.open(f'data/{input1}')
    img2_PIL = Image.open(f'data/{input2}')
    img1 = transforms.ToTensor()(img1_PIL).unsqueeze(0)
    img2 = transforms.ToTensor()(img2_PIL).unsqueeze(0)
    # Look up the ground-truth label for the selected pair.
    for el in examples:
        if input1 == el[0] and input2 == el[1] and el[2] == 0:
            label = 'Scans of the same finger'
            break
        if input1 == el[0] and input2 == el[1] and el[2] == 1:
            label = 'Scans of different fingers'
            break
    with torch.no_grad():
        out1, out2 = model(img1, img2)
    # Euclidean distance between the two embeddings: smaller means more similar.
    pred = F.pairwise_distance(out1, out2)
    if pred < 0.6:
        decision = f'Access granted, confidence: {pred.item():.4f}'
    else:
        decision = f'Access denied, confidence: {pred.item():.4f}'
    return img1_PIL, img2_PIL, decision, label
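# A minimal sanity check (my addition, assuming `examples` is non-empty): the
# first example pairs the first scan with itself, so the distance should be ~0
# and the decision should read "Access granted".
_, _, _decision, _label = predict(*examples[0])
print(_decision, '|', _label)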
#%%
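# Custom CSS: cap the container size, crop the preview images to a fixed
# height, and pin the dropdown arrow icons in place.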
css = """
.gradio-container {
height: 100vh;
max-width: 1024px !important;
}
.my-img {
max-height: 288px !important;
object-fit: cover !important;
}
.img-select div.secondary-wrap {
position: relative;
}
.img-select div.icon-wrap {
position: absolute;
pointer-events: none;
right: 0;
}
#res div h2 { color: #07ef03; }
"""
js = """
() => {
    const label = document.querySelector("#res div h2");
    const txt = label.textContent.split(",")[0];
    if (txt === 'Access granted') {
        label.style.color = "#07ef03";
    }
    if (txt === 'Access denied') {
        label.style.color = "red";
    }
}
"""
dropdowns = """
() => {
    const input_el = document.querySelectorAll(".img-select input");
    // Turn the dropdown text inputs into buttons so they cannot be typed into.
    input_el[0].type = "button";
    input_el[1].type = "button";
    /*
    const svg = document.querySelectorAll(".img-select div.icon-wrap");
    const ul = document.querySelectorAll(".img-select ul.options");
    for (let i = 0; i < input_el.length; i++) {
        input_el[i].addEventListener("click", () => {
            svg[i].style.transform = "rotate(180deg)";
        })
    }
    */
}
"""
def refresh():
    image = Image.open(f'data/{file_list[0]}')
    return image, image
with gr.Blocks(css=css, js=dropdowns, elem_classes=['container']) as demo:
    md = gr.Markdown(value="""# Follow the steps
- To check the model's performance, choose the first and second image from the available examples.
- You can pair up images of the same or of different fingerprints. The model's result is calculated automatically.
- The displayed confidence is the distance between the two scans' embeddings: the closer to 0, the more similar the scans and the more confident the model.
- Access is granted if the confidence falls below a threshold found during model testing.""")
    with gr.Row():
        with gr.Row():
            drop1 = gr.Dropdown(value=file_list[0],
                                choices=file_list,
                                label='Select first image',
                                scale=1,
                                elem_classes='img-select',
                                )
            drop2 = gr.Dropdown(value=file_list[0],
                                choices=file_list,
                                label='Select second image',
                                scale=1,
                                elem_classes='img-select',
                                )
        label = gr.Label(value='Scans of the same finger', show_label=False)
    with gr.Row():
        img1 = gr.Image(height=288,  # unfortunately value= doesn't render properly on initial load, hence the demo.load workaround below
                        width=256,
                        interactive=False,
                        scale=1,
                        label='first image',
                        show_download_button=False,
                        show_share_button=False,
                        elem_classes=['my-img'])
        img2 = gr.Image(height=288,
                        width=256,
                        interactive=False,
                        scale=1,
                        label='second image',
                        show_download_button=False,
                        show_share_button=False,
                        elem_classes=['my-img'])
    output = gr.Label(value=predict(*examples[0])[2], elem_id='res', show_label=False)
    # Recompute the prediction whenever either dropdown changes.
    drop1.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])
    drop2.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])
    # Pure-JS callback: recolor the result heading after each prediction.
    output.change(fn=None, inputs=None, js=js)
    # Initial image load workaround.
    demo.load(fn=refresh, inputs=None, outputs=[img1, img2])
demo.launch()
# %%