Padzong committed
Commit 41b4975 · verified · 1 Parent(s): a58f3f2

Create demo v1

Files changed (1): app.py +73 -4
app.py CHANGED
@@ -1,7 +1,76 @@
+#%%
 import gradio as gr
+from PIL import Image
+from torchvision import transforms
+from siamese_nn import Siamese_nn
+import torch, os
+import torch.nn.functional as F
 
-def greet(name):
-    return "Hello " + name + "!!"
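+# Load the trained Siamese network weights and switch to inference mode.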
+model = Siamese_nn()
+weights = torch.load('trained_model')
+model.load_state_dict(weights)
+model.eval()
+#%%
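+# Build labelled image pairs from the files in data/: label 0 when both
+# images come from the same user (filenames share a two-character prefix),
+# label 1 when they come from different users.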
+file_list = os.listdir('data')
+examples = []
+usersIndexes = []
+for x in file_list:
+    if x[0:2] not in usersIndexes:
+        usersIndexes.append(x[0:2])
+
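+# Split files into this user's images and everyone else's; the substring
+# match assumes the two-character user id only occurs in that user's names.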
+for user in usersIndexes:
+    usersImages = [x for x in file_list if str(user) in x]
+    notUsersImages = [x for x in file_list if str(user) not in x]
 
-iface = gr.Interface(fn=greet, inputs="text", outputs="text")
-iface.launch()
+    for userImage in usersImages:
+        for userImageCopy in usersImages:
+            examples.append([userImage, userImageCopy, 0])
+        for notUser in notUsersImages:
+            examples.append([userImage, notUser, 1])
+
+#%%
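+# Embed both selected images with the Siamese network and compare the
+# pairwise distance of the embeddings against a 0.6 acceptance threshold.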
+def predict(input1, input2, label=None):
+    img1_PIL = Image.open(f'data/{input1}')
+    img2_PIL = Image.open(f'data/{input2}')
+    img1 = transforms.ToTensor()(img1_PIL).unsqueeze(0)
+    img2 = transforms.ToTensor()(img2_PIL).unsqueeze(0)
+
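+    # Look up the ground-truth label for this pair in the examples list.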
+    if input1 == input2:
+        label = 0
+    else:
+        for el in examples:
+            if input1 in el and input2 in el:
+                label = el[2]
+                break
+
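+    # Embed both images without tracking gradients.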
+    with torch.no_grad():
+        out1, out2 = model(img1, img2)
+    pred = F.pairwise_distance(out1, out2)
+    if pred < 0.6:
+        decision = f'Access granted, confidence: {pred.item():.4f}'
+    else:
+        decision = f'Access denied, confidence: {pred.item():.4f}'
+    return img1_PIL, img2_PIL, decision, label
+
+#%%
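+# Gradio UI: two dropdowns select images from data/; the previews, the
+# ground-truth label, and the model's decision update on every change.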
+with gr.Blocks() as demo:
+    drop1 = gr.Dropdown(
+        choices=file_list,
+        label='First image',
+        scale=0
+    )
+    drop2 = gr.Dropdown(
+        choices=file_list,
+        label='Second image',
+        scale=0
+    )
+    with gr.Row():
+        img1 = gr.Image(value=f'data/{examples[0][0]}', height=153, width=136, interactive=False, scale=0, label='image1')
+        img2 = gr.Image(value=f'data/{examples[0][0]}', height=153, width=136, interactive=False, scale=0, label='image2')
+    label = gr.Label(label='0 means images represent the same fingerprint')
+    output = gr.Label(value=predict(*examples[0])[2], label='Prediction')
+
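+    # Re-run predict whenever either dropdown selection changes.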
+    drop1.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])
+    drop2.change(fn=predict, inputs=[drop1, drop2], outputs=[img1, img2, output, label])
+demo.launch()
+
+# %%