aminaB9 committed
Commit f003e69 · Parent: cf5d9e9

Initial commit
.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+.bin filter=lfs diff=lfs merge=lfs -text
+**/*.bin filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,4 @@
+**/Server
+**/Keys
+**/Client
+*-emb.txt
README.md CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Biometric Recognition FHE
-emoji: 👁
+emoji: 🧑 + 📸 + 🔐
 colorFrom: red
 colorTo: blue
 sdk: gradio
VGGFace2/n000001/0002_01.jpg ADDED
VGGFace2/n000001/0013_01.jpg ADDED
VGGFace2/n000082/0001_02.jpg ADDED
VGGFace2/n000082/0003_03.jpg ADDED
VGGFace2/n000129/0001_01.jpg ADDED
VGGFace2/n000129/0006_01.jpg ADDED
VGGFace2/n000148/0014_01.jpg ADDED
VGGFace2/n000148/0043_01.jpg ADDED
VGGFace2/n000149/0002_01.jpg ADDED
VGGFace2/n000149/0019_01.jpg ADDED
VGGFace2/n000394/0007_01.jpg ADDED
VGGFace2/n000394/0018_01.jpg ADDED
app.py ADDED
@@ -0,0 +1,306 @@
+
+import gradio as gr
+import numpy as np
+import torch
+import timm
+import torch.nn.functional as F
+from torchvision import transforms
+import time
+import subprocess
+import os
+
+
+def resizeImage(image):
+    # Resize inputs to the 112x112 geometry the face-recognition models expect.
+    resized = image.resize((112, 112))
+    return resized
+
+
+def free_port(port):
+    # Kill any process currently bound to `port` (used for local debugging).
+    try:
+        result = subprocess.check_output(f"lsof -t -i:{port}", shell=True).decode().strip()
+        if result:
+            for pid in result.split("\n"):
+                subprocess.call(["kill", "-9", pid])
+    except Exception as e:
+        print(f"Could not free port {port}: {e}")
+
+
+SECURITYLEVELS = ["128", "196", "256"]
+
+FRMODELS = ["gaunernst/vit_tiny_patch8_112.arcface_ms1mv3",
+            "gaunernst/vit_tiny_patch8_112.cosface_ms1mv3",
+            "gaunernst/vit_tiny_patch8_112.adaface_ms1mv3",
+            "gaunernst/vit_small_patch8_gap_112.cosface_ms1mv3",
+            "gaunernst/convnext_nano.cosface_ms1mv3",
+            "gaunernst/convnext_atto.cosface_ms1mv3"]
+
+
+def runBinFile(*args):
+    # Run one of the compiled FHE binaries; args[0] is the binary path,
+    # the remaining args are passed through on the command line.
+    binary_path = args[0]
+    if not os.path.isfile(binary_path):
+        return "Error: Compiled binary not found."
+    try:
+        os.chmod(binary_path, 0o755)
+        start = time.time()
+        result = subprocess.run(
+            list(args),
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            text=True
+        )
+        duration = (time.time() - start) * 1000
+        if 'print' in args:
+            return result.stdout
+        elif 'styledPrint' in args:
+            return styled_output(result.stdout)
+        elif result.returncode == 0:
+            return True, f"<b>⏱️ Processing Time:</b> {duration:.0f} ms"
+        else:
+            # Pair with the two Gradio outputs (checkbox, HTML) on failure too.
+            return False, f"<b>Error:</b> {result.stderr}"
+    except Exception as e:
+        return f"Execution failed: {e}"
+
+
+example_images = ['./VGGFace2/n000001/0002_01.jpg',
+                  './VGGFace2/n000149/0002_01.jpg',
+                  './VGGFace2/n000082/0001_02.jpg',
+                  './VGGFace2/n000148/0014_01.jpg',
+                  './VGGFace2/n000129/0001_01.jpg',
+                  './VGGFace2/n000394/0007_01.jpg',
+                  ]
+
+example_images_auth = ['./VGGFace2/n000001/0013_01.jpg',
+                       './VGGFace2/n000149/0019_01.jpg',
+                       './VGGFace2/n000082/0003_03.jpg',
+                       './VGGFace2/n000148/0043_01.jpg',
+                       './VGGFace2/n000129/0006_01.jpg',
+                       './VGGFace2/n000394/0018_01.jpg',
+                       ]
+
+
+def display_image(image):
+    return image
+
+
+def load_rec_image():
+    # Precomputed reconstruction shown in the invertibility demo.
+    return 'static/reconstructed.png'
+
+
+def extract_emb(image, modelName=FRMODELS[0], mode=None):
+    transform = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.RandomHorizontalFlip(),
+        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
+    ])
+    image = transform(image)
+    image = image.unsqueeze(0)
+
+    model = timm.create_model(f"hf_hub:{modelName}", pretrained=True).eval()
+    with torch.no_grad():
+        embs = model(image)
+        embs = F.normalize(embs, dim=1)
+    embs = embs.detach().numpy()
+    embs = embs.squeeze(0)
+    if mode is not None:
+        # Persist the embedding for the FHE binaries as '<mode>-emb.txt'.
+        np.savetxt(f'{mode}-emb.txt', embs.reshape(1, embs.shape[0]), fmt="%.6f", delimiter=',')
+    return embs
+
+
+def get_selected_image(evt: gr.SelectData):
+    return example_images[evt.index]
+
+
+def get_selected_image_auth(evt: gr.SelectData):
+    return example_images_auth[evt.index]
+
+
+def styled_output(result):
+    # Render the binary's decision string as colored HTML.
+    if result.strip().lower() == "match":
+        return "<span style='color: green; font-weight: bold;'>✔️ Match</span>"
+    elif result.strip().lower() == "no match":
+        return "<span style='color: red; font-weight: bold;'>❌ No Match</span>"
+    else:
+        return "<span style='color: red; font-weight: bold;'>Error</span>"
+
+
+with gr.Blocks() as demo:
+    gr.Markdown("# Biometric Recognition (1:1 matching) Using Fully Homomorphic Encryption (FHE)")
+    with gr.Row():
+        gr.Markdown("## Phase 1: Enrollment")
+    with gr.Row():
+        gr.Markdown("### Step 1: Upload or select a reference facial image for enrollment.")
+    with gr.Row():
+        with gr.Column():
+            image_input_enroll = gr.Image(label="Upload a reference facial image.", type="pil", sources="upload")
+            image_input_enroll.change(fn=resizeImage, inputs=image_input_enroll, outputs=image_input_enroll)
+        with gr.Column():
+            example_gallery = gr.Gallery(value=example_images, columns=3)
+        with gr.Column():
+            image_output_enroll = gr.Image(label="Reference facial image", sources="upload")
+            image_input_enroll.change(fn=display_image, inputs=image_input_enroll, outputs=image_output_enroll)
+
+    with gr.Row():
+        gr.Markdown("### Step 2: Generate reference embedding.")
+    with gr.Row():
+        with gr.Column():
+            modelName = gr.Dropdown(
+                choices=FRMODELS,
+                label="Choose a face recognition model"
+            )
+        with gr.Column():
+            example_gallery.select(fn=get_selected_image, inputs=None, outputs=image_input_enroll)
+            key_button = gr.Button("Generate embedding")
+            enroll_emb_text = gr.JSON(label="Reference embedding")
+            mode = gr.State("enroll")
+            key_button.click(fn=extract_emb, inputs=[image_input_enroll, modelName, mode], outputs=enroll_emb_text)
+
+    with gr.Row():
+        gr.HTML("<h3>Facial embeddings are <span style='color:red; font-weight:bold'>INVERTIBLE</span> and lead to the <span style='color:red; font-weight:bold'>RECONSTRUCTION</span> of their raw facial images.</h3>")
+    with gr.Row():
+        gr.Markdown("### Example:")
+    with gr.Row():
+        original_image = gr.Image(value="static/original.jpg", label="Original", sources="upload")
+        key_button = gr.Button("Generate embedding")
+        output_text = gr.JSON(label="Target embedding")
+        key_button.click(fn=extract_emb, inputs=[original_image, modelName], outputs=output_text)
+        btn = gr.Button("Reconstruct facial image")
+        Reconstructed_image = gr.Image(label="Reconstructed")
+        btn.click(fn=load_rec_image, outputs=Reconstructed_image)
+    with gr.Row():
+        gr.HTML("<h3>Facial embeddings protection is a <span style='color:red; font-weight:bold'>MUST!</span> At Suraksh.AI, we protect facial embeddings using FHE.</h3>")
+
+    with gr.Row():
+        gr.Markdown("### Step 3: 🔐 Generate the FHE public and secret keys.")
+    with gr.Row():
+        with gr.Column():
+            securityLevel = gr.Dropdown(
+                choices=SECURITYLEVELS,
+                label="Choose a security level"
+            )
+        with gr.Column():
+            key_button = gr.Button("Generate the FHE public and secret keys")
+            key_status = gr.Checkbox(label="FHE Public and Secret keys generated.", value=False)
+            time_output = gr.HTML()
+            key_button.click(fn=runBinFile, inputs=[gr.State("./genKeys.bin"), securityLevel, gr.State("genkeys")], outputs=[key_status, time_output])
+
+    with gr.Row():
+        gr.Markdown("### Step 4: 🔒 Encrypt reference embedding using FHE.")
+    with gr.Row():
+        with gr.Column():
+            key_button = gr.Button("Encrypt")
+            key_status = gr.Checkbox(label="Reference embedding encrypted.", value=False)
+            time_output = gr.HTML()
+            key_button.click(fn=runBinFile, inputs=[gr.State("./encReference.bin"), securityLevel, gr.State("encrypt")], outputs=[key_status, time_output])
+
+        with gr.Column():
+            key_button = gr.Button("Display")
+            output_text = gr.Text(label="Encrypted embedding", lines=3, interactive=False)
+            key_button.click(fn=runBinFile, inputs=[gr.State("./encReference.bin"), securityLevel, gr.State("print")], outputs=output_text)
+
+    with gr.Row():
+        gr.Markdown("## Phase 2: Authentication")
+    with gr.Row():
+        gr.Markdown("### Step 1: Upload or select a probe facial image for authentication.")
+    with gr.Row():
+        with gr.Column():
+            image_input_auth = gr.Image(label="Upload a facial image.", type="pil", sources="upload")
+            image_input_auth.change(fn=resizeImage, inputs=image_input_auth, outputs=image_input_auth)
+        with gr.Column():
+            example_gallery = gr.Gallery(value=example_images_auth, columns=3)
+        with gr.Column():
+            image_output_auth = gr.Image(label="Probe facial image", sources="upload")
+            image_input_auth.change(fn=display_image, inputs=image_input_auth, outputs=image_output_auth)
+
+    with gr.Row():
+        gr.Markdown("### Step 2: Generate probe facial embedding.")
+    with gr.Row():
+        with gr.Column():
+            example_gallery.select(fn=get_selected_image_auth, inputs=None, outputs=image_input_auth)
+            key_button = gr.Button("Generate embedding")
+            enroll_emb_text = gr.JSON(label="Probe embedding")
+            mode = gr.State("auth")
+            key_button.click(fn=extract_emb, inputs=[image_input_auth, modelName, mode], outputs=enroll_emb_text)
+    with gr.Row():
+        gr.Markdown("### Step 3: 🔀 Generate protected probe embedding.")
+    with gr.Row():
+        with gr.Column():
+            key_button = gr.Button("Protect")
+            key_status = gr.Checkbox(label="Probe embedding protected.", value=False)
+            time_output = gr.HTML()
+            key_button.click(fn=runBinFile, inputs=[gr.State("./encProbe.bin"), securityLevel, gr.State("encrypt")], outputs=[key_status, time_output])
+        with gr.Column():
+            key_button = gr.Button("Display")
+            output_text = gr.Text(label="Protected embedding", lines=3, interactive=False)
+            key_button.click(fn=runBinFile, inputs=[gr.State("./encProbe.bin"), securityLevel, gr.State("print")], outputs=output_text)
+
+    with gr.Row():
+        gr.Markdown("### Step 4: 🔒 Compute biometric recognition decision using the threshold under FHE.")
+    with gr.Row():
+        gr.Markdown("### Set the recognition threshold.")
+    with gr.Row():
+        slider_threshold = gr.Slider(0, 512*5, step=1, value=133, label="Decision threshold", info="The higher, the stricter.", interactive=True)
+        number_threshold = gr.Textbox(visible=False, value='133')
+        slider_threshold.change(fn=lambda x: x, inputs=slider_threshold, outputs=number_threshold)
+    with gr.Row():
+        with gr.Column():
+            key_button = gr.Button("Biometric recognition under FHE")
+            key_status = gr.Checkbox(label="Recognition decision encrypted.", value=False)
+            time_output = gr.HTML()
+            key_button.click(fn=runBinFile, inputs=[gr.State("./recDecision.bin"), securityLevel, gr.State("decision"), number_threshold], outputs=[key_status, time_output])
+        with gr.Column():
+            key_button = gr.Button("Display")
+            output_text = gr.Text(label="Encrypted decision", lines=3, interactive=False)
+            key_button.click(fn=runBinFile, inputs=[gr.State("./recDecision.bin"), securityLevel, gr.State("print")], outputs=output_text)
+
+    with gr.Row():
+        gr.Markdown("### Step 5: 🔑 Decrypt biometric recognition decision.")
+    with gr.Row():
+        with gr.Column(scale=1):
+            decision_button = gr.Button("Decrypt")
+            decision_status = gr.Checkbox(label="Recognition decision decrypted.", value=False)
+            time_output = gr.HTML()
+            decision_button.click(fn=runBinFile, inputs=[gr.State("./decDecision.bin"), securityLevel, gr.State("decision")], outputs=[decision_status, time_output])
+        with gr.Column(scale=3):
+            with gr.Row():
+                check_button = gr.Button("Check")
+            with gr.Row():
+                with gr.Column(scale=1):
+                    final_output = gr.HTML()
+                    check_button.click(fn=runBinFile, inputs=[gr.State("./decDecision.bin"), securityLevel, gr.State("styledPrint")], outputs=final_output)
+                with gr.Column(scale=1):
+                    image_output_enroll = gr.Image(label="Reference", sources="upload")
+                    image_input_enroll.change(fn=display_image, inputs=image_input_enroll, outputs=image_output_enroll)
+                with gr.Column(scale=1):
+                    image_output_auth = gr.Image(label="Probe", sources="upload")
+                    image_input_auth.change(fn=display_image, inputs=image_input_auth, outputs=image_output_auth)
+
+
+# preferred_port = 8080
+# free_port(preferred_port)
+# demo.launch(debug=True, server_port=preferred_port)
+
+demo.launch()
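
A note on what the binaries are comparing: extract_emb() L2-normalizes every embedding, so in the clear the similarity between a reference and a probe is simply their inner product (cosine similarity). The sketch below is a cleartext analogue for intuition only, assuming the enroll-emb.txt / auth-emb.txt files written by extract_emb() and an illustrative threshold; the app itself only compares the embeddings under FHE.

import numpy as np

# Cleartext analogue of the encrypted comparison (illustration only).
ref = np.loadtxt("enroll-emb.txt", delimiter=",")   # written by extract_emb(..., mode="enroll")
probe = np.loadtxt("auth-emb.txt", delimiter=",")   # written by extract_emb(..., mode="auth")
score = float(np.dot(ref, probe))                   # unit-norm vectors: dot product == cosine
THRESHOLD = 0.3                                     # hypothetical operating point
print("match" if score >= THRESHOLD else "no match", round(score, 4))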
bin/decDecision.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7675db4cd1c61743d73b78364ccefa313b408b09713e3b16e19f5a6eee087bb
+size 8045152
bin/encProbe.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3959987099fb521134b6c46c367b2b6f6106bf9bb45eda98108e26710376f078
+size 8030536
bin/encReference.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6705fbd4703ab2bc52d4af5719f5ece369de22d214dda966b124a85803c3a92d
+size 8069184
bin/genKeys.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:30c5e81854404a40c06ac9b727a17907536b1490defb055831921022b25e6e7c
+size 8079376
bin/recDecision.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97d6a8be345463779f296ae73c70a082ea90e2c41add0f99f6875d75ff753c75
+size 8057936
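
These five LFS-tracked binaries implement the FHE pipeline that app.py drives through runBinFile(). Their command-line convention (binary path, security level, command, optional threshold) is inferred from the Gradio click handlers above, not from any documentation in this commit; a hedged headless sketch, assuming extract_emb() has already written enroll-emb.txt and auth-emb.txt:

import subprocess

def run(*args):
    # Same invocation pattern as runBinFile() in app.py (inferred CLI).
    return subprocess.run(list(args), capture_output=True, text=True)

level = "128"                                       # one of SECURITYLEVELS
run("./genKeys.bin", level, "genkeys")              # generate FHE public/secret keys
run("./encReference.bin", level, "encrypt")         # encrypt the reference embedding
run("./encProbe.bin", level, "encrypt")             # encrypt the probe embedding
run("./recDecision.bin", level, "decision", "133")  # encrypted decision vs. threshold
run("./decDecision.bin", level, "decision")         # decrypt the decision
print(run("./decDecision.bin", level, "print").stdout)  # "match" / "no match"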
lookupTables/Borders_nB_3_dimF_512.txt ADDED
@@ -0,0 +1 @@
+-0.050880,-0.029846,-0.014102,-0.000000,0.014102,0.029846,0.050880
lookupTables/MFIP_nB_3_dQ_0.001_dimF_512.txt ADDED
@@ -0,0 +1,8 @@
+5,3,2,1,-1,-2,-3,-5
+3,2,1,0,0,-1,-2,-3
+2,1,0,0,0,0,-1,-2
+1,0,0,0,0,0,0,-1
+-1,0,0,0,0,0,0,1
+-2,-1,0,0,0,0,1,2
+-3,-2,-1,0,0,1,2,3
+-5,-3,-2,-1,1,2,3,5
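
One plausible reading of these two tables: Borders_nB_3_dimF_512.txt holds the 7 bin edges that quantize each of the 512 embedding features into 2^3 = 8 levels (nB_3), and MFIP_nB_3_dQ_0.001_dimF_512.txt is the 8x8 score table for each pair of levels, so the encrypted inner product reduces to table lookups. This would also explain the decision slider's 0 to 512*5 range in app.py, since each dimension contributes at most 5. How the binaries actually consume these files is not shown in this commit; a cleartext sketch under that assumption:

import numpy as np

# Assumed lookup-table inner product: quantize each feature with the borders,
# then sum the table entry for every (reference level, probe level) pair.
borders = np.loadtxt("lookupTables/Borders_nB_3_dimF_512.txt", delimiter=",")
mfip = np.loadtxt("lookupTables/MFIP_nB_3_dQ_0.001_dimF_512.txt", delimiter=",", dtype=int)

def lut_score(ref, probe):
    ref_q = np.digitize(ref, borders)       # level index 0..7 per dimension
    probe_q = np.digitize(probe, borders)
    return int(mfip[ref_q, probe_q].sum())  # at most 512 * 5, cf. the threshold slider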
requirements.txt ADDED
@@ -0,0 +1,7 @@
+gradio
+transformers
+torch
+timm
+opencv-python
+pillow
+torchvision
static/original.jpg ADDED
static/reconstructed.png ADDED