import os
import subprocess
import time

import gradio as gr
import numpy as np
import timm
import torch
import torch.nn.functional as F
from PIL import Image
from torchvision import transforms
from facenet_pytorch import MTCNN
# Face detector used to locate and crop faces before embedding extraction.
mtcnn = MTCNN(keep_all=False)

def crop_face_to_112x112(image: Image.Image):
    """Detect a face and return it cropped and resized to the 112x112 input size of the FR models."""
    if image.size == (112, 112):
        return image
    boxes, _ = mtcnn.detect(image)
    if boxes is None:
        raise ValueError("No face detected.")
    x1, y1, x2, y2 = map(int, boxes[0])
    cropped = image.crop((x1, y1, x2, y2))
    return cropped.resize((112, 112), Image.BILINEAR)
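
# Minimal usage sketch ("subject.jpg" is a hypothetical local file, for illustration only):
#   img = Image.open("subject.jpg").convert("RGB")
#   face = crop_face_to_112x112(img)  # 112x112 PIL image, ready for embedding extraction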
# Supported FHE security levels (in bits).
SECURITYLEVELS = ["128", "192", "256"]
# Face recognition models hosted on the Hugging Face Hub.
FRMODELS = [
    "gaunernst/vit_tiny_patch8_112.arcface_ms1mv3",
    "gaunernst/vit_tiny_patch8_112.cosface_ms1mv3",
    # "gaunernst/vit_tiny_patch8_112.adaface_ms1mv3",
    # "gaunernst/vit_small_patch8_gap_112.cosface_ms1mv3",
    # "gaunernst/convnext_nano.cosface_ms1mv3",
    # "gaunernst/convnext_atto.cosface_ms1mv3",
]
def runBinFile(*args):
    """Run a compiled FHE binary; interpret stdout according to the mode flag in args."""
    binary_path = args[0]
    if not os.path.isfile(binary_path):
        return False, "<b>Error:</b> compiled binary not found."
    try:
        os.chmod(binary_path, 0o755)
        start = time.time()
        result = subprocess.run(
            list(args),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True
        )
        duration = (time.time() - start) * 1000
        if 'print' in args:
            # Raw stdout, e.g. a ciphertext dump.
            return result.stdout
        elif 'styledPrint' in args:
            # Stdout rendered as a styled Match / No Match verdict.
            return styled_output(result.stdout)
        elif result.returncode == 0:
            return True, f"<b>⏱️ Processing Time:</b> {duration:.0f} ms"
        else:
            return False, f"<b>Error:</b> binary exited with code {result.returncode}."
    except Exception as e:
        return False, f"<b>Execution failed:</b> {e}"
example_images = ['./VGGFace2/n000001/0002_01.jpg',
'./VGGFace2/n000149/0002_01.jpg',
'./VGGFace2/n000082/0001_02.jpg',
'./VGGFace2/n000148/0014_01.jpg',
'./VGGFace2/n000129/0001_01.jpg',
'./VGGFace2/n000394/0007_01.jpg',
]
example_images_auth = ['./VGGFace2/n000001/0013_01.jpg',
'./VGGFace2/n000149/0019_01.jpg',
'./VGGFace2/n000082/0003_03.jpg',
'./VGGFace2/n000148/0043_01.jpg',
'./VGGFace2/n000129/0006_01.jpg',
'./VGGFace2/n000394/0018_01.jpg',
]
def display_image(image):
return image
def load_rec_image():
    return 'static/reconstructed.png'
def extract_emb(image, modelName=FRMODELS[0], mode=None):
    """Extract an L2-normalized face embedding; optionally save it to '<mode>-emb.txt'."""
    # Deterministic preprocessing only: random augmentation (e.g. horizontal flips)
    # would make enrollment and authentication embeddings irreproducible.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    image = transform(image).unsqueeze(0)
    # Note: the model is re-created on every call; caching it (e.g. with
    # functools.lru_cache) would speed up repeated extractions.
    model = timm.create_model(f"hf_hub:{modelName}", pretrained=True).eval()
    with torch.no_grad():
        embs = model(image)
    embs = F.normalize(embs, dim=1)
    embs = embs.squeeze(0).numpy()
    if mode is not None:
        np.savetxt(f'{mode}-emb.txt', embs.reshape(1, -1), fmt="%.6f", delimiter=',')
    return embs
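
# Plaintext analogue of the encrypted decision, for illustration only (the demo
# computes this under FHE via the ./bin binaries):
#   ref = extract_emb(ref_img, FRMODELS[0])
#   probe = extract_emb(probe_img, FRMODELS[0])
#   score = float(np.dot(ref, probe))  # cosine similarity, since embeddings are L2-normalized
#   match = score >= threshold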
def get_selected_image(evt: gr.SelectData):
return example_images[evt.index]
def get_selected_image_auth(evt: gr.SelectData):
return example_images_auth[evt.index]
def styled_output(result):
    if result.strip().lower() == "match":
        return "<span style='color: green; font-weight: bold;'>✔️ Match</span>"
    elif result.strip().lower() == "no match":
        return "<span style='color: red; font-weight: bold;'>❌ No Match</span>"
    else:
        return "<span style='color: red; font-weight: bold;'>Error</span>"
with gr.Blocks() as demo:
gr.HTML(
"""
<h1 align="center">Suraksh.AI</h1>
<p align="center">
<a href="https://suraksh-ai.vercel.app/"> https://suraksh-ai.vercel.app/</a>
</p>
"""
)
gr.Markdown("# Biometric verification (1:1 matching) Using Fully Homomorphic Encryption (FHE)")
gr.HTML(
"""
        <p>This demo shows <strong>Suraksh.AI's</strong> biometric verification solution under <strong>FHE</strong>.</p>
        <ul>
        <li><strong>Scenario 1</strong>: Verifying an enrolled subject. Use a reference and a probe from the same subject. Expected outcome: <span style='color: green; font-weight: bold;'>✔️ Match</span></li>
        <li><strong>Scenario 2</strong>: Verifying an enrolled subject with a high recognition threshold. Use a reference and a probe from the same subject and raise the recognition threshold. Expected outcome: <span style='color: red; font-weight: bold;'>❌ No Match</span></li>
        <li><strong>Scenario 3</strong>: Verifying a non-enrolled subject. Choose a probe from a subject who is not enrolled. Expected outcome: <span style='color: red; font-weight: bold;'>❌ No Match</span></li>
        <li><strong>Scenario 4</strong>: Verifying a non-enrolled subject with a low recognition threshold. Choose a probe from a subject who is not enrolled and lower the recognition threshold. Expected outcome: <span style='color: green; font-weight: bold;'>✔️ Match</span></li>
        </ul>
"""
)
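
    # Toy illustration of the four scenarios in plaintext terms (hypothetical scores):
    #   genuine_score, impostor_score = 0.62, 0.11  # cosine similarities
    #   genuine_score >= 0.35   # Scenario 1: Match
    #   genuine_score >= 0.80   # Scenario 2: No Match (threshold raised)
    #   impostor_score >= 0.35  # Scenario 3: No Match
    #   impostor_score >= 0.05  # Scenario 4: Match (threshold lowered)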
with gr.Row():
gr.Markdown("## Phase 1: Enrollment")
with gr.Row():
gr.Markdown("### Step 1: Upload or select a reference facial image for enrollment.")
with gr.Row():
image_input_enroll = gr.Image(type="pil", visible=False)
with gr.Column():
image_upload_enroll = gr.Image(label="Upload a reference facial image.", type="pil", sources="upload")
image_upload_enroll.change(fn=crop_face_to_112x112, inputs=image_upload_enroll, outputs=image_input_enroll)
with gr.Column():
example_gallery = gr.Gallery(value=example_images, columns=3)
example_gallery.select(fn=get_selected_image, inputs=None, outputs=image_input_enroll)
with gr.Column():
selectedImage = gr.Image(type="pil", label="Reference facial image", interactive=False)
image_input_enroll.change(fn=lambda img: img, inputs=image_input_enroll, outputs=selectedImage)
with gr.Row():
gr.Markdown("### Step 2: Generate reference embedding.")
with gr.Row():
with gr.Column():
            modelName = gr.Dropdown(
                choices=FRMODELS,
                value=FRMODELS[0],
                label="Choose a face recognition model"
            )
with gr.Column():
key_button = gr.Button("Generate embedding")
enroll_emb_text = gr.JSON(label="Reference embedding")
mode = gr.State("enroll")
key_button.click(fn=extract_emb, inputs=[image_input_enroll, modelName, mode], outputs=enroll_emb_text)
with gr.Row():
gr.Markdown("""Facial embeddings are **INVERTIBLE** and lead to the **RECONSTRUCTION** of their raw facial images.""")
with gr.Row():
gr.Markdown("### Example:")
with gr.Row():
original_image = gr.Image(value="static/original.jpg", label="Original", sources="upload")
key_button = gr.Button("Generate embedding")
output_text = gr.JSON(label="Target embedding")
key_button.click(fn=extract_emb, inputs=[original_image, modelName], outputs=output_text)
btn = gr.Button("Reconstruct facial image")
Reconstructed_image = gr.Image(label="Reconstructed")
btn.click(fn=load_rec_image, outputs=Reconstructed_image)
with gr.Row():
gr.Markdown("""Facial embeddings protection is a must! At **Suraksh.AI**, we protect facial embeddings using FHE.""")
with gr.Row():
gr.Markdown("### Step 3: πŸ” Generate the FHE public and secret keys.")
with gr.Row():
with gr.Column():
            securityLevel = gr.Dropdown(
                choices=SECURITYLEVELS,
                value=SECURITYLEVELS[0],
                label="Choose a security level"
            )
with gr.Column():
key_button = gr.Button("Generate the FHE public and secret keys")
key_status = gr.Checkbox(label="FHE Public and Secret keys generated.", value=False)
time_output = gr.HTML()
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/genKeys.bin"), securityLevel, gr.State("genkeys")], outputs=[key_status,time_output])
with gr.Row():
gr.Markdown("### Step 4: πŸ”’ Encrypt reference embedding using FHE.")
with gr.Row():
with gr.Column():
key_button = gr.Button("Encrypt")
key_status = gr.Checkbox(label="Reference embedding encrypted.", value=False)
time_output = gr.HTML()
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/encReference.bin"), securityLevel, gr.State("encrypt")], outputs=[key_status,time_output])
with gr.Column():
key_button = gr.Button("Display")
output_text = gr.Text(label="Encrypted embedding", lines=3, interactive=False)
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/encReference.bin"), securityLevel, gr.State("print")], outputs=output_text)
with gr.Row():
gr.Markdown("## Phase 2: Authentication")
with gr.Row():
gr.Markdown("### Step 1: Upload or select a probe facial image for authentication.")
with gr.Row():
image_input_auth = gr.Image(type="pil", visible=False)
with gr.Column():
image_upload_auth = gr.Image(label="Upload a facial image.", type="pil", sources="upload")
image_upload_auth.change(fn=crop_face_to_112x112, inputs=image_upload_auth, outputs=image_input_auth)
with gr.Column():
example_gallery = gr.Gallery(value=example_images_auth, columns=3)
example_gallery.select(fn=get_selected_image_auth, inputs=None, outputs=image_input_auth)
with gr.Column():
selectedImage = gr.Image(type="pil", label="Probe facial image", interactive=False)
image_input_auth.change(fn=lambda img: img, inputs=image_input_auth, outputs=selectedImage)
with gr.Row():
gr.Markdown("### Step 2: Generate probe facial embedding.")
with gr.Row():
with gr.Column():
key_button = gr.Button("Generate embedding")
enroll_emb_text = gr.JSON(label="Probe embedding")
mode = gr.State("auth")
key_button.click(fn=extract_emb, inputs=[image_input_auth, modelName, mode], outputs=enroll_emb_text)
with gr.Row():
gr.Markdown("### Step 3: πŸ”€ Generate protected probe embedding.")
with gr.Row():
with gr.Column():
key_button = gr.Button("Protect")
key_status = gr.Checkbox(label="Probe embedding protected.", value=False)
time_output = gr.HTML()
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/encProbe.bin"), securityLevel, gr.State("encrypt")], outputs=[key_status,time_output])
with gr.Column():
key_button = gr.Button("Display")
output_text = gr.Text(label="Protected embedding", lines=3, interactive=False)
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/encProbe.bin"), securityLevel, gr.State("print")], outputs=output_text)
with gr.Row():
gr.Markdown("### Step 4: πŸ”’ Compute biometric recognition decision using the threshold under FHE.")
with gr.Row():
gr.Markdown("### Set the recognition threshold.")
with gr.Row():
        slider_threshold = gr.Slider(-512*5, 512*5, step=1, value=133, label="Decision threshold", info="The higher, the stricter.", interactive=True)
        number_threshold = gr.Textbox(visible=False, value='133')
        slider_threshold.change(fn=lambda x: x, inputs=slider_threshold, outputs=number_threshold)
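
        # Note (assumption): the slider range of ±512*5 suggests the encrypted score is an
        # integer-scaled inner product rather than a raw cosine in [-1, 1]; a float cosine
        # threshold t would then map to this scale roughly as round(t * SCALE), where SCALE
        # is whatever fixed-point factor the ./bin binaries apply to the embeddings.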
with gr.Row():
with gr.Column():
key_button = gr.Button("Biometric recognition under FHE")
key_status = gr.Checkbox(label="Recognition decision encrypted.", value=False)
time_output = gr.HTML()
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/recDecision.bin"), securityLevel, gr.State("decision"), number_threshold], outputs=[key_status,time_output])
with gr.Column():
key_button = gr.Button("Display")
output_text = gr.Text(label="Encrypted decision", lines=3, interactive=False)
key_button.click(fn=runBinFile, inputs=[gr.State("./bin/recDecision.bin"), securityLevel, gr.State("print")], outputs=output_text)
with gr.Row():
gr.Markdown("### Step 5: πŸ”‘ Decrypt biometric recognition decision.")
with gr.Row():
with gr.Column(scale=1):
decision_button = gr.Button("Decrypt")
decision_status = gr.Checkbox(label="Recognition decision decrypted.", value=False)
time_output = gr.HTML()
decision_button.click(fn=runBinFile, inputs=[gr.State("./bin/decDecision.bin"), securityLevel, gr.State("decision")], outputs=[decision_status, time_output])
with gr.Column(scale=3):
with gr.Row():
check_button = gr.Button("Check")
with gr.Row():
with gr.Column(scale=1):
final_output = gr.HTML()
check_button.click(fn=runBinFile, inputs=[gr.State("./bin/decDecision.bin"), securityLevel, gr.State("styledPrint")], outputs=final_output)
with gr.Column(scale=1):
image_output_enroll = gr.Image(label="Reference", sources="upload")
image_input_enroll.change(fn=display_image, inputs=image_input_enroll, outputs=image_output_enroll)
with gr.Column(scale=1):
image_output_auth = gr.Image(label="Probe", sources="upload")
image_input_auth.change(fn=display_image, inputs=image_input_auth, outputs=image_output_auth)
demo.launch()