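"""Face Match Detector.

A Gradio app that compares two face photos with the face_recognition library
and reports whether they appear to show the same person, with a match score.
"""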
import datetime as DT

import cv2
import face_recognition
import gradio as gr
import pytz

# Last requester's IP, attached per request by __attachIp below.
ipAddress = None
def __nowInIST():
    return DT.datetime.now(pytz.timezone("Asia/Kolkata"))
def __attachIp(request: gr.Request):
    global ipAddress
    x_forwarded_for = request.headers.get("x-forwarded-for")
    if x_forwarded_for:
        ipAddress = x_forwarded_for
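# Note: behind multiple proxies, X-Forwarded-For may hold a comma-separated
# chain ("client, proxy1, proxy2"); the first entry is the original client.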
def pprint(log: str):
    now = __nowInIST().strftime("%Y-%m-%d %H:%M:%S")
    print(f"[{now}] [{ipAddress}] {log}")
def __findFaceEncodings(imagePath):
    image = cv2.imread(imagePath)
    if image is None:
        return None
    # cv2.imread returns BGR; face_recognition expects RGB.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    faceEncodings = face_recognition.face_encodings(image)
    return faceEncodings[0] if faceEncodings else None
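# Note: face_recognition.load_image_file(imagePath) loads an image directly
# in RGB and could replace the cv2.imread + cvtColor pair above.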
def predictMatch(firstImage, secondImage):
    pprint("Starting the job...")
    image1Encoding = __findFaceEncodings(firstImage)
    image2Encoding = __findFaceEncodings(secondImage)
    if image1Encoding is None or image2Encoding is None:
        pprint("Job aborted: no face found in at least one image.")
        return "Could not detect a face in one or both images."
    distance = face_recognition.face_distance([image1Encoding], image2Encoding)
    distancePercent = round(distance[0] * 100)
    matchPercent = 100 - distancePercent
    pprint(f"Job finished. Match : {matchPercent}%")
    isSame = matchPercent > 50
    amplificationFactor = 1.5
    adjustment = (amplificationFactor - 0.5) * 100
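    # With amplificationFactor = 1.5, adjustment = 100, so a raw 60% match is
    # reported as round((60 + 100) / (100 + 100) * 100) = 80%, while a perfect
    # 100% raw match still maps to 100%.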
    outputTexts = []
    if isSame:
        outputTexts.append("The images are of the same person ✅")
        matchScore = round((matchPercent + adjustment) / (100 + adjustment) * 100)
        outputTexts.append(f"\nMatch Score: {matchScore}%")
    else:
        outputTexts.append("The images are not of the same person ❌")
    outputText = "\n".join(outputTexts)
    pprint(f"{outputText=}")
    return outputText
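# Example (file names are hypothetical):
#   predictMatch("person_a_1.jpg", "person_a_2.jpg")
#   -> "The images are of the same person ✅\n\nMatch Score: 85%"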
with gr.Row(elem_classes=["main-container"]):
    with gr.Row(elem_classes=["img-container"]):
        firstImage = gr.Image(type="filepath", height=300, elem_classes=["image"], label="1st Image", container=True)
        secondImage = gr.Image(type="filepath", height=300, elem_classes=["image"], label="2nd Image", container=True)
    with gr.Row(elem_classes=["output-container"]):
        result = gr.Textbox(label="Result", elem_classes=["output"], scale=2)
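# The pre-built components above are handed to gr.Interface below; Gradio
# accepts existing component instances as inputs and outputs.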
with gr.Interface(
    fn=predictMatch,
    inputs=[firstImage, secondImage],
    outputs=[result],
    title="Face Match Detector",
    allow_flagging="never",
) as demo:
    # Capture the caller's IP whenever the page loads.
    demo.load(__attachIp, None, None)

demo.launch(debug=True)