# Face Match Detector — Gradio app (Hugging Face Space).
import cv2
import face_recognition
import gradio as gr
import datetime as DT
import pytz
# Last client IP observed via the x-forwarded-for header (set by __attachIp).
# NOTE(review): a single module-level value is shared by all concurrent
# sessions — confirm this is acceptable for multi-user deployments.
ipAddress = None
def __nowInIST():
    """Return the current wall-clock time localized to Indian Standard Time."""
    ist = pytz.timezone("Asia/Kolkata")
    return DT.datetime.now(ist)
def __attachIp(request: gr.Request):
    """Record the caller's forwarded IP in the module-level ipAddress.

    Reads the 'x-forwarded-for' header from the incoming Gradio request;
    leaves ipAddress untouched when the header is absent or empty.
    """
    global ipAddress
    forwarded = request.headers.get('x-forwarded-for')
    if forwarded:
        ipAddress = forwarded
def pprint(log: str):
    """Print a log line stamped with IST time and the last-seen client IP."""
    timestamp = __nowInIST().strftime("%Y-%m-%d %H:%M:%S")
    print(f"[{timestamp}] [{ipAddress}] {log}")
def __findFaceEncodings(imagePath):
    """Load an image from disk and return the encoding of the first detected face.

    Returns None when the file cannot be read or no face is detected.
    """
    image = cv2.imread(imagePath)
    if image is None:
        # Unreadable/missing file: cv2.imread signals failure by returning None.
        return None
    # cv2.imread yields BGR channel order; face_recognition expects RGB,
    # so convert before encoding (feeding BGR silently degrades matching).
    rgbImage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    faceEncodings = face_recognition.face_encodings(rgbImage)
    return faceEncodings[0] if faceEncodings else None
def predictMatch(firstImage, secondImage):
    """Compare two face images and return a human-readable match verdict.

    Args:
        firstImage: filesystem path to the first image (Gradio filepath input).
        secondImage: filesystem path to the second image.

    Returns:
        A multi-line string: same/different verdict, plus an amplified
        match-score percentage when the faces match.
    """
    pprint("Starting the job...")
    image1Encoding = __findFaceEncodings(firstImage)
    image2Encoding = __findFaceEncodings(secondImage)

    # __findFaceEncodings returns None when the image is unreadable or contains
    # no detectable face; passing None into face_distance would crash, so bail
    # out with a friendly message instead.
    if image1Encoding is None or image2Encoding is None:
        outputText = "Could not detect a face in one or both images ⚠️"
        pprint(outputText)
        return outputText

    distance = face_recognition.face_distance([image1Encoding], image2Encoding)
    distancePercent = round(distance[0] * 100)
    matchPercent = 100 - distancePercent
    pprint(f"Job finished. Match : {matchPercent}%")

    isSame = matchPercent > 50
    # Display-only amplification: maps the raw match percentage onto a more
    # flattering scale for the UI; does not affect the same/different decision.
    amplificationFactor = 1.5
    adjustment = (amplificationFactor - 0.5) * 100

    outputTexts = []
    if isSame:
        outputTexts.append("The images are of the same person ✅")
        matchScore = round((matchPercent + adjustment) / (100 + adjustment) * 100)
        outputTexts.append(f"\nMatch Score: {matchScore}%")
    else:
        outputTexts.append("The images are not of the same person ❌")

    outputText = "\n".join(outputTexts)
    pprint(f"{outputText=}")
    return outputText
# --- UI layout --------------------------------------------------------------
# NOTE(review): these Rows are created at module top level, outside any
# gr.Blocks()/gr.Interface() context — confirm Gradio attaches them as
# intended; layout containers normally live inside a Blocks context.
with gr.Row(elem_classes=["main-container"]):
    with gr.Row(elem_classes=["img-container"]):
        # type='filepath' hands predictMatch a path on disk, which
        # __findFaceEncodings then reads with cv2.imread.
        firstImage = gr.Image(type='filepath', height=300, elem_classes=["image"], label="1st Image", container=True)
        secondImage = gr.Image(type='filepath', height=300, elem_classes=["image"], label="2nd Image", container=True)
    with gr.Row(elem_classes=["output-container"]):
        result = gr.Textbox(label="Result", elem_classes=["output"], scale=2)
# Wire the pre-built components into an Interface (an Interface is a Blocks
# subclass, hence usable as a context manager) and launch the app.
with gr.Interface(
    fn=predictMatch,
    inputs=[
        firstImage,
        secondImage,
    ],
    outputs=[
        result
    ],
    title="Face Match Detector",
    # NOTE(review): allow_flagging is deprecated in newer Gradio releases in
    # favor of flagging_mode — confirm against the installed version.
    allow_flagging="never",
) as demo:
    # Runs once per client page load; captures the caller's forwarded IP
    # into the module-level ipAddress used by pprint.
    demo.load(__attachIp, None, None)
demo.launch(debug=True)