# TrabajoEquipo3 / app.py
# Author: KaraSpdrnr — "Update app.py" (commit 027ca8f, verified)
from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
import os
import shutil
from os import path
from zipfile import ZipFile
from shutil import make_archive
import imutils
import time
import dlib
import cv2
from google.colab.patches import cv2_imshow
import os
from imutils import paths
import numpy as np
import pickle
import vptree
# Configuration for the VP-tree near-duplicate face lookup.
args = {
    "tree": "tree.pickle",           # serialized VP tree of face hashes
    "hashes": "hashesCaras.pickle",  # hash -> image-paths dictionary
    "distance": 10,                  # Hamming distance threshold for a match
}
# Extract the face's bounding rectangle, clipped to the image.
def convert_and_trim_bb(image, rect):
    """Clip a dlib rectangle to the image bounds and return (x, y, w, h).

    Parameters:
        image: array whose .shape yields (height, width, ...).
        rect: dlib rectangle exposing left()/top()/right()/bottom().

    Returns:
        Tuple (startX, startY, width, height): the start corner is clamped
        to be non-negative and the end corner is clamped to the image's
        spatial dimensions.
    """
    height, width = image.shape[0], image.shape[1]
    # Clamp each corner so the box lies fully inside the image.
    x0 = max(0, rect.left())
    y0 = max(0, rect.top())
    x1 = min(rect.right(), width)
    y1 = min(rect.bottom(), height)
    # Report the box as origin plus extent.
    return (x0, y0, x1 - x0, y1 - y0)
# dlib's built-in HOG-based frontal face detector (no external model file).
detector = dlib.get_frontal_face_detector()
# Initial image is modified
# final hash size is hashSize^2 bits
def dhash(image, hashSize=8):
    """Compute the difference hash (dHash) of an image.

    Parameters:
        image: BGR (3-channel) or single-channel image as a numpy array.
        hashSize: side length of the gradient grid; the hash has
            hashSize^2 bits.

    Returns:
        int: the dHash value.
    """
    # Convert to gray only when the input really has 3 channels.
    # FIX: guard on ndim first — the original indexed image.shape[2]
    # unconditionally, which raises IndexError for 2-D grayscale input.
    if image.ndim == 3 and image.shape[2] == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Resize to hashSize (add 1 column so horizontal gradients exist)
    resized = cv2.resize(image, (hashSize + 1, hashSize))
    # Compute relative horizontal gradient in columns
    diff = resized[:, 1:] > resized[:, :-1]
    # Pack the boolean gradient grid into a single integer hash.
    return sum(2 ** i for (i, v) in enumerate(diff.flatten()) if v)
def convert_hash(h):
    """Coerce a hash value (possibly a NumPy scalar) into a plain int.

    NOTE(review): the round-trip through float64 loses precision for
    hashes wider than 53 bits; kept unchanged because the pickled VP-tree
    index was built with exactly this conversion.
    """
    return int(np.float64(h))
def hamming(a, b):
    """Hamming distance between two integer hashes.

    Equals the population count of a XOR b: the number of bit positions
    where the two hashes disagree.
    """
    xor = int(a) ^ int(b)
    return bin(xor).count("1")
# Load the prebuilt VP-tree index and the hash -> image-paths dictionary.
# NOTE(review): pickle is only safe on trusted data; these files ship with
# the app, so that assumption appears to hold — verify deployment.
# FIX: use context managers so the file handles are closed deterministically
# (the original opened both files and never closed them).
with open(args["tree"], "rb") as f:
    tree = pickle.load(f)
with open(args["hashes"], "rb") as f:
    hashes = pickle.load(f)
def FaceDetect(img):
    """Find faces in *img* and look up visually similar indexed faces.

    Each detected face crop is dHash-ed and queried against the VP tree
    within the configured Hamming-distance threshold; every matching
    indexed image path is collected.

    Parameters:
        img: BGR image as a numpy array (supplied by Gradio).

    Returns:
        str: newline-separated matching image paths, or a short status
        message when no face or no match is found.
    """
    scanned_hashes = []
    # FIX: the original read `image` before assignment (NameError on every
    # call) — the parameter is named `img`.
    image = imutils.resize(img, width=600)
    # dlib's detector expects RGB channel ordering.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    upsample = 3  # upsample 3x so smaller faces are still detected
    rects = detector(rgb, upsample)
    # FIX: the original printed a profane debug message and returned None,
    # so the Gradio output never showed anything.
    if len(rects) == 0:
        return "No face detected"
    boxes = [convert_and_trim_bb(image, r) for r in rects]
    for (x, y, w, h) in boxes:
        cara = image[y:y + h, x:x + w]
        # FIX: dropped the Colab-only cv2_imshow(cara) preview — it only
        # works inside Google Colab and crashes in a deployed app.
        try:
            face_hash = dhash(cara.copy())
        except Exception:
            # Skip crops that cannot be hashed (e.g. zero-area boxes).
            continue
        scanned_hashes.append(convert_hash(face_hash))
    matched_paths = []
    for face_hash in scanned_hashes:
        # All indexed hashes within the Hamming-distance threshold,
        # ordered by distance.
        results = sorted(tree.get_all_in_range(face_hash, args["distance"]))
        for (_distance, indexed_hash) in results:
            for p in hashes.get(indexed_hash, []):
                if p not in matched_paths:
                    matched_paths.append(p)
    for p in matched_paths:
        print(p)
    return "\n".join(matched_paths) if matched_paths else "No matches found"
# Build and launch the Gradio UI (public share link disabled).
demo = gr.Interface(
    fn=FaceDetect,
    inputs=gr.inputs.Image(shape=(128, 128)),
    outputs=gr.outputs.Label(num_top_classes=3),
    examples=['Pawn.jpg', 'Knight.jpg'],
)
demo.launch(share=False)