File size: 3,481 Bytes
1d4b84a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e764f1f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1d4b84a
e764f1f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1d4b84a
e764f1f
 
 
 
 
 
1d4b84a
e764f1f
 
 
 
 
 
 
 
 
 
 
 
 
 
027ca8f
e764f1f
1d4b84a
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
from huggingface_hub import from_pretrained_fastai
import gradio as gr
from fastai.vision.all import *
import os
import shutil
from os import path
from zipfile import ZipFile
from shutil import make_archive
import imutils
import time
import dlib
import cv2
from google.colab.patches import cv2_imshow
import os
from imutils import paths
import numpy as np
import pickle
import vptree

# Runtime configuration for the similarity search.
args = {
    # Pickled VP-tree built by the indexing step.
    "tree": "tree.pickle",
    # Pickled dict mapping hash -> list of image paths.
    "hashes": "hashesCaras.pickle",
    # Maximum Hamming distance for two hashes to count as a match.
    "distance": 10,
}


# Clip a dlib detection rectangle to the image and convert it to
# (x, y, w, h) form.
def convert_and_trim_bb(image, rect):
    """Return ``(startX, startY, width, height)`` for *rect*, clipped so
    the box lies entirely inside *image*.

    Parameters
    ----------
    image : numpy.ndarray
        Image whose spatial dimensions bound the box (shape (H, W[, C])).
    rect
        Detection object exposing ``left()``/``top()``/``right()``/
        ``bottom()`` accessors (e.g. ``dlib.rectangle``).

    Returns
    -------
    tuple of int
        ``(x, y, width, height)`` of the clipped bounding box.
    """
    # Clamp every edge into the image bounds: no negative origin, and
    # the far edges may not exceed the image width/height.
    x0 = max(0, rect.left())
    y0 = max(0, rect.top())
    x1 = min(rect.right(), image.shape[1])
    y1 = min(rect.bottom(), image.shape[0])
    # Width/height form is what the downstream array-slice cropping uses.
    return (x0, y0, x1 - x0, y1 - y0)

detector = dlib.get_frontal_face_detector()
# NOTE: the input image may be converted to grayscale in place of the
# local reference; the final hash is hashSize**2 bits.
def dhash(image, hashSize=8):
    """Compute the difference hash (dHash) of an image.

    The image is reduced to grayscale, resized to a
    ``(hashSize + 1) x hashSize`` grid, and the sign of the horizontal
    brightness gradient is packed into a ``hashSize**2``-bit integer.

    Parameters
    ----------
    image : numpy.ndarray
        BGR color image ``(H, W, 3)`` or an already-grayscale ``(H, W)``
        image.
    hashSize : int, optional
        Side length of the hash grid (default 8 -> 64-bit hash).

    Returns
    -------
    int
        The perceptual hash as a plain Python integer.
    """
    # BUG FIX: check the number of dimensions before indexing shape[2] —
    # the original read image.shape[2] unconditionally and raised
    # IndexError for 2-D (grayscale) input.
    if len(image.shape) == 3 and image.shape[2] == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # One extra column so adjacent-column differences yield exactly
    # hashSize values per row.
    resized = cv2.resize(image, (hashSize + 1, hashSize))
    # Relative horizontal gradient: True where brightness increases
    # left-to-right.
    diff = resized[:, 1:] > resized[:, :-1]
    # Pack the boolean grid into one integer: bit i is set iff the i-th
    # flattened gradient entry is True.
    return sum(2 ** i for (i, v) in enumerate(diff.flatten()) if v)

def convert_hash(h):
    """Coerce a hash value *h* (possibly a NumPy scalar) to a plain
    Python ``int`` by round-tripping through float64.

    NOTE(review): float64 has a 53-bit mantissa, so 64-bit hashes lose
    their low-order bits here. Kept as-is deliberately — the pickled
    VP-tree was built with the same conversion, and changing it would
    break lookups against the stored hashes.
    """
    return int(np.float64(h))

def hamming(a, b):
    """Return the Hamming distance between two integer hashes.

    XOR leaves a 1-bit wherever the operands differ, so the distance is
    the population count of ``a ^ b``.
    """
    differing_bits = int(a) ^ int(b)
    return format(differing_bits, "b").count("1")

# Load the pre-built VP-tree and the hash -> image-paths dictionary.
# BUG FIX: the original used pickle.loads(open(...).read()) and never
# closed the file handles; context managers guarantee closure.
# NOTE: pickle is only acceptable here because both files are produced
# locally by the indexing step — never unpickle untrusted input.
with open(args["tree"], "rb") as tree_file:
    tree = pickle.load(tree_file)
with open(args["hashes"], "rb") as hashes_file:
    hashes = pickle.load(hashes_file)


def FaceDetect(img):
    """Detect faces in *img*, hash each one, and report visually similar
    faces from the pre-built VP-tree index.

    Parameters
    ----------
    img : numpy.ndarray
        BGR image as delivered by the Gradio input component.

    Returns
    -------
    str
        Newline-separated list of matching image paths; an empty string
        when no face (or no match) is found. The original returned
        ``None``, which Gradio's Label output cannot display.
    """
    # BUG FIX: the original body read an undefined name ``image`` —
    # the parameter is ``img`` — so every call raised NameError.
    image = imutils.resize(img, width=600)
    # dlib expects RGB ordering; OpenCV delivers BGR.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    # Upsample 3x before detection to help with small faces.
    upsample = 3
    rects = detector(rgb, upsample)

    # Discard if no detection.
    if len(rects) == 0:
        print("No face detected")
        return ""

    # Hash every detected face.
    scanned_hashes = []
    boxes = [convert_and_trim_bb(image, r) for r in rects]
    for (x, y, w, h) in boxes:
        cara = image[y:y + h, x:x + w]
        cv2_imshow(cara)
        # dhash can fail on degenerate (zero-area) crops; skip those
        # rather than aborting the whole request. The original used a
        # bare ``except:``, which also swallowed KeyboardInterrupt.
        try:
            face_hash = dhash(cara.copy())
        except Exception:
            continue
        scanned_hashes.append(convert_hash(face_hash))

    # Query the VP-tree for every face hash and collect unique paths.
    # BUG FIX: the original's printing loop was mis-indented inside the
    # dedup branch, so results were printed repeatedly and partially.
    matches = []
    for face_hash in scanned_hashes:
        results = sorted(tree.get_all_in_range(face_hash, args["distance"]))
        for (_, h) in results:
            for p in hashes.get(h, []):
                if p not in matches:
                    matches.append(p)
    for p in matches:
        print(p)
    return "\n".join(matches)
              


    
# Build the Gradio interface and launch it locally.
demo = gr.Interface(
    fn=FaceDetect,
    inputs=gr.inputs.Image(shape=(128, 128)),
    outputs=gr.outputs.Label(num_top_classes=3),
    examples=['Pawn.jpg', 'Knight.jpg'],
)
demo.launch(share=False)