File size: 3,057 Bytes
ef7a8f6
 
5e0e1b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c87dd3b
 
5e0e1b3
 
 
 
 
 
977ddd6
c87dd3b
977ddd6
5e0e1b3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0e201ea
5e0e1b3
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <!-- Webcam page: the inline script below detects faces and paints a white
         rectangle on the overlay canvas over any face whose estimated age is
         under 18. face-api.js is loaded locally; tfjs/body-pix come from CDN. -->
    <script src="js/face-api.min.js"></script>
    <!--<script src="https://cdn.jsdelivr.net/npm/face-api.js"></script>-->
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
    <script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/body-pix"></script>
    
    <style>
        * {
            box-sizing: border-box;
            margin: 0;
            padding: 0;
        }

        video {
            position: absolute;
            z-index: 1;
        }

        canvas {
            position: relative;
            z-index: 20;
        }
    </style>
    <style>
        /* Mask overlay used to cover the person */
        .mask {
            position: absolute;
            top: 0;
            left: 0;
            width: 100%;
            height: 100%;
            background-color: rgba(255, 255, 255, 0.5);
            pointer-events: none; /* Allows interaction with elements underneath */
        }
    </style>
</head>
<body>
    <video width=640 height=480 onloadedmetadata="onPlay(this)" autoplay muted playsinline id="camera"></video>
    <div class="mask"></div>
    <canvas width=640 height=480 id="overlay"></canvas>

    <script>
        Promise.all([
            faceapi.nets.ssdMobilenetv1.loadFromUri('/models'),
            faceapi.nets.tinyFaceDetector.loadFromUri('/models'),
            faceapi.nets.faceLandmark68Net.loadFromUri('/models'),
            faceapi.nets.faceRecognitionNet.loadFromUri('/models'),
            faceapi.nets.faceExpressionNet.loadFromUri('/models'),
            faceapi.nets.ageGenderNet.loadFromUri('/models'),
        ]).then(onPlay);
        
        const video = document.getElementById('camera');
        const canvas = document.getElementById('overlay');
        
        (async () => {
            const stream = await navigator.mediaDevices.getUserMedia({ video: {} });
            video.srcObject = stream;
        })();
        
        async function onPlay() {
        
            let fullFaceDescriptions = await faceapi.detectAllFaces(video)
                .withFaceLandmarks()
                .withFaceDescriptors()
                .withAgeAndGender();
        
            const dims = faceapi.matchDimensions(canvas, video, true);
            const resizedResults = faceapi.resizeResults(fullFaceDescriptions, dims);
        
            resizedResults.forEach(async (detection) => {
                console.log(detection)
                const { age, gender, genderProbability } = detection;
                console.log(`Edad: ${Math.round(age)} años.`);
                if (age < 18) {
                  const box = detection.detection.box;
                  const ctx = canvas.getContext('2d');
                  ctx.fillStyle = "rgba(255, 255, 255, 0.7)"; // Color blanco opaco
                  ctx.fillRect(box.x, box.y, box.width, box.height);
                  
                }
            });
        
            setTimeout(() => onPlay(), 100)
        
        }
    </script>
</body>
</html>