Initial Commit
- app.py +45 -0
- keras_model.h5 +3 -0
- labels.txt +4 -0
- packages.txt +1 -0
- requirements.txt +6 -0
app.py
ADDED
@@ -0,0 +1,45 @@
import cv2
import gradio as gr
import math
import numpy as np
from cvzone.ClassificationModule import Classifier
from cvzone.HandTrackingModule import HandDetector

bgSize = 96  # side length of the square canvas fed to the classifier
classifier = Classifier("keras_model.h5", "labels.txt")
detector = HandDetector(maxHands=1)
labels = ["Look", "Drink", "Eat", "Ok"]
offset = 20  # padding in pixels around the detected hand bounding box

def segment(image):
    hands, frame = detector.findHands(image)
    try:
        if hands:
            hand = hands[0]
            x, y, w, h = hand['bbox']
            # Dark square canvas onto which the resized hand crop is pasted
            croppedHand = np.ones((bgSize, bgSize, 3), np.uint8) * 12
            imgCrop = frame[y - offset:y + h + offset, x - offset:x + w + offset]
            aspectRatio = h / w
            if aspectRatio > 1:
                # Taller than wide: scale height to bgSize and center horizontally
                constant = bgSize / h
                wComputed = math.floor(constant * w)
                bgResize = cv2.resize(imgCrop, (wComputed, bgSize))
                bgResizeShape = bgResize.shape
                wGap = math.floor((bgSize - wComputed) / 2)
                croppedHand[:bgResizeShape[0], wGap:wGap + wComputed] = bgResize
            else:
                # Wider than tall: scale width to bgSize and center vertically
                constant = bgSize / w
                hComputed = math.floor(constant * h)
                bgResize = cv2.resize(imgCrop, (bgSize, hComputed))
                bgResizeShape = bgResize.shape
                hGap = math.floor((bgSize - hComputed) / 2)
                croppedHand[hGap:hGap + hComputed, :] = bgResize
            _, index = classifier.getPrediction(croppedHand, draw=False)
            return labels[index]
    except Exception as e:
        print(e)
    return 'No sign detected'

gr.Interface(fn=segment, live=True, inputs=gr.Image(source='webcam', streaming=True), outputs="text").launch()
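The heart of segment() is an aspect-ratio-preserving letterbox: the padded hand crop is resized so its longer side matches bgSize and is centered on a square canvas before being handed to the classifier. A minimal standalone sketch of that step, using a synthetic crop in place of a webcam frame (the helper name letterbox_square is made up for illustration):

import math

import cv2
import numpy as np

def letterbox_square(crop, size=96, fill=12):
    """Resize crop so its longer side equals size, centered on a square canvas."""
    canvas = np.ones((size, size, 3), np.uint8) * fill
    h, w = crop.shape[:2]
    if h > w:  # taller than wide: height becomes size, pad left/right
        new_w = math.floor(size / h * w)
        resized = cv2.resize(crop, (new_w, size))
        gap = (size - new_w) // 2
        canvas[:, gap:gap + new_w] = resized
    else:      # wider than tall: width becomes size, pad top/bottom
        new_h = math.floor(size / w * h)
        resized = cv2.resize(crop, (size, new_h))
        gap = (size - new_h) // 2
        canvas[gap:gap + new_h, :] = resized
    return canvas

# A synthetic 120x80 crop stands in for the hand region from the webcam frame
dummy_crop = np.random.randint(0, 255, (120, 80, 3), np.uint8)
print(letterbox_square(dummy_crop).shape)  # (96, 96, 3)

Padding rather than stretching keeps the hand's proportions intact, so the gesture shape the classifier sees is not distorted.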
keras_model.h5
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:843e41a49969469d4b5ef0ce6cbb0c35b33467e593f7ed709dc273dbc259e7cd
size 2453432
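The .h5 file is committed as a Git LFS pointer; the roughly 2.4 MB model itself is pulled when the repository is cloned with LFS enabled. To inspect its expected input shape and the four-class output that pairs with labels.txt, it can be opened directly with TensorFlow, bypassing cvzone's Classifier wrapper (a sketch assuming a standard Keras save with no custom layers):

import tensorflow as tf

# Load weights only; compile=False because the model is used purely for inference
model = tf.keras.models.load_model("keras_model.h5", compile=False)
model.summary()  # shows the input resolution and the 4-class output layer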
labels.txt
ADDED
@@ -0,0 +1,4 @@
0 Look
1 Drink
2 Eat
3 Ok
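Each line maps a class index to a name in the order the model's outputs are produced; cvzone's Classifier consumes this file directly, and app.py repeats the same names in its own labels list. Purely for illustration (this is not cvzone's internal loader), the file parses like this:

# Split each "index name" line and keep the name, preserving index order
with open("labels.txt") as f:
    label_names = [line.split(maxsplit=1)[1].strip() for line in f if line.strip()]

print(label_names)  # ['Look', 'Drink', 'Eat', 'Ok']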
packages.txt
ADDED
@@ -0,0 +1 @@
python3-opencv
requirements.txt
ADDED
@@ -0,0 +1,6 @@
cvzone==1.5.6
gradio==3.4.1
numpy==1.23.4
mediapipe==0.8.11
opencv_contrib_python==4.6.0.66
tensorflow==2.10.0
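The pins match the Gradio 3.x API used by app.py (gr.Image(source='webcam', streaming=True) is Gradio 3-era syntax), while cvzone, mediapipe, and TensorFlow back the hand detection and classification. A throwaway snippet to confirm the environment resolved to the pinned versions (cvzone is imported but not version-checked, since it may not expose __version__):

# Print the versions actually installed for the pinned packages
import cv2
import cvzone  # import check only
import gradio
import mediapipe
import numpy
import tensorflow

for mod in (cv2, gradio, mediapipe, numpy, tensorflow):
    print(mod.__name__, mod.__version__)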