ASL app

- .gitignore +7 -0
- app.py +44 -0
- requirements.txt +47 -0
.gitignore
ADDED
@@ -0,0 +1,7 @@
+flagged/
+*.pt
+*.png
+*.jpg
+*.mp4
+*.mkv
+gradio_cached_examples/
app.py
ADDED
@@ -0,0 +1,44 @@
+import gradio as gr
+import cv2
+import requests
+from ultralytics import YOLO
+
+model = YOLO('best.pt')
+path = 'image.jpg'
+classes = ['ain', 'al', 'aleff', 'bb', 'dal', 'dha', 'dhad', 'fa', 'gaaf', 'ghain', 'ha', 'haa', 'jeem', 'kaaf', 'khaa', 'la', 'laam',
+           'meem', 'nun', 'ra', 'saad', 'seen', 'sheen', 'ta', 'taa', 'thaa', 'thal', 'toot', 'waw', 'ya', 'yaa', 'zay']
+TargetMapper = dict(zip(range(32), classes))  # class id -> letter name
+def show_preds_image(image_path):
+    image = cv2.imread(image_path)
+    outputs = model.predict(source=image_path)
+    results = outputs[0].cpu().numpy()
+    for i, det in enumerate(results.boxes.xyxy):
+        cls = TargetMapper[int(results.boxes.cls[i])]  # label for the i-th detection
+        print(cls)
+        cv2.rectangle(
+            image,
+            (int(det[0]), int(det[1])),
+            (int(det[2]), int(det[3])),
+            color=(0, 0, 255),
+            thickness=2,
+            lineType=cv2.LINE_AA
+        )
+        cv2.putText(image, cls, (int(det[0]), int(det[1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.9, (36, 255, 12), 2)
+
+    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+# image = cv2.imwrite('output.jpg', show_preds_image(path))
+inputs_image = [
+    gr.components.Image(type="filepath", label="Input Image"),
+]
+outputs_image = [
+    gr.components.Image(type="numpy", label="Output Image"),
+]
+interface_image = gr.Interface(
+    fn=show_preds_image,
+    inputs=inputs_image,
+    outputs=outputs_image,
+    title="Arab Sign Language Detection app",
+    examples=[path],
+    cache_examples=False,
+).launch()
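
As a quick sanity check outside the Gradio UI, the commented-out cv2.imwrite line in app.py can be expanded into a short snippet run before launch() is called. This is a minimal sketch, not part of the commit, and it assumes the trained weights best.pt and a sample image.jpg sit next to app.py:

    # hypothetical offline check of show_preds_image (not in the diff)
    annotated_rgb = show_preds_image(path)  # RGB array with boxes and class labels drawn
    cv2.imwrite('output.jpg', cv2.cvtColor(annotated_rgb, cv2.COLOR_RGB2BGR))  # convert back to BGR before saving
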
requirements.txt
ADDED
@@ -0,0 +1,47 @@
+# Ultralytics requirements
+# Usage: pip install -r requirements.txt
+
+# Base ----------------------------------------
+hydra-core>=1.2.0
+matplotlib>=3.2.2
+numpy>=1.18.5
+opencv-python>=4.1.1
+Pillow>=7.1.2
+PyYAML>=5.3.1
+requests>=2.23.0
+scipy>=1.4.1
+torch>=1.7.0
+torchvision>=0.8.1
+tqdm>=4.64.0
+ultralytics
+
+# Logging -------------------------------------
+tensorboard>=2.4.1
+# clearml
+# comet
+
+# Plotting ------------------------------------
+pandas>=1.1.4
+seaborn>=0.11.0
+
+# Export --------------------------------------
+# coremltools>=6.0  # CoreML export
+# onnx>=1.12.0  # ONNX export
+# onnx-simplifier>=0.4.1  # ONNX simplifier
+# nvidia-pyindex  # TensorRT export
+# nvidia-tensorrt  # TensorRT export
+# scikit-learn==0.19.2  # CoreML quantization
+# tensorflow>=2.4.1  # TF exports (-cpu, -aarch64, -macos)
+# tensorflowjs>=3.9.0  # TF.js export
+# openvino-dev  # OpenVINO export
+
+# Extras --------------------------------------
+ipython  # interactive notebook
+psutil  # system utilization
+thop>=0.1.1  # FLOPs computation
+# albumentations>=1.0.3
+# pycocotools>=2.0.6  # COCO mAP
+# roboflow
+
+# HUB -----------------------------------------
+GitPython>=3.1.24