RakanAlsheraiwi committed on
Commit
eee0e91
Parent: a5e58e6

Create app.py

Files changed (1): app.py (+124, -0)
app.py ADDED
import cv2
import torch
from PIL import Image, ImageDraw
import gradio as gr
import numpy as np
import pandas as pd
from transformers import pipeline

# Load the YOLOv5 model
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# Load the English-to-Arabic translation model
translator = pipeline("translation_en_to_ar", model="Helsinki-NLP/opus-mt-en-ar")

# Detect objects in an image, draw bounding boxes, and return per-label counts
def detect_and_draw_image(input_image):
    results = model(input_image)
    detections = results.xyxy[0].cpu().numpy()

    draw = ImageDraw.Draw(input_image)

    counts = {}
    for detection in detections:
        xmin, ymin, xmax, ymax, conf, class_id = detection

        # Update counts for each label
        label = model.names[int(class_id)]
        counts[label] = counts.get(label, 0) + 1

        # Draw the bounding box and confidence score
        draw.rectangle([(xmin, ymin), (xmax, ymax)], outline="red", width=2)
        draw.text((xmin, ymin), f"{label}: {conf:.2f}", fill="white")

    # Translate the detected labels to Arabic
    labels = list(counts.keys())
    translated_counts = translator(labels) if labels else []

    df = pd.DataFrame({
        'label (English)': labels,
        'label (Arabic)': [t['translation_text'] for t in translated_counts],
        'counts': list(counts.values())
    })

    return input_image, df

# Detect objects in a video, draw bounding boxes, and return per-label counts
def detect_and_draw_video(video_path):
    cap = cv2.VideoCapture(video_path)
    frames = []
    overall_counts = {}
    detected_objects = set()  # Track unique detections across frames

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame = cv2.resize(frame, (640, 480))

        # OpenCV frames are BGR; YOLOv5 expects RGB
        results = model(frame[..., ::-1])
        detections = results.xyxy[0].cpu().numpy()

        for detection in detections:
            xmin, ymin, xmax, ymax, conf, class_id = detection

            # Identify the object by its label and box centre so the same
            # stationary object is not counted once per frame
            label = model.names[int(class_id)]
            identifier = (label, int((xmin + xmax) / 2), int((ymin + ymax) / 2))

            # Count the object only if it hasn't been seen before
            if identifier not in detected_objects:
                detected_objects.add(identifier)
                overall_counts[label] = overall_counts.get(label, 0) + 1

            cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (255, 0, 0), 2)
            cv2.putText(frame, f"{label}: {conf:.2f}", (int(xmin), int(ymin) - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)

        frames.append(frame)

    cap.release()

    if not frames:
        return None, None

    # Write the annotated frames to an output video at a fixed frame rate
    output_path = 'output.mp4'
    out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (640, 480))
    for frame in frames:
        out.write(frame)
    out.release()

    # Translate the detected labels to Arabic
    labels = list(overall_counts.keys())
    translated_counts = translator(labels) if labels else []

    df = pd.DataFrame({
        'label (English)': labels,
        'label (Arabic)': [t['translation_text'] for t in translated_counts],
        'counts': list(overall_counts.values())
    })

    return output_path, df

# Create separate interfaces for images and videos
image_interface = gr.Interface(
    fn=detect_and_draw_image,
    inputs=gr.Image(type="pil", label="Upload Image"),
    outputs=[gr.Image(type="pil"), gr.Dataframe(label="Object Counts")],
    title="Object Detection for Images",
    description="Upload an image to see the objects detected by YOLOv5 with bounding boxes and their counts."
)

video_interface = gr.Interface(
    fn=detect_and_draw_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=[gr.Video(label="Processed Video"), gr.Dataframe(label="Object Counts")],
    title="Object Detection for Videos",
    description="Upload a video to see the objects detected by YOLOv5 with bounding boxes and their counts."
)

# Combine the interfaces into a single tabbed app
app = gr.TabbedInterface([image_interface, video_interface], ["Image Detection", "Video Detection"])

# Launch the app
app.launch(debug=True)
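One way to sanity-check the image path without launching the Gradio UI is to call detect_and_draw_image directly. The snippet below is a minimal sketch, not part of the commit: 'sample.jpg' is a hypothetical test image, and it assumes the definitions above have already run in the same session (importing app.py as-is would also trigger app.launch(), since it runs at module level).

# Minimal local sanity check; 'sample.jpg' is a hypothetical test image in the
# working directory. Run this after the definitions above rather than importing
# app.py, since importing the module would also call app.launch().
from PIL import Image

img = Image.open('sample.jpg').convert('RGB')
annotated, counts_df = detect_and_draw_image(img)
annotated.save('annotated.jpg')  # bounding boxes are drawn onto the input image
print(counts_df)                 # columns: label (English), label (Arabic), counts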