hb-setosys committed on
Commit
2543827
·
verified ·
1 Parent(s): b0c01c9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +69 -55
app.py CHANGED
@@ -1,94 +1,108 @@
 
 
 
 
 
 
 
 
1
import gradio as gr
import cv2
import numpy as np
from deep_sort_realtime.deepsort_tracker import DeepSort

# Load the YOLOv3 network and its COCO class-name list once at import time.
net = cv2.dnn.readNet("yolov3.weights", "yolov3.cfg")
with open("coco.names", "r") as f:
    classes = [line.strip() for line in f.readlines()]

# DeepSORT tracker: a track needs 3 consecutive hits (n_init) to be
# confirmed and is dropped after 30 missed frames (max_age).
tracker = DeepSort(max_age=30, n_init=3, nn_budget=20)
def count_unique_people(video_path):
    """Count distinct people in a video via YOLOv3 detection + DeepSORT tracking.

    Args:
        video_path: Path to the video file to analyze.

    Returns:
        dict with the number of unique confirmed track IDs and the number of
        frames processed, or an error string when the video cannot be opened.
    """
    # Open video
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return "Error: Unable to open video file."

    unique_people = set()  # confirmed DeepSORT track IDs seen so far
    frame_count = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame_count += 1
        height, width, _ = frame.shape

        # Detect people using YOLO: forward pass on a 416x416 blob.
        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
        net.setInput(blob)
        output_layers_names = net.getUnconnectedOutLayersNames()
        layer_outputs = net.forward(output_layers_names)

        boxes = []
        confidences = []
        for output in layer_outputs:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]

                # Keep only confident 'person' detections.
                if classes[class_id] == "person" and confidence > 0.5:
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    # Convert centre coordinates to the top-left corner.
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))

        # Apply non-maximum suppression to drop duplicate boxes.
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
        detections = []
        if len(indexes) > 0:
            for i in np.array(indexes).flatten():
                x, y, w, h = boxes[i]
                # BUGFIX: deep_sort_realtime expects detections as
                # ([left, top, w, h], confidence, detection_class) tuples.
                # The previous code passed LTRB corners ([x, y, x+w, y+h])
                # and omitted the class, corrupting the tracker's boxes.
                detections.append(([x, y, w, h], confidences[i], "person"))

        # Advance the tracker and record confirmed track IDs.
        tracks = tracker.update_tracks(detections, frame=frame)
        for track in tracks:
            if not track.is_confirmed():
                continue
            unique_people.add(track.track_id)

    cap.release()

    return {
        "Total Unique People Detected": len(unique_people),
        "Total Frames Processed": frame_count,
    }
80
 
81
# --- Gradio interface -------------------------------------------------
description = """
Upload a video, and the app will count the total number of unique people detected in the video using YOLO and DeepSORT.
"""

interface = gr.Interface(
    fn=count_unique_people,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.JSON(label="Unique People Count"),
    title="Unique People Counter",
    description=description,
)

# Start the app only when run as a script, not on import.
if __name__ == "__main__":
    interface.launch()
 
1
# NOTE(review): the previous revision used IPython shell magics
# ("!pip install ...", "!wget ...") which are a SyntaxError in a plain
# app.py. Package dependencies (gradio, opencv-python-headless) belong in
# requirements.txt; the YOLO model files are fetched here with the standard
# library instead, preserving wget's "-nc" (skip if already present) semantics.
import os
import urllib.request

_YOLO_FILES = {
    "yolov3.cfg": "https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg",
    "yolov3.weights": "https://pjreddie.com/media/files/yolov3.weights",
    "coco.names": "https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names",
}
for _name, _url in _YOLO_FILES.items():
    if not os.path.exists(_name):  # mirror wget -nc: never clobber
        urllib.request.urlretrieve(_url, _name)

import gradio as gr
import cv2
import numpy as np
 
 
 
 
 
 
12
 
13
def count_people(video_path):
    """Detect and count people per frame in a video using YOLOv3.

    Args:
        video_path: Path to the video file to analyze.

    Returns:
        dict of analytics: frames processed, total detections summed over all
        frames, and average / max people per frame. Note the total re-counts
        the same person in every frame they appear in (no tracking).
    """
    # Load YOLO model and class names; yolov3.* files must already exist.
    net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')
    with open('coco.names', 'r') as f:
        classes = [line.strip() for line in f.readlines()]

    # Open video
    cap = cv2.VideoCapture(video_path)

    frame_count = 0
    total_people_count = 0
    people_per_frame = []

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        height, width, _ = frame.shape

        # Create blob from frame and run a YOLO forward pass.
        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (416, 416), swapRB=True, crop=False)
        net.setInput(blob)
        output_layers_names = net.getUnconnectedOutLayersNames()
        layer_outputs = net.forward(output_layers_names)

        # Lists to store detected people
        boxes = []
        confidences = []

        for output in layer_outputs:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]

                # Keep only confident 'person' detections.
                if classes[class_id] == 'person' and confidence > 0.5:
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    # Convert centre coordinates to the top-left corner.
                    x = int(center_x - w / 2)
                    y = int(center_y - h / 2)
                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))

        # Apply non-maximum suppression to drop duplicate boxes.
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

        # Count people surviving NMS in this frame.
        people_in_frame = len(indexes)
        people_per_frame.append(people_in_frame)
        total_people_count += people_in_frame
        frame_count += 1

    # Release resources
    cap.release()

    # BUGFIX: when the video cannot be opened or yields zero frames,
    # people_per_frame is empty and np.max([]) raises ValueError
    # (np.mean([]) is nan) — return zeroed analytics instead of crashing.
    if not people_per_frame:
        return {
            'Total Frames Processed': frame_count,
            'Total People Detected': 0,
            'Average People Per Frame': 0.0,
            'Max People in a Single Frame': 0,
        }

    # Prepare analytics
    return {
        'Total Frames Processed': frame_count,
        'Total People Detected': total_people_count,
        'Average People Per Frame': round(np.mean(people_per_frame), 2),
        'Max People in a Single Frame': int(np.max(people_per_frame)),
    }
91
 
92
# Define Gradio interface
def analyze_video(video_file):
    """Gradio callback: run count_people and render the analytics as text."""
    result = count_people(video_file)
    result_str = "\n".join([f"{key}: {value}" for key, value in result.items()])
    return result_str


# Gradio UI
interface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="People Counting Results"),
    title="YOLO-based People Counter",
    description="Upload a video to detect and count people using YOLOv3.",
)

# FIX: the previous revision guarded the launch with __main__; the new one
# dropped it, so merely importing this module would start the server.
# Restore the guard for consistency.
if __name__ == "__main__":
    interface.launch()