hb-setosys committed on
Commit 9e147b4 · verified · 1 Parent(s): f42f834

Update app.py

Files changed (1)
  1. app.py +108 -0
app.py CHANGED
@@ -0,0 +1,108 @@
+ # Requirements: gradio, opencv-python-headless (pin these in requirements.txt on a Space)
+
+ # Download the YOLOv3 config, weights, and class names if not already present
+ import os
+ import urllib.request
+ for fname, url in [
+     ('yolov3.cfg', 'https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg'),
+     ('yolov3.weights', 'https://pjreddie.com/media/files/yolov3.weights'),
+     ('coco.names', 'https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names'),
+ ]:
+     if not os.path.exists(fname):
+         urllib.request.urlretrieve(url, fname)
+
+ import gradio as gr
+ import cv2
+ import numpy as np
+
+ def count_people(video_path):
+     # Load YOLO model
+     net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')
+
+     # Load class names
+     with open('coco.names', 'r') as f:
+         classes = [line.strip() for line in f]
+
+     # Open video
+     cap = cv2.VideoCapture(video_path)
+
+     frame_count = 0
+     total_people_count = 0
+     people_per_frame = []
+
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+
+         height, width, _ = frame.shape
+
+         # Create blob from frame
+         blob = cv2.dnn.blobFromImage(frame, 1/255.0, (416, 416), swapRB=True, crop=False)
+         net.setInput(blob)
+
+         # Forward pass through the output layers
+         output_layers_names = net.getUnconnectedOutLayersNames()
+         layer_outputs = net.forward(output_layers_names)
+
+         # Collect person detections for this frame
+         boxes = []
+         confidences = []
+
+         for output in layer_outputs:
+             for detection in output:
+                 scores = detection[5:]
+                 class_id = np.argmax(scores)
+                 confidence = scores[class_id]
+
+                 # Keep only confident 'person' detections
+                 if classes[class_id] == 'person' and confidence > 0.5:
+                     center_x = int(detection[0] * width)
+                     center_y = int(detection[1] * height)
+                     w = int(detection[2] * width)
+                     h = int(detection[3] * height)
+
+                     # Rectangle (top-left) coordinates
+                     x = int(center_x - w / 2)
+                     y = int(center_y - h / 2)
+
+                     boxes.append([x, y, w, h])
+                     confidences.append(float(confidence))
+
+         # Apply non-maximum suppression to merge overlapping boxes
+         indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4) if boxes else []
+
+         # Count people in this frame
+         people_in_frame = len(indexes)
+         people_per_frame.append(people_in_frame)
+         total_people_count += people_in_frame
+
+         frame_count += 1
+
+     # Release resources
+     cap.release()
+
+     # Prepare analytics (guarded against videos with no readable frames)
+     return {
+         'Total Frames Processed': frame_count,
+         'Total People Detected': total_people_count,
+         'Average People Per Frame': round(total_people_count / frame_count, 2) if frame_count else 0,
+         'Max People in a Single Frame': max(people_per_frame, default=0)
+     }
+
+ # Format the analytics for the Gradio textbox
+ def analyze_video(video_file):
+     result = count_people(video_file)
+     return "\n".join(f"{key}: {value}" for key, value in result.items())
+
+ # Gradio UI
+ interface = gr.Interface(
+     fn=analyze_video,
+     inputs=gr.Video(label="Upload Video"),
+     outputs=gr.Textbox(label="People Counting Results"),
+     title="YOLO-based People Counter",
+     description="Upload a video to detect and count people using YOLOv3."
+ )
+
+ # Launch Gradio app
+ interface.launch()
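
Note: the original commit used notebook-style shell magics (!pip install, !wget), which raise a SyntaxError in a plain app.py, so the rewrite above downloads the files in Python. On a Hugging Face Space the dependencies belong in requirements.txt; a minimal sketch (package list only; numpy is listed because the script imports it directly, and exact version pins are left to the deployer):

    gradio
    opencv-python-headless
    numpy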
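For a quick sanity check of the counting logic without launching the Gradio UI, the functions can be called directly. A minimal sketch, assuming a local test clip named sample.mp4 (a placeholder path, not a file in this repo):

    # Hypothetical smoke test; 'sample.mp4' is a placeholder path
    if __name__ == '__main__':
        print(analyze_video('sample.mp4'))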