# Install required libraries (uncomment when running in a notebook)
#!pip install gradio opencv-python-headless

# Download the YOLOv3 config, pre-trained weights (roughly 240 MB),
# and COCO class names
#!wget -nc https://raw.githubusercontent.com/pjreddie/darknet/master/cfg/yolov3.cfg
#!wget -nc https://pjreddie.com/media/files/yolov3.weights
#!wget -nc https://raw.githubusercontent.com/pjreddie/darknet/master/data/coco.names

import gradio as gr
import cv2
import numpy as np

def count_people(video_path):
    # Load YOLO model
    net = cv2.dnn.readNet('yolov3.weights', 'yolov3.cfg')
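    # Optional sketch, assuming OpenCV was built with CUDA support
    # (most pip wheels are not); uncomment to run inference on the GPU:
    # net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    # net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)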
    
    # Load COCO class names (one per line; 'person' is the first entry)
    with open('coco.names', 'r') as f:
        classes = [line.strip() for line in f]
    
    # Output layer names are fixed for a loaded network, so look them up once
    output_layers_names = net.getUnconnectedOutLayersNames()
    
    # Open the video; fail loudly if OpenCV cannot read it
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise ValueError(f'Could not open video: {video_path}')
    
    frame_count = 0
    total_people_count = 0
    people_per_frame = []
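    # Note: every frame is analyzed; for long videos, sampling every Nth
    # frame here would trade a little accuracy for a large speedup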
    
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        
        height, width, _ = frame.shape
        
        # Create a 416x416 input blob: scale pixels to [0, 1] and swap BGR to RGB
        blob = cv2.dnn.blobFromImage(frame, 1/255.0, (416, 416), swapRB=True, crop=False)
        net.setInput(blob)
        
        # Forward pass through the detection output layers
        layer_outputs = net.forward(output_layers_names)
        
        # Lists to store detected people
        boxes = []
        confidences = []
        
        # Process detections; each row is [center_x, center_y, w, h,
        # objectness, 80 class scores], so class scores start at index 5
        for output in layer_outputs:
            for detection in output:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                
                # Check if detected object is a person
                if classes[class_id] == 'person' and confidence > 0.5:
                    # Object detected
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    
                    # Rectangle coordinates
                    x = int(center_x - w/2)
                    y = int(center_y - h/2)
                    
                    boxes.append([x, y, w, h])
                    confidences.append(float(confidence))
        
        # Apply non-maximum suppression to collapse overlapping boxes
        # (0.5 = confidence threshold, 0.4 = IoU overlap threshold)
        indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
        
        # Count people in this frame
        people_in_frame = len(indexes)
        people_per_frame.append(people_in_frame)
        total_people_count += people_in_frame
        
        frame_count += 1
    
    # Release resources
    cap.release()
    
    # Guard against videos where no frame could be read
    if not people_per_frame:
        return {'People in a Video': 0}
    
    # Prepare analytics: report the peak count, i.e. the maximum number
    # of people detected in any single frame
    return {
        # 'Total Frames Processed': frame_count,
        # 'Total People Detected': total_people_count,
        # 'Average People Per Frame': round(np.mean(people_per_frame), 2),
        'People in a Video': int(np.max(people_per_frame))
    }
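
# Quick sanity check without the UI; 'sample.mp4' below is only a
# placeholder for any local clip:
# print(count_people('sample.mp4'))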

# Gradio hands the uploaded video to the function as a file path,
# which cv2.VideoCapture can open directly
def analyze_video(video_file):
    result = count_people(video_file)
    result_str = "\n".join([f"{key}: {value}" for key, value in result.items()])
    return result_str

# Gradio UI
interface = gr.Interface(
    fn=analyze_video,
    inputs=gr.Video(label="Upload Video"),
    outputs=gr.Textbox(label="People Counting Results"),
    title="YOLO-based People Counter",
    description="Upload a video to detect and count people using YOLOv3."
)

# Launch Gradio app
interface.launch()
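
# When running in a notebook or behind a firewall, Gradio can expose the
# app through a temporary public link instead:
# interface.launch(share=True)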