ducdatit2002 committed on
Commit 94aaf34 · verified · 1 Parent(s): 92f7071

Upload 4 files

Files changed (4)
  1. app.py +80 -0
  2. best.pt +3 -0
  3. readme.MD +11 -0
  4. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,80 @@
+ import streamlit as st
+ import torch
+ import cv2
+ import numpy as np
+ from PIL import Image
+ import tempfile
+ import os
+ from ultralytics import YOLO
+
+ # Load the YOLOv8 model from the local path
+ model = YOLO('best.pt')  # Directly reference the best.pt file in the same directory as app.py
+
+ def detect_people(image):
+     results = model(image)
+     return results
+
+ def process_results(results, image):
+     annotated_frame = image.copy()
+     person_detected = False
+
+     for result in results:
+         boxes = result.boxes.xyxy.cpu().numpy().astype(int)  # Bounding boxes
+         if len(boxes) > 0:
+             person_detected = True
+             for box in boxes:
+                 cv2.rectangle(annotated_frame, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)  # Draw each detection in green
+
+     if person_detected:
+         cv2.putText(annotated_frame, 'Warning! Person detected!', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2, cv2.LINE_AA)
+
+     return annotated_frame
+
+ def main():
+     st.title("People Detection Application using YOLOv8")
+
+     option = st.sidebar.selectbox("Choose input type", ("Upload Image/Video", "Use Camera"))
+
+     if option == "Upload Image/Video":
+         uploaded_file = st.file_uploader("Upload Image or Video", type=['jpg', 'jpeg', 'png', 'mp4', 'avi', 'mov'])
+         if uploaded_file is not None:
+             if uploaded_file.type.startswith('image'):
+                 file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
+                 image = cv2.imdecode(file_bytes, 1)  # Decode to a BGR array
+                 st.image(image, caption='Uploaded Image.', channels="BGR", use_column_width=True)
+                 results = detect_people(image)
+                 processed_frame = process_results(results, image)
+                 st.image(processed_frame, caption='Processed Image.', channels="BGR", use_column_width=True)
+
+             elif uploaded_file.type.startswith('video'):
+                 tfile = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
+                 tfile.write(uploaded_file.read())
+                 tfile.close()
+                 st.write(f'Temporary file path: {tfile.name}')
+                 vidcap = cv2.VideoCapture(tfile.name)
+                 stframe = st.empty()
+
+                 while vidcap.isOpened():
+                     success, frame = vidcap.read()
+                     if not success:
+                         break
+                     results = detect_people(frame)
+                     processed_frame = process_results(results, frame)
+                     stframe.image(processed_frame, channels="BGR")
+                 vidcap.release()
+                 os.remove(tfile.name)  # Clean up the temporary video file
+
+     elif option == "Use Camera":
+         stframe = st.empty()
+         cap = cv2.VideoCapture(0)
+         while True:  # Stream frames until the camera stops delivering
+             ret, frame = cap.read()
+             if not ret:
+                 break
+             results = detect_people(frame)
+             processed_frame = process_results(results, frame)
+             stframe.image(processed_frame, channels="BGR")
+         cap.release()
+
+ if __name__ == '__main__':
+     main()
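
`process_results` above draws every box the model returns and treats any detection as a person, which is reasonable if `best.pt` is a single-class person detector. If the checkpoint has more classes, or low-confidence hits should be ignored, the loop could filter on `result.boxes.cls` and `result.boxes.conf`. A minimal sketch; `draw_people_only`, `PERSON_CLASS_ID`, and `CONF_THRESHOLD` are illustrative names, and the assumption that 'person' is class index 0 may not hold for this custom model:

```python
import cv2
from ultralytics import YOLO

model = YOLO('best.pt')
PERSON_CLASS_ID = 0    # assumption: 'person' is class 0; check model.names for the real mapping
CONF_THRESHOLD = 0.5   # assumed cutoff; tune for this checkpoint

def draw_people_only(results, image):
    """Draw only confident 'person' detections instead of every box."""
    annotated = image.copy()
    for result in results:
        boxes = result.boxes
        for xyxy, cls_id, conf in zip(
            boxes.xyxy.cpu().numpy().astype(int),
            boxes.cls.cpu().numpy().astype(int),
            boxes.conf.cpu().numpy(),
        ):
            if cls_id == PERSON_CLASS_ID and conf >= CONF_THRESHOLD:
                x1, y1, x2, y2 = xyxy
                cv2.rectangle(annotated, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(annotated, f'person {conf:.2f}', (x1, y1 - 5),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    return annotated
```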
best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2c6a7bd2e5cda881a364febba53bc797c5f82fbd9579005428ba94558566af8e
+ size 6235363
readme.MD ADDED
@@ -0,0 +1,11 @@
+ # People Detection
+
+ ## How to run
+ - Step 1: install the dependencies
+ ```bash
+ pip install -r requirements.txt
+ ```
+ - Step 2: launch the Streamlit app
+ ```bash
+ streamlit run app.py
+ ```
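
Before launching the Streamlit UI, it can help to confirm that the `best.pt` checkpoint (stored via Git LFS in this commit) loads and returns detections. A minimal sketch, run from the repository root; `sample.jpg` is a placeholder image path, not a file in this upload:

```python
from ultralytics import YOLO

# Load the uploaded checkpoint and run a single prediction.
model = YOLO('best.pt')
results = model.predict('sample.jpg', save=True)  # 'sample.jpg' is a placeholder test image

for result in results:
    # Print how many boxes were found and their class indices.
    print(f'{len(result.boxes)} detection(s), classes: {result.boxes.cls.tolist()}')
```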
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ streamlit
+ torch
+ opencv-python
+ ultralytics
+ numpy
+ Pillow