Kaelan committed · Commit 056020b · 1 Parent(s): 110d4b7
Dockerfile CHANGED
@@ -1,12 +1,3 @@
- FROM node:21.2-alpine3.17 as build-step
- WORKDIR /yolov-app
- ENV PATH /app/node_modules/.bin:$PATH
- COPY package.json package-lock.json ./
- COPY ./src ./src
- COPY ./public ./public
- RUN yarn install
- RUN yarn build
-
  FROM python:3.10-bullseye
  
  RUN apt-get update && apt-get install -y \
@@ -29,7 +20,6 @@ ENV HOME=/home/user \
      PATH=/home/user/.local/bin:$PATH
  
  WORKDIR $HOME/yolov-app
- COPY --chown=user --from=build-step /yolov-app/build ./build
  COPY --chown=user . $HOME/yolov-app
  
  
@@ -40,17 +30,17 @@ RUN gdown https://drive.google.com/file/d/1lBlZEh0v-WVUJsBb3ZJcUG8zxe5cckWT/view
  RUN gdown https://drive.google.com/file/d/14T5lpdJ0dPndYkMKr0sUcSq_myQDQSda/view?usp=sharing --fuzzy -O checkpoints/best181-8376/
  #ckpt_latest.pth
  
- RUN gdown https://drive.google.com/file/d/1pIDLMr0UUj_6fWYf9xHJ1lcKcCgmO7Vv/view?usp=sharing --fuzzy -O build/static/
+ RUN gdown https://drive.google.com/file/d/1pIDLMr0UUj_6fWYf9xHJ1lcKcCgmO7Vv/view?usp=sharing --fuzzy -O uploads/
  #kitchen.mp4
- RUN gdown https://drive.google.com/file/d/1ip07Lngcicd7pouKs_b0OhpVVoVFRC7U/view?usp=sharing --fuzzy -O build/static/
+ RUN gdown https://drive.google.com/file/d/1ip07Lngcicd7pouKs_b0OhpVVoVFRC7U/view?usp=sharing --fuzzy -O uploads/
  #slip.mp4
- RUN gdown https://drive.google.com/file/d/1s93CZeEKEp_SwO5Y4KOjNYK0zWBgNiy7/view?usp=sharing --fuzzy -O build/static/
+ RUN gdown https://drive.google.com/file/d/1s93CZeEKEp_SwO5Y4KOjNYK0zWBgNiy7/view?usp=sharing --fuzzy -O uploads/
  #studycam.mp4
- RUN gdown https://drive.google.com/file/d/1lZNim3Sl_dQN6gcyEvddRLeijM6v-pO5/view?usp=sharing --fuzzy -O build/static/
+ RUN gdown https://drive.google.com/file/d/1lZNim3Sl_dQN6gcyEvddRLeijM6v-pO5/view?usp=sharing --fuzzy -O uploads/
  #cafe_fall.mp4
- RUN gdown https://drive.google.com/file/d/1Ru6ARgQOtshVgSUosFiH7GFRyRM-_R5U/view?usp=sharing --fuzzy -O build/static/
+ RUN gdown https://drive.google.com/file/d/1Ru6ARgQOtshVgSUosFiH7GFRyRM-_R5U/view?usp=sharing --fuzzy -O uploads/
  #skate.mp4
- RUN gdown https://drive.google.com/file/d/1Md80GemqcuWhVDKXftNNt6owCQQQlWxi/view?usp=sharing --fuzzy -O build/static/
+ RUN gdown https://drive.google.com/file/d/1Md80GemqcuWhVDKXftNNt6owCQQQlWxi/view?usp=sharing --fuzzy -O uploads/
  #Sample.png
  
  EXPOSE 5000
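
Note on the downloads above: the -O targets assume uploads/ and checkpoints/best181-8376/ already exist in the image (e.g. shipped by the COPY step), and app.py expects the files at those exact paths. A hypothetical startup sanity check, a sketch that is not part of this commit:

    import os

    # Fail fast if the build-time gdown downloads did not land where app.py looks.
    assert os.path.isfile("checkpoints/best181-8376/ckpt_latest.pth"), "detector checkpoint missing"
    assert os.path.isdir("uploads"), "sample videos missing"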
app.py ADDED
@@ -0,0 +1,211 @@
+ import gradio as gr
+ from super_gradients.training import models
+ from deep_sort_torch.deep_sort.deep_sort import DeepSort
+ from super_gradients.training.pipelines.pipelines import DetectionPipeline
+ from model_tools import get_prediction, get_color
+ import cv2
+ import datetime
+ import torch
+ import os
+ import numpy as np
+
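+ # NumPy 1.24 removed these long-deprecated aliases; restoring them keeps
+ # older dependencies (here, deep_sort_torch) importable on newer NumPy.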
+ np.float = float
+ np.int = int
+ np.object = object
+ np.bool = bool
+
+ dir = os.getcwd() + '/uploads/'
+
+ inp = gr.Image(type="pil")
+ output = gr.Image(type="pil")
+
+ examples = [[dir + "cafe_fall.mp4", "Fall in cafe"],
+             [dir + "slip.mp4", "Run and Fall2"],
+             [dir + "skate.mp4", "Skate and Fall"],
+             [dir + "kitchen.mp4", "Fall in kitchen"],
+             [dir + "studycam.mp4", "Experiment fall"]]
+
+ ckpt_path = os.getcwd() + "/checkpoints/best181-8376/ckpt_latest.pth"
+ best_model = models.get('yolo_nas_s',
+                         num_classes=1,
+                         checkpoint_path=ckpt_path)
+
+ best_model = best_model.to("cuda" if torch.cuda.is_available() else "cpu")
+ #best_model = models.get("yolo_nas_s", pretrained_weights="coco")
+ best_model.eval()
+
+ #### Initialize tracker
+ tracker_model = os.getcwd() + "/checkpoints/ckpt.t7"
+ tracker = DeepSort(model_path=tracker_model, max_age=30, nn_budget=100, max_iou_distance=0.7, max_dist=0.2)
+ out_path = dir
+ filename = 'demo.webm'
+
+ description = "Yolo model to detect whether a person is falling or has fallen, with DeepSORT tracking how long the subject has been down. \
+ If the duration crosses a threshold of 5s, the bounding box turns red and the subject is labelled IMMOBILE."
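+ # e.g. a subject first seen fallen at time t0 is shown as "Elapsed: XminYs" while
+ # (now - t0) < 5 s; once the 5 s threshold is crossed, vid_predict below draws the
+ # box in red and relabels the subject IMMOBILE.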
+
+ def vid_predict(media):
+
+     pipeline = DetectionPipeline(
+         model=best_model,
+         image_processor=best_model._image_processor,
+         post_prediction_callback=best_model.get_post_prediction_callback(iou=0.25, conf=0.70),
+         class_names=best_model._class_names,
+     )
+
+     print("Running Predict")
+     save_to = os.path.join(out_path, filename)
+     cap = cv2.VideoCapture(media)
+
+     if cap.isOpened():
+
+         width = cap.get(3)  # float `width`
+         print('width', width)
+         height = cap.get(4)
+         print('Height', height)
+         fps = cap.get(cv2.CAP_PROP_FPS)
+         # or
+         fps = cap.get(5)
+
+         print('fps:', fps)  # float `fps`
+
+         frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
+         # or frame_count = cap.get(7)
+
+         print('frames count:', frame_count)  # float `frame_count`
+
+         out = cv2.VideoWriter(save_to, cv2.VideoWriter_fourcc(*'VP08'), fps, (640, 640))
+         fall_records = {}
+         frame_id = 0
+         while True:
+             frame_id += 1
+             if frame_id > frame_count:
+                 break
+             print('frame_id', frame_id)
+
+             ret, img = cap.read()
+             if not ret:  # guard against a failed read before touching the frame
+                 break
+             #img = cv2.resize(img, (1280, 720), interpolation=cv2.INTER_AREA)
+             # if height > 720:
+             #     print("Reshaped")
+             img = cv2.resize(img, (640, 640), interpolation=cv2.INTER_AREA)
+             width, height = img.shape[1], img.shape[0]
+
+             ### recalibrate color channels to rgb for use in model prediction
+             img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+             overlay = img.copy()
+
+             ### create list objects needed for tracking
+             detects = []
+             conffs = []
+
+             if ret:
+                 print("START ")
+                 model_predictions = get_prediction(best_model, img_rgb, pipeline)
+                 print(model_predictions)
+                 classnames = ['Fall-Detected']
+                 results = model_predictions
+                 bboxes = results.bboxes_xyxy
+
+                 if len(bboxes) >= 1:
+                     confs = results.confidence
+                     labels = results.labels
+
+                     for bbox, conf, label in zip(bboxes, confs, labels):
+                         label = int(label)
+                         conf = np.round(conf, decimals=2)
+
+                         x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
+                         x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+
+                         ### for tracking model
+                         bw = abs(x1 - x2)
+                         bh = abs(y1 - y2)
+                         cx, cy = x1 + bw//2, y1 + bh//2
+
+                         coords = [cx, cy, bw, bh]
+                         detects.append(coords)
+                         conffs.append([float(conf)])
+
+                     ### Tracker
+                     xywhs = torch.tensor(detects)
+                     conffs = torch.tensor(conffs)
+                     #tracker_results = deepsort.update(xywhs, confss, oids, img)
+                     tracker_results = tracker.update(xywhs, conffs, img_rgb)
+
+                     ### conduct check on track_records
+                     now = datetime.datetime.now()
+                     if len(fall_records.keys()) >= 1:
+                         #print(fall_records)
+
+                         ### reset the immobility timer if more than N seconds have lapsed since the last detected fall
+                         fall_records = {id: item if (now - item['present']).total_seconds() <= 3.0 else {'start': now, 'present': now} for id, item in fall_records.items()}
+
+                     if len(tracker_results) >= 1:
+                         for track, conf, label in zip(tracker_results, conffs, labels):
+                             conf = conf.numpy()[0]
+                             duration = 0
+                             minute = 0
+                             sec = 0
+                             x1, y1, x2, y2, id = track
+                             x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+
+                             if id in fall_records.keys():
+                                 ### record present time
+                                 present = datetime.datetime.now()
+                                 fall_records[id].update({'present': present})
+
+                                 ### calculate duration
+                                 duration = fall_records[id]['present'] - fall_records[id]['start']
+                                 duration = int(duration.total_seconds())
+
+                                 ### record status
+                                 fall_records[id].update({'status': 'IMMOBILE'}) if duration >= 5 else fall_records[id].update({'status': None})
+                                 print(f"Frame:{frame_id} ID: {id} Conf: {conf} Duration:{duration} Status: {fall_records[id]['status']}")
+                                 print(fall_records[id])
+                                 minute, sec = divmod(duration, 60)
+
+                             else:
+                                 start = datetime.datetime.now()
+                                 fall_records[id] = {'start': start}
+                                 fall_records[id].update({'present': start})
+
+                             classname = classnames[int(label)]
+
+                             color = get_color(id*20)
+                             if duration < 5:
+                                 display_text = f"{str(classname)} ({str(id)}) {str(conf)} Elapsed: {round(minute)}min{round(sec)}s"
+                                 (w, h), _ = cv2.getTextSize(
+                                     display_text, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 1)
+                                 cv2.rectangle(img, (x1, y1), (x2, y2), color, 1)
+                                 cv2.rectangle(overlay, (x1, y1), (x2, y2), color, 1)
+                                 cv2.rectangle(overlay, (min(x1, int(width)-w), max(1, y1 - 20)), (min(x1 + w, int(width)), max(21, y1)), color, cv2.FILLED)
+                             else:
+                                 display_text = f"{str(classname)} ({str(id)}) {str(conf)} IMMOBILE: {round(minute)}min{round(sec)}s "
+                                 (w, h), _ = cv2.getTextSize(
+                                     display_text, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 1)
+                                 cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 1)
+                                 cv2.rectangle(overlay, (x1, y1), (x2, y2), (0, 0, 255), 1)
+                                 cv2.rectangle(overlay, (min(x1, int(width)-w), max(1, y1 - 20)), (min(x1 + w, int(width)), max(21, y1)), (0, 0, 255), cv2.FILLED)
+
+                             cv2.putText(img, display_text, (min(x1, int(width)-w), max(21, y1)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
+                             cv2.putText(overlay, display_text, (min(x1, int(width)-w), max(21, y1)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)
+
+             alpha = 0.6
+             masked = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)
+             out.write(masked)
+
+     cap.release()
+     out.release()
+
+     cv2.destroyAllWindows()
+
+     return save_to
+
+ demo = gr.Interface(fn=vid_predict, inputs=gr.Video(), outputs=gr.Video(), examples=examples, description=description, title='Fall detection and tracking with deep sort')
+
+ if __name__ == "__main__":
+     demo.launch(show_api=False)
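
One caveat on ports: gr.Interface.launch() binds to 7860 by default, while the Dockerfile above still EXPOSEs 5000. A minimal sketch if the app should serve on the exposed port (an assumption, not part of this commit):

    demo.launch(show_api=False, server_name="0.0.0.0", server_port=5000)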
main.py DELETED
@@ -1,114 +0,0 @@
- from flask import Flask, request, render_template, send_from_directory
- from flask import flash, request, redirect, url_for, Response, make_response
- from werkzeug.utils import secure_filename
-
- from super_gradients.training import models
- from deep_sort_torch.deep_sort.deep_sort import DeepSort
-
- import torch
-
- from model_tools import vid_predict, img_predict
- from dotenv import load_dotenv
- import os
- import urllib.request
-
- load_dotenv()
- secret_key = os.getenv("secret_key")
-
- dir = os.getcwd() + f'/build'
- dir_static = dir + '/static'
- dir_ckpt = os.getcwd() + f'/checkpoints'
-
- ckpt_path = dir_ckpt + "/best181-8376/ckpt_latest.pth"
- best_model = models.get('yolo_nas_s',
-                         num_classes=1,
-                         checkpoint_path=ckpt_path)
-
- best_model = best_model.to("cuda" if torch.cuda.is_available() else "cpu")
- best_model.eval()
-
- #### Initiatize tracker
- tracker_model = "./checkpoints/ckpt.t7"
- tracker = DeepSort(model_path=tracker_model,max_age=30,nn_budget=100, max_iou_distance=0.7, max_dist=0.2)
-
-
- ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif', 'mp4', 'avi','webm'}
-
- app = Flask(__name__, template_folder=dir,static_folder=dir_static)
- app.config['UPLOAD_FOLDER'] = dir_static
- app.config['MAX_CONTENT_LENGTH'] = 20*1024*1024
- app.secret_key = secret_key
-
- def allowed_file(filename):
-     return '.' in filename and \
-         filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
-
- @app.route('/')
- def index():
-     predictions = False
-     return render_template('index.html', predictions=predictions)
-
- @app.route('/upload', methods=["GET", "POST"])
- def upload():
-
-     if request.method == 'POST':
-         print('Form',request.form.get('options'))
-         try:
-             filename = request.form.get('options')
-             if filename:
-                 save_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
-                 filetype = 'video'
-                 new_filename = filename[:-4] + ".webm"
-                 save_to = vid_predict(save_path,best_model,tracker, out_path=dir_static, filename=new_filename)
-                 save_to = url_for('static', filename=new_filename)
-                 predictions = True
-                 return render_template('index.html', predictions=predictions, saved_outout=save_to, ft=filetype)
-         except:
-             pass
-         # check if the post request has the file part
-         if ('file') and ('media') not in request.files:
-             flash('No file part')
-             return redirect(request.url)
-
-         try:
-             file = request.files['file']
-         except:
-             file = request.files['media']
-         # If the user does not select a file, the browser submits an
-         # empty file without a filename.
-         if file.filename == '':
-             flash('No selected file')
-             return redirect(request.url)
-
-         if file and allowed_file(file.filename):
-             filename = secure_filename(file.filename)
-             save_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
-             file.save(save_path)
-             new_filename = filename[:-4] + ".webm"
-
-             if filename[-3:] in ['mp4','avi','webm']:
-                 print("VIDEO")
-                 filetype = 'video'
-                 save_to = vid_predict(save_path,best_model,tracker, out_path=dir_static, filename=new_filename)
-                 save_to = url_for('static', filename=new_filename)
-             else:
-                 print("IMAGE")
-                 filetype = 'image'
-                 save_to = img_predict(save_path,best_model, out_path=dir_static, filename=new_filename)
-                 save_to = url_for('static', filename="pred_0.jpg")
-
-             predictions = True
-
-             return render_template('index.html', predictions=predictions, saved_outout=save_to, ft=filetype)
-
-
- @app.route('/static/<folder>/<file>')
- def css(folder,file):
-     ''' User will call with with thier id to store the symbol as registered'''
-     path = folder+'/'+file
-     return send_from_directory(directory=dir_static,path=path)
-
-
- if __name__ == "__main__":
-     app.run(host="0.0.0.0", port=7860)
model_tools.py CHANGED
@@ -4,18 +4,22 @@ from matplotlib.colors import hsv_to_rgb
  import torch
  import numpy as np
  from super_gradients.training import models
+ from super_gradients.training.models.detection_models.customizable_detector import CustomizableDetector
+ from super_gradients.training.pipelines.pipelines import DetectionPipeline
+
  from deep_sort_torch.deep_sort.deep_sort import DeepSort
  import os
  
+ # make sure to set IOU and confidence in the pipeline constructor
  
  def get_color(number):
      """ Converts an integer number to a color """
- 
+     # change these however you want to
      hue = number*30 % 180
      saturation = number*103 % 256
      value = number*50 % 256
  
- 
+     # expects normalized values
      hsv_array = [hue/179, saturation/255, value/255]
      rgb = hsv_to_rgb(hsv_array)
  
@@ -28,179 +32,25 @@ def img_predict(media, model, out_path,filename):
  
      return None
  
- def vid_predict(media, model, tracker, out_path,filename):
-     print("Running Predict")
-     save_to = os.path.join(out_path, filename)
-     cap = cv2.VideoCapture(media)
-
-     if cap.isOpened():
-
-         width = cap.get(3)  # float `widtqh`
-         print('width',width)
-         height = cap.get(4)
-         print('Height',height)
-         fps = cap.get(cv2.CAP_PROP_FPS)
-         # or
-         fps = cap.get(5)
-
-         print('fps:', fps)  # float `fps`
-
-         frame_count = cap.get(cv2.CAP_PROP_FRAME_COUNT)
-         # or frame_count = cap.get(7)
-
-         print('frames count:', frame_count)  # float `frame_count`
-
-         out = cv2.VideoWriter(save_to, cv2.VideoWriter_fourcc(*'VP08'), fps, (640,640))
-         fall_records = {}
-         frame_id = 0
-         while True:
-             frame_id += 1
-             if frame_id > frame_count:
-                 break
-             print('frame_id', frame_id)
-
-             ret, img = cap.read()
-             img = cv2.resize(img, (640, 640),cv2.INTER_AREA)
-             width, height = img.shape[1], img.shape[0]
-
-             ### recalibrate color channels to rgb for use in model prediction
-             img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-             overlay = img.copy()
-
-             ### create list objects needed for tracking
-             detects = []
-             conffs = []
-
-             if ret:
-
-                 model_predictions = model.predict(img_rgb,conf=0.70,fuse_model=False)
-                 classnames = model_predictions[0].class_names
-                 results = model_predictions[0].prediction
-                 bboxes = results.bboxes_xyxy
-
-                 if len(bboxes) >= 1:
-                     confs = results.confidence
-                     labels = results.labels
-
-                     for bbox, conf, label in zip(bboxes, confs, labels):
-                         label = int(label)
-                         conf = np.round(conf, decimals=2)
-
-                         x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
-                         x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
-
-                         ### for tracking model
-                         bw = abs(x1 - x2)
-                         bh = abs(y1 - y2)
-                         cx , cy = x1 + bw//2, y1 + bh//2
-
-                         coords = [cx, cy, bw, bh]
-                         detects.append(coords)
-                         conffs.append([float(conf)])
-
-                     ### Tracker
-                     xywhs = torch.tensor(detects)
-                     conffs = torch.tensor(conffs)
-                     tracker_results = tracker.update(xywhs, conffs, img_rgb)
-
-                     ### conduct check on track_records
-                     now = datetime.datetime.now()
-                     if len(fall_records.keys()) >=1:
-
-                         ### reset timer for calculating immobility to 0 if time lapsed since last detection of fall more than N seconds
-                         fall_records = {id: item if (now - item['present']).total_seconds() <= 3.0 else {'start':now, 'present': now} for id, item in fall_records.items() }
-
-                     if len(tracker_results)>=1:
-                         for track,conf,label in zip(tracker_results,conffs, labels):
-                             conf = conf.numpy()[0]
-                             duration = 0
-                             minute = 0
-                             sec = 0
-                             x1, y1 ,x2, y2, id = track
-                             x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
-
-                             if id in fall_records.keys():
-                                 ### record present time
-                                 present = datetime.datetime.now()
-                                 fall_records[id].update({'present': present})
-
-                                 ### calculate duration
-                                 duration = fall_records[id]['present'] - fall_records[id]['start']
-                                 duration = int(duration.total_seconds())
-
-                                 ### record status
-                                 fall_records[id].update({'status': 'IMMOBILE'}) if duration >= 5 else fall_records[id].update({'status': None})
-                                 print(f"Frame:{frame_id} ID: {id} Conf: {conf} Duration:{duration} Status: {fall_records[id]['status']}")
-                                 print(fall_records[id])
-                                 minute, sec = divmod(duration,60)
-
-                             else:
-                                 start = datetime.datetime.now()
-                                 fall_records[id] = {'start': start}
-                                 fall_records[id].update({'present': start})
-
-                             classname = classnames[int(label)]
-
-                             color = get_color(id*20)
-                             if duration < 5:
-                                 display_text = f"{str(classname)} ({str(id)}) {str(conf)} Elapsed: {round(minute)}min{round(sec)}s"
-                                 (w, h), _ = cv2.getTextSize(
-                                     display_text, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 1)
-                                 cv2.rectangle(img,(x1, y1), (x2, y2),color,1)
-                                 cv2.rectangle(overlay,(x1, y1), (x2, y2),color,1)
-                                 cv2.rectangle(overlay, (min(x1,int(width)-w), max(1,y1 - 20)), (min(x1+ w,int(width)) , max(21,y1)), color, cv2.FILLED)
-                             else:
-                                 display_text = f"{str(classname)} ({str(id)}) {str(conf)} IMMOBILE: {round(minute)}min{round(sec)}s "
-                                 (w, h), _ = cv2.getTextSize(
-                                     display_text, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 1)
-                                 cv2.rectangle(img,(x1, y1), (x2, y2),(0,0,255),1)
-                                 cv2.rectangle(overlay,(x1, y1), (x2, y2),(0,0,255),1)
-                                 cv2.rectangle(overlay, (min(x1,int(width)-w), max(1,y1 - 20)), (min(x1+ w,int(width)) , max(21,y1)), (0,0,255), cv2.FILLED)
-
-                             cv2.putText(img,display_text, (min(x1,int(width)-w), max(21,y1)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,0,0),2)
-                             cv2.putText(overlay,display_text, (min(x1,int(width)-w), max(21,y1)), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,0,0),2)
-
-             ### output image
-             alpha = 0.6
-             masked = cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0)
-             out.write(masked)
-
-     cap.release()
-     out.release()
-
-     cv2.destroyAllWindows()
-
-
- if __name__ == '__main__':
-     #ckpt_path = "/home/kaelan/Projects/Jupyter/Pytorch/Yolo-Nas/yolov-app/checkpoints/ckpt_latest.pth"
-     ckpt_path = "/home/kaelan/Projects/Jupyter/Pytorch/Yolo-Nas/checkpoints_Fall_detection/Fall_yolonas_run2/ckpt_latest.pth"
-
-     best_model = models.get('yolo_nas_s',
-                             num_classes=1,
-                             checkpoint_path=ckpt_path)
-     # best_model.set_dataset_processing_params(
-     #     class_names=['Fall-Detected'],
-     #     iou=0.35, conf=0.7,
-     # )
-     best_model = best_model.to("cuda" if torch.cuda.is_available() else "cpu")
-     #best_model = models.get("yolo_nas_s", pretrained_weights="coco")
-     best_model.eval()
-
-     #### Initiatize tracker
-     tracker_model = "./checkpoints/ckpt.t7"
-     tracker = DeepSort(model_path=tracker_model,max_age=30,nn_budget=100, max_iou_distance=0.7, max_dist=0.2)
-
-     title = "skate.mp4"
-     media = "/home/kaelan/Projects/data/videos/" + title
-     vid_predict(media,best_model,tracker)
+ def get_prediction(model, image_in, pipeline):
+     ''' Obtains DetectionPrediction object from a single input RGB image
+     '''
+     # Preprocess
+     preprocessed_image, processing_metadata = pipeline.image_processor.preprocess_image(image=image_in.copy())
+
+     # Predict
+     with torch.no_grad():
+         # follow the model's device so CPU-only hosts work too
+         torch_input = torch.Tensor(preprocessed_image).unsqueeze(0).to(next(model.parameters()).device)
+         model_output = model(torch_input)
+         prediction = pipeline._decode_model_output(model_output, model_input=torch_input)
+     # Postprocess
+     return pipeline.image_processor.postprocess_predictions(predictions=prediction[0], metadata=processing_metadata)
+
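For reference, a usage sketch of the new get_prediction helper, mirroring how app.py wires it up (best_model and the RGB frame img_rgb are assumed to come from that file):

    pipeline = DetectionPipeline(
        model=best_model,
        image_processor=best_model._image_processor,
        post_prediction_callback=best_model.get_post_prediction_callback(iou=0.25, conf=0.70),
        class_names=best_model._class_names,
    )
    results = get_prediction(best_model, img_rgb, pipeline)
    print(results.bboxes_xyxy, results.confidence, results.labels)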
package-lock.json DELETED
The diff for this file is too large to render. See raw diff
 
package.json DELETED
@@ -1,40 +0,0 @@
- {
-   "name": "yolov-app",
-   "version": "0.1.0",
-   "private": true,
-   "dependencies": {
-     "axios": "^1.6.2",
-     "react": "^18.2.0",
-     "react-dom": "^18.2.0",
-     "react-scripts": "5.0.1"
-   },
-   "scripts": {
-     "start": "react-scripts start",
-     "build": "react-scripts build",
-     "test": "react-scripts test",
-     "eject": "react-scripts eject"
-   },
-   "eslintConfig": {
-     "extends": [
-       "react-app",
-       "react-app/jest"
-     ]
-   },
-   "browserslist": {
-     "production": [
-       ">0.2%",
-       "not dead",
-       "not op_mini all"
-     ],
-     "development": [
-       "last 1 chrome version",
-       "last 1 firefox version",
-       "last 1 safari version"
-     ]
-   },
-   "devDependencies": {
-     "dart-sass": "^1.25.0",
-     "node-sass": "npm:dart-sass@^1.25.0",
-     "node-sass-install": "^1.0.2"
-   }
- }
public/favicon.ico DELETED
Binary file (3.87 kB)
 
public/index.html DELETED
@@ -1,112 +0,0 @@
- <!DOCTYPE html>
- <html lang="en">
-
- <head>
-     <meta charset="utf-8" />
-     <link rel="icon" href="%PUBLIC_URL%/favicon.ico" />
-     <link rel="stylesheet" href="{{url_for('static', filename='css/index.css')}}">
-     <meta name="viewport" content="width=device-width, initial-scale=1" />
-     <meta name="theme-color" content="#000000" />
-     <meta name="description" content="Web site created using create-react-app" />
-     <link rel="apple-touch-icon" href="%PUBLIC_URL%/logo192.png" />
-     <!--
-         manifest.json provides metadata used when your web app is installed on a
-         user's mobile device or desktop. See https://developers.google.com/web/fundamentals/web-app-manifest/
-     -->
-     <link rel="manifest" href="%PUBLIC_URL%/manifest.json" />
-     <!--
-         Notice the use of %PUBLIC_URL% in the tags above.
-         It will be replaced with the URL of the `public` folder during the build.
-         Only files inside the `public` folder can be referenced from the HTML.
-
-         Unlike "/favicon.ico" or "favicon.ico", "%PUBLIC_URL%/favicon.ico" will
-         work correctly both with client-side routing and a non-root public URL.
-         Learn how to configure a non-root public URL by running `npm run build`.
-     -->
-     <link rel="preconnect" href="https://fonts.gstatic.com">
-     <link href="https://fonts.googleapis.com/css2?family=Poppins:wght@300;400;600&display=swap" rel="stylesheet">
-     <link href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-T3c6CoIi6uLrA9TneNEoa7RxnatzjcDSCmG1MXxSR1GAsXEV/Dwwykc2MPK8M2HN" crossorigin="anonymous">
-     <title>FALL DETECTION App</title>
-
- </head>
-
- <body>
-     <noscript>You need to enable JavaScript to run this app.</noscript>
-     <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/js/bootstrap.bundle.min.js" integrity="sha384-C6RzsynM9kWDrMNeT87bh95OGNyZPhcTNXj1NW7RuBCsyN/o0jlpcV8Qyq46cDfL" crossorigin="anonymous"></script>
-     <div id="content">
-         <nav class="navbar navbar-dark bg-dark">
-             <a class="navbar-brand" style="color: #8B008B" href="#">FALL DETECTION</a>
-         </nav>
-
-         <!--OFF Canvas-->>
-         <br>
-         <div class="text-center">
-             <button class="btn btn-dark btn-lg" type="button" data-bs-toggle="offcanvas" data-bs-target="#offcanvasTop" aria-controls="offcanvasTop">Toggle Introduction to Experiment</button>
-         </div>
-
-         <div class="offcanvas offcanvas-top" tabindex="-1" id="offcanvasTop" aria-labelledby="offcanvasTopLabel" style="background-color: #0b0b0bcf">
-             <div class="offcanvas-header">
-                 <h5 id="offcanvasTopLabel">Introduction on experiment</h5>
-                 <button type="button" class="btn-close text-reset" data-bs-dismiss="offcanvas" aria-label="Close"></button>
-             </div>
-             <div class="offcanvas-body">
-                 <h2 style="color: #FEFBEA"> Introduction on experiment </h2>
-                 <img src="{{ url_for('static', filename='Sample.png') }}" ,width="240" height="240">
-                 <ol>
-                     <li style="color: #FEFBEA"><b style="color: #FEFBEA"> Experimentation on using YOLO-NAS model(s-size) to do fall recognition. The model is trained on images of various scenarios of people falling or have fallen.</b></li>
-                     <li style="color: #FEFBEA"><b style="color: #FEFBEA">Deepsort alogrithm is use to track falls. There will be an id number next to the classification label "FALL DETECTED" and confidence in prediction </b></li>
-                     <li style="color: #FEFBEA"><b style="color: #FEFBEA">For this experiment if an individual has been tracked as fallen for more than 5 seconds, the bounding box of that personal will turn red and be tagged as "IMMOBILE".</b></li>
-                     <li style="color: #FEFBEA"><b style="color: #FEFBEA">The model has achieve more than 0.8 score on @map50 which will serve as a good baseline </b>
-                     <li style="color: #FEFBEA"><b style="color: #FEFBEA"> More datasets can be downloaded to test on the model at http://www.iro.umontreal.ca/~labimage/Dataset/ and https://www.kaggle.com/datasets/tuyenldvn/falldataset-imvia</b>
-                 </ol>
-             </div>
-         </div>
-
-         <div class="container text-center">
-             <div class="row align-items-start">
-
-                 <div class="col" id="root">
-                 </div>
-
-                 <div class="col">
-                     <h1>RESULT</h1>
-                     <p>Video will take afew minutes or more to process. Upload images for faster processing </p>
-                     {% if predictions and ft=='video': %}
-                     <video class="border border-secondary" controls autoplay><source src={{saved_outout}} type="video/mp4" height='680' width='680' name={{predictions}}></video>
-                     {% elif predictions and ft=='image': %}
-                     <img class="border border-secondary" src={{saved_outout}} style="width:640px;height:640px;">
-                     {% else %}
-                     <div class="square"></div>
-                     {% endif %}
-                 </div>
-
-             </div>
-         </div>
-
-         <!---<div id="root">
-
-         </div>--->
-
-         <!--
-             This HTML file is a template.
-             If you open it directly in the browser, you will see an empty page.
-
-             You can add webfonts, meta tags, or analytics to this file.
-             The build step will place the bundled scripts into the <body> tag.
-
-             To begin the development, run `npm start` or `yarn start`.
-             To create a production bundle, use `npm run build` or `yarn build`.
-         -->
-     </div>
- </body>
-
- </html>
public/logo192.png DELETED
Binary file (5.35 kB)
 
public/logo512.png DELETED
Binary file (9.66 kB)
 
public/manifest.json DELETED
@@ -1,25 +0,0 @@
- {
-   "short_name": "React App",
-   "name": "Create React App Sample",
-   "icons": [
-     {
-       "src": "favicon.ico",
-       "sizes": "64x64 32x32 24x24 16x16",
-       "type": "image/x-icon"
-     },
-     {
-       "src": "logo192.png",
-       "type": "image/png",
-       "sizes": "192x192"
-     },
-     {
-       "src": "logo512.png",
-       "type": "image/png",
-       "sizes": "512x512"
-     }
-   ],
-   "start_url": ".",
-   "display": "standalone",
-   "theme_color": "#000000",
-   "background_color": "#ffffff"
- }
public/robots.txt DELETED
@@ -1,3 +0,0 @@
- # https://www.robotstxt.org/robotstxt.html
- User-agent: *
- Disallow:
requirements.txt CHANGED
@@ -161,5 +161,6 @@ youtube-dl==2021.12.17
  uvicorn
  yt-dlp==2023.10.13
  gunicorn
+ gradio
  gdown
  zipp
src/App.js DELETED
@@ -1,121 +0,0 @@
- import { useState,useEffect,useRef } from 'react';
-
- window.addEventListener("click",() => {
-     const loader = document.querySelector(".loader");
-
-     loader.classList.add("loader-hidden");
-
-     loader.addEventListener("transitionend",() => {
-         document.body.removeChild("loader")
-     })
- })
-
-
- function App() {
-     const [imageURL, setImageURL] = useState(null);
-     const imageRef = useRef()
-     const [imageURL1, setImageURL1] = useState(null);
-     const [imageURL2, setImageURL2] = useState(null);
-     const [file_img, setfile_img] = useState(null);
-     const [file_video, setfile_video] = useState(null);
-     const imageRef2 = useRef()
-
-     const uploadImage = (e) => {
-         console.log(e)
-         const{ files } = e.target
-         const filetype = e.target.files[0].type.slice(0,5)
-         console.log(filetype)
-
-         if (filetype === 'image') {
-             setfile_img(filetype)
-             setfile_video(null)
-         } else if (filetype === 'video') {
-             setfile_video(filetype)
-             setfile_img(null)
-
-         } else {
-             setfile_img(null)
-             setfile_video(null)
-         }
-
-         if (files.length > 0) {
-             const url = URL.createObjectURL(files[0])
-             //const a = document.createElement('a');
-             //a.href = url
-             //console.log('A', a)
-             //a.download = 'save01.mp4';
-             //a.click();
-
-             setImageURL(url)
-             setImageURL1(url)
-         } else {
-             setImageURL(null)
-             setImageURL1(null)
-         }
-     }
-
-     const uploadName = (e) => {
-         console.log(e.target.value)
-
-         const files = e.target.value
-         var upath = "/static/"
-         const url = upath.concat(files)
-
-         setImageURL(url)
-         setImageURL2(url)
-         setfile_video(url)
-         setfile_img(null)
-
-     }
-
-     return (
-         <div className="App">
-             <h1 className='header'>PREVIEW</h1>
-
-             <div className='inputHolder' >
-                 <form action="/upload" enctype="multipart/form-data" method="post" id='uploadform'>
-                     <div class="mb-3">
-
-                         <label for="formFile" class="form-label">Upload Image or Video to preview: Max(20mb/file)</label>
-                         <a href="https://drive.google.com/drive/folders/1Gslj0uO2CeLXsO9EhjJ0OepWWpyWb6yO?usp=sharing" target="_blank">Sample Pics to download</a>
-                         <input class="form-control" type='file' id="formFile" accepts='image/*, video/*' capture='camera' name='media'
-                             onChange={uploadImage}/>
-
-                         {imageURL1 && <button type="submit" class="btn btn-dark" value='SubmiT' >DETECT FALL </button>}
-                     </div>
-
-                 </form>
-                 <br></br>
-                 <div>
-                     <form action="/upload" enctype="multipart/form-data" method="post" id='uploadselect'>
-                         <label >Sample videos</label>
-                         <select name="options" class="form-select" aria-label="Default select example" onChange={uploadName}>
-                             <option selected>Open this select menu</option>
-                             <option value="skate.mp4">Skate and Fall</option>
-                             <option value="slip.mp4">Run and Fall</option>
-                             <option value="kitchen.mp4">Fall in kitchen</option>
-                             <option value="cafe_fall.mp4">Fall in workplace</option>
-                             <option value="studycam.mp4">Fall experiment</option>
-                         </select>
-                         {imageURL2 && <button type="submit" class="btn btn-dark" value='SubmiT' >DETECT FALL </button>}
-                     </form>
-                 </div>
-             </div>
-
-             <div className='mainWrapper'>
-                 <div className='mainContent'>
-                     <div className='imageholder'>
-                         {file_img && imageURL && <img src={imageURL} height='360' width='360' alt='Preview' crossOrigin='anonymous' ref={imageRef}/>}
-                         {file_video && imageURL && <video controls src={imageURL} height='360' width='360' alt='Preview' crossOrigin='anonymous' ref={imageRef} />}
-
-                     </div>
-                 </div>
-
-             </div>
-         </div>
-     );
- }
-
- export default App;
src/index.css DELETED
@@ -1,74 +0,0 @@
- body {
-     margin: 0;
-     font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
-         'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
-         sans-serif;
-     -webkit-font-smoothing: antialiased;
-     -moz-osx-font-smoothing: grayscale;
-     background-color: #8b008bcf;
- }
-
- code {
-     font-family: source-code-pro, Menlo, Monaco, Consolas, 'Courier New',
-         monospace;
- }
-
- h1 {
-     color: #FEFBEA;
- }
-
- b {
-     color: #FEFBEA;
- }
-
- .square {
-     height: 640px;
-     width: 640px;
-     background-color: #555;
- }
-
- .rcorners {
-     border-radius: 25px;
-     border: 2px solid #b494f4;
-     background-color: rgb(175, 150, 226);
-     margin: auto;
-     padding: 20px;
-     width: 80vw;
-     height: 25vh;
- }
- .loader{
-     position: fixed;
-     top: 0;
-     left: 0;
-     width: 100vw;
-     height: 100vh;
-     display: flex;
-     justify-content: center;
-     align-items: center;
-     background-color: #96958f;
-     transition: opacity 0..75s, visibility 0.75s;
- }
-
- .loader-hidden{
-     opacity: 0;
-     visibility: hidden;
- }
-
- .loader-after{
-     content: "";
-     width: 75px;
-     height: 75px;
-     border: 15px solid #0d0d0d;
-     border-top-color: #db62f4;
-     border-radius: 50%;
-     animation: loading 0.75s ease infinite;
- }
-
- @keyframes loading {
-     from{
-         transform: rotate(0turn);
-     }
-     to{
-         transform: rotate(1turn);
-     }
- }
src/index.js DELETED
@@ -1,13 +0,0 @@
- import React from 'react';
- import ReactDOM from 'react-dom';
- import './index.css';
- import App from './App';
-
- ReactDOM.render(
-     <React.StrictMode>
-         <App />
-     </React.StrictMode>,
-     document.getElementById('root')
- );
-
-