Diego Fernandez committed
Commit • 7204716 · 1 Parent(s): 070b3ef

chore: improve Norfair parameters for examples
Browse files:
- app.py +0 -1
- demo_utils/configuration.py +7 -2
- demo_utils/distance_function.py +0 -54
- demo_utils/files.py +0 -8
- examples/{traffic_1_C.mp4 → traffic_1_A.mp4} +2 -2
- inference.py +35 -24
app.py
CHANGED
@@ -1,5 +1,4 @@
 import os
-import tempfile
 
 import gradio as gr
 
demo_utils/configuration.py
CHANGED
@@ -1,7 +1,12 @@
-DISTANCE_THRESHOLD_BBOX: …
-DISTANCE_THRESHOLD_CENTROID: int = 30
+DISTANCE_THRESHOLD_CENTROID: int = 0.08
 MAX_DISTANCE: int = 10000
 
 models_path = {"YOLOv7": "custom_models/yolov7.pt", "YOLOv7 Tiny": "custom_models/yolov7-tiny.pt"}
 
 style = {"Bounding box": "bbox", "Centroid": "centroid"}
+
+examples = {
+    "soccer": {"distance_threshold": 0.1, "absolute_path": True, "classes": [0]},
+    "oxford_town_center": {"distance_threshold": 0.05, "absolute_path": False, "classes": [0]},
+    "traffic_1_A": {"distance_threshold": 0.1, "absolute_path": False, "classes": [2, 3, 5, 7]},
+}
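The new examples dict keys per-clip tuning on a substring of the input filename. A minimal sketch of how such a table can be consumed, assuming defaults for arbitrary uploads (the helper name resolve_example_config is hypothetical; the real lookup happens inline in inference.py below):

from demo_utils.configuration import DISTANCE_THRESHOLD_CENTROID, examples


def resolve_example_config(input_video: str) -> dict:
    # Defaults for videos that are not bundled examples;
    # classes=None means the detector keeps every class.
    config = {
        "distance_threshold": DISTANCE_THRESHOLD_CENTROID,
        "absolute_path": False,
        "classes": None,
    }
    for name, overrides in examples.items():
        if name in input_video:  # e.g. "examples/traffic_1_A.mp4"
            config.update(overrides)
            break
    return config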
demo_utils/distance_function.py
DELETED
@@ -1,54 +0,0 @@
-import numpy as np
-import torch
-import torchvision.ops.boxes as bops
-
-from demo_utils.configuration import MAX_DISTANCE
-
-
-def euclidean_distance(detection, tracked_object):
-    return np.linalg.norm(detection.points - tracked_object.estimate)
-
-
-def iou_pytorch(detection, tracked_object):
-    # Slower but simpler version of iou
-
-    detection_points = np.concatenate([detection.points[0], detection.points[1]])
-    tracked_object_points = np.concatenate([tracked_object.estimate[0], tracked_object.estimate[1]])
-
-    box_a = torch.tensor([detection_points], dtype=torch.float)
-    box_b = torch.tensor([tracked_object_points], dtype=torch.float)
-    iou = bops.box_iou(box_a, box_b)
-
-    # Since 0 <= IoU <= 1, we define 1/IoU as a distance.
-    # Distance values will be in [1, inf)
-    return np.float(1 / iou if iou else MAX_DISTANCE)
-
-
-def iou(detection, tracked_object):
-    # Detection points will be box A.
-    # Tracked object's points will be box B.
-
-    box_a = np.concatenate([detection.points[0], detection.points[1]])
-    box_b = np.concatenate([tracked_object.estimate[0], tracked_object.estimate[1]])
-
-    x_a = max(box_a[0], box_b[0])
-    y_a = max(box_a[1], box_b[1])
-    x_b = min(box_a[2], box_b[2])
-    y_b = min(box_a[3], box_b[3])
-
-    # Compute the area of the intersection rectangle
-    inter_area = max(0, x_b - x_a + 1) * max(0, y_b - y_a + 1)
-
-    # Compute the area of both the prediction and tracker
-    # rectangles
-    box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
-    box_b_area = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
-
-    # Compute the intersection over union by taking the intersection
-    # area and dividing it by the sum of prediction + tracker
-    # areas minus the intersection area
-    iou = inter_area / float(box_a_area + box_b_area - inter_area)
-
-    # Since 0 <= IoU <= 1, we define 1/IoU as a distance.
-    # Distance values will be in [1, inf)
-    return 1 / iou if iou else (MAX_DISTANCE)
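With this module deleted, distance computation moves to Norfair's built-in factory, wired up in inference.py below. A minimal sketch of the replacement; the 720×1280 frame size here is an assumed example, the demo passes video.input_height and video.input_width:

from norfair.distances import create_normalized_mean_euclidean_distance

# Distances are normalized by the frame dimensions, so thresholds such as
# DISTANCE_THRESHOLD_CENTROID = 0.08 are resolution-independent fractions
# of the frame instead of the old 30-pixel threshold.
distance_function = create_normalized_mean_euclidean_distance(720, 1280)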
demo_utils/files.py
DELETED
@@ -1,8 +0,0 @@
-import os
-from typing import List
-
-
-def get_files(folder: str) -> List:
-    files_list = [file.split(".")[0] for file in os.listdir(folder)]
-
-    return files_list
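For reference, the deleted helper derived example names from a folder's contents at runtime; since the examples dict in demo_utils/configuration.py now enumerates the clips explicitly, the directory scan is no longer needed. What it amounted to (the folder path is illustrative):

import os

names = [f.split(".")[0] for f in os.listdir("examples")]  # e.g. ["traffic_1_A", ...]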
examples/{traffic_1_C.mp4 → traffic_1_A.mp4}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:fec3593a2a30bbcf0c614597dbd64ce5065d27d4ca0a13f3b444a44b5cc289ef
+size 32455820
inference.py
CHANGED
@@ -1,20 +1,16 @@
-import os
-import tempfile
-
 import numpy as np
 from norfair import AbsolutePaths, Paths, Tracker, Video
 from norfair.camera_motion import HomographyTransformationGetter, MotionEstimator
+from norfair.distances import create_normalized_mean_euclidean_distance
 
 from custom_models import YOLO, yolo_detections_to_norfair_detections
 from demo_utils.configuration import (
-    DISTANCE_THRESHOLD_BBOX,
     DISTANCE_THRESHOLD_CENTROID,
+    examples,
     models_path,
     style,
 )
-from demo_utils.distance_function import euclidean_distance, iou
 from demo_utils.draw import center, draw
-from demo_utils.files import get_files
 
 
 def inference(
@@ -24,12 +20,10 @@ def inference(
     track_points: str = "Bounding box",
     model_threshold: float = 0.25,
 ):
-    # temp_dir = tempfile.TemporaryDirectory()
-    # output_path = temp_dir.name
-
     coord_transformations = None
     paths_drawer = None
     fix_paths = False
+    classes = None
     track_points = style[track_points]
     model = YOLO(models_path[model])
     video = Video(input_path=input_video)
@@ -42,9 +36,6 @@
         features[0] == 1 or (len(features) > 1 and features[1] == 1)
     )
 
-    if motion_estimation and drawing_paths:
-        fix_paths = True
-
     if motion_estimation:
         transformations_getter = HomographyTransformationGetter()
 
@@ -52,10 +43,24 @@
         max_points=500, min_distance=7, transformations_getter=transformations_getter
     )
 
-    distance_function = iou if track_points == "bbox" else euclidean_distance
-    distance_threshold = (
-        DISTANCE_THRESHOLD_BBOX if track_points == "bbox" else DISTANCE_THRESHOLD_CENTROID
+    distance_function = create_normalized_mean_euclidean_distance(
+        video.input_height, video.input_width
     )
+    distance_threshold = DISTANCE_THRESHOLD_CENTROID
+
+    if motion_estimation and drawing_paths:
+        fix_paths = True
+
+    # Examples configuration
+    for example in examples:
+        if example not in input_video:
+            continue
+        fix_paths = examples[example]["absolute_path"]
+        distance_threshold = examples[example]["distance_threshold"]
+        classes = examples[example]["classes"]
+
+        print(f"Set config to {example}: {fix_paths} {distance_threshold} {classes}")
+        break
 
     tracker = Tracker(
         distance_function=distance_function,
@@ -66,18 +71,17 @@
         paths_drawer = Paths(center, attenuation=0.01)
 
     if fix_paths:
-        paths_drawer = AbsolutePaths(max_history=
+        paths_drawer = AbsolutePaths(max_history=50, thickness=2)
 
     for frame in video:
         yolo_detections = model(
-            frame,
+            frame,
+            conf_threshold=model_threshold,
+            iou_threshold=0.45,
+            image_size=720,
+            classes=classes,
         )
 
-        mask = np.ones(frame.shape[:2], frame.dtype)
-
-        if motion_estimation:
-            coord_transformations = motion_estimator.update(frame, mask)
-
         detections = yolo_detections_to_norfair_detections(
             yolo_detections, track_points=track_points
         )
@@ -86,6 +90,15 @@
             detections=detections, coord_transformations=coord_transformations
        )
 
+        mask = np.ones(frame.shape[:2], frame.dtype)
+        if track_points == "bbox":
+            for det in detections:
+                i = det.points.astype(int)
+                mask[i[0, 1] : i[1, 1], i[0, 0] : i[1, 0]] = 0
+
+        if motion_estimation:
+            coord_transformations = motion_estimator.update(frame, mask)
+
         frame = draw(
             paths_drawer,
             track_points,
@@ -99,7 +112,5 @@
 
     base_file_name = input_video.split("/")[-1].split(".")[0]
     file_name = base_file_name + "_out.mp4"
-
-    # return os.path.join(output_path, file_name)
 
     return file_name
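The reworked loop masks every tracked bounding box out of the frame before estimating camera motion, so moving foreground objects do not contaminate the homography. A standalone sketch of that masking step, assuming bbox-style detection points of shape (2, 2) laid out as ((x0, y0), (x1, y1)) (the helper name build_motion_mask is hypothetical):

import numpy as np


def build_motion_mask(frame: np.ndarray, detections) -> np.ndarray:
    # Start from an all-ones mask: every pixel may contribute motion features.
    mask = np.ones(frame.shape[:2], frame.dtype)
    for det in detections:
        # Zero out each detected box so the MotionEstimator samples
        # keypoints only from the (presumably static) background.
        i = det.points.astype(int)
        mask[i[0, 1] : i[1, 1], i[0, 0] : i[1, 0]] = 0
    return mask

The resulting mask feeds motion_estimator.update(frame, mask), matching the call the diff already makes.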