added frame transitions at 0.3 opacity
- owl_core.py +28 -5
- utils.py +15 -3
owl_core.py
CHANGED
@@ -76,21 +76,29 @@ def owl_full_video(
     frame_filenames = os.listdir(frames_dir)
 
     frame_paths = [] # list of frame paths to process based on fps_processed
+
+    # TESTING OUT FADING OUT THE ANNOTATED BOX BETWEEN FRAMES
+    annotation_guide = {}
+    last_frame_run = frame_filenames[0]
+
     # for every frame processed, add to frame_paths
     for i, frame in enumerate(frame_filenames):
+        path = os.path.join(frames_dir, frame)
         if i % fps_processed == 0:
-            frame_paths.append(os.path.join(frames_dir, frame))
+            last_frame_run = path
+            frame_paths.append(path)
+            annotation_guide[path] = [] # TESTING
+        else:
+            annotation_guide[last_frame_run].append(path) # TESTING
 
     # set up df for results
     df = pd.DataFrame(columns=["frame", "boxes", "scores", "labels"])
 
-    # for positive detection frames whether the directory has been created
-    dir_created = False
-
     # run owl in batches
     for i in tqdm(range(0, len(frame_paths), batch_size), desc="Running batches"):
         frame_nums = [i*fps_processed for i in range(batch_size)]
         batch_paths = frame_paths[i:i+batch_size] # paths for this batch
+        filenames = [os.path.basename(p) for p in batch_paths]
         images = [Image.open(image_path) for image_path in batch_paths]
 
         # run owl on this batch of frames
@@ -125,13 +133,28 @@ def owl_full_video(
             if labels[j] is not None:
                 annotated_frame = plot_predictions(image, labels[j], scores, boxes)
                 cv2.imwrite(image, annotated_frame)
+
+    # annotate all other frames with no detections
+    for key in annotation_guide:
+        labels = df[df["frame"] == key]["labels"].tolist()[0]
+        boxes = df[df["frame"] == key]["boxes"].tolist()[0]
+        scores = df[df["frame"] == key]["scores"].tolist()[0]
+
+        print(labels)
+        # Flatten nested lists if necessary
+        if not labels:
+            continue
+
+        for frame in annotation_guide[key]:
+            annotated_frame = plot_predictions(frame, labels, scores, boxes, opacity=0.3)
+            cv2.imwrite(frame, annotated_frame)
 
     # save the df to a csv
     csv_path = f"{results_dir}/{filename}_{threshold}.csv"
     df.to_csv(csv_path, index=False)
 
     # stitch the frames into a video
-    save_path = vid_stitcher(frames_dir, output_path=os.path.join(results_dir, "output.mp4"))
+    save_path = vid_stitcher(frames_dir, output_path=os.path.join(results_dir, "output.mp4"), fps=fps)
 
     return csv_path, save_path
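The heart of this change is the new annotation_guide dict: every frame that OWL actually processes becomes a key, and each frame skipped by the fps_processed stride is filed under the most recent processed frame, so that frame's detections can later be redrawn on the in-between frames at reduced opacity. Below is a minimal standalone sketch of just that grouping step; the function name build_annotation_guide and the demo values are illustrative, not part of the repo.

import os

def build_annotation_guide(frame_filenames, frames_dir, fps_processed):
    """Sketch of the commit's bookkeeping: every fps_processed-th frame
    is queued for detection; frames between two detections are grouped
    under the previous detection so they can reuse its boxes."""
    frame_paths = []        # frames that will actually run through OWL
    annotation_guide = {}   # processed frame -> frames that reuse its boxes
    last_frame_run = None

    for i, frame in enumerate(frame_filenames):
        path = os.path.join(frames_dir, frame)
        if i % fps_processed == 0:
            last_frame_run = path
            frame_paths.append(path)
            annotation_guide[path] = []
        else:
            annotation_guide[last_frame_run].append(path)

    return frame_paths, annotation_guide

# e.g. with 6 frames and fps_processed=3, frames 1-2 map to frame 0
# and frames 4-5 map to frame 3:
paths, guide = build_annotation_guide(
    [f"frame_{i:04d}.jpg" for i in range(6)], "frames", 3
)
print(guide)
# {'frames/frame_0000.jpg': ['frames/frame_0001.jpg', 'frames/frame_0002.jpg'],
#  'frames/frame_0003.jpg': ['frames/frame_0004.jpg', 'frames/frame_0005.jpg']}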
utils.py
CHANGED
@@ -14,6 +14,7 @@ def plot_predictions(
     labels: list[str],
     scores: list[float],
     boxes: list[float],
+    opacity: float = 1.0
 ) -> np.ndarray:
 
     image_source = cv2.imread(image)
@@ -34,9 +35,20 @@ def plot_predictions(
 
     bbox_annotator = sv.BoxAnnotator(color_lookup=sv.ColorLookup.INDEX, thickness=thickness)
     label_annotator = sv.LabelAnnotator(color_lookup=sv.ColorLookup.INDEX, text_scale=text_scale, text_thickness=text_thickness)
-    annotated_frame = cv2.cvtColor(image_source, cv2.COLOR_RGB2BGR)
-    annotated_frame = bbox_annotator.annotate(scene=annotated_frame, detections=boxes)
-    annotated_frame = label_annotator.annotate(scene=annotated_frame, detections=boxes, labels=labels)
+
+    # Create a semi-transparent overlay
+    overlay = image_source.copy()
+
+    # Apply bounding box annotations to the overlay
+    overlay = bbox_annotator.annotate(scene=overlay, detections=boxes)
+    overlay = label_annotator.annotate(scene=overlay, detections=boxes, labels=labels)
+
+    # annotated_frame = cv2.cvtColor(image_source, cv2.COLOR_RGB2BGR)
+    # annotated_frame = bbox_annotator.annotate(scene=annotated_frame, detections=boxes)
+    # annotated_frame = label_annotator.annotate(scene=annotated_frame, detections=boxes, labels=labels)
+
+    # Blend overlay with original image using the specified opacity
+    annotated_frame = cv2.addWeighted(overlay, opacity, image_source, 1 - opacity, 0)
 
     return annotated_frame
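The new opacity parameter works by drawing the boxes and labels onto a copy of the frame, then alpha-blending that copy back over the original with cv2.addWeighted, so opacity=1.0 reproduces the old solid annotations while opacity=0.3 gives the faded look used for the in-between frames. Here is a self-contained sketch of the blend, using a plain cv2.rectangle in place of the supervision annotators; draw_faded_box and the synthetic demo frame are illustrative, not from the repo.

import cv2
import numpy as np

def draw_faded_box(frame: np.ndarray, box, opacity: float = 0.3) -> np.ndarray:
    """Blend a drawn box over the frame:
    out = opacity * overlay + (1 - opacity) * frame."""
    overlay = frame.copy()
    x1, y1, x2, y2 = box
    cv2.rectangle(overlay, (x1, y1), (x2, y2), color=(0, 255, 0), thickness=2)
    # cv2.addWeighted computes the per-pixel weighted sum of the two images
    return cv2.addWeighted(overlay, opacity, frame, 1 - opacity, 0)

# demo on a synthetic frame: the box is drawn at 30% strength
frame = np.zeros((240, 320, 3), dtype=np.uint8)
faded = draw_faded_box(frame, (50, 50, 200, 150), opacity=0.3)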