try to fix get_point
app.py CHANGED
@@ -17,38 +17,32 @@ def preprocess_image(image):
 def get_point(point_type, tracking_points, trackings_input_label, first_frame_path, evt: gr.SelectData):
     print(f"You selected {evt.value} at {evt.index} from {evt.target}")
 
-    tracking_points.
-    print(f"TRACKING POINT: {tracking_points
+    tracking_points.append(evt.index)
+    print(f"TRACKING POINT: {tracking_points}")
 
     if point_type == "include":
-        trackings_input_label.
+        trackings_input_label.append(1)
     elif point_type == "exclude":
-        trackings_input_label.
-    print(f"TRACKING INPUT LABEL: {trackings_input_label
+        trackings_input_label.append(0)
+    print(f"TRACKING INPUT LABEL: {trackings_input_label}")
 
-    # Open the image and get its dimensions
     transparent_background = Image.open(first_frame_path).convert('RGBA')
     w, h = transparent_background.size
+    radius = int(0.02 * min(w, h))
 
-    # Define the circle radius as a fraction of the smaller dimension
-    fraction = 0.02 # You can adjust this value as needed
-    radius = int(fraction * min(w, h))
-
-    # Create a transparent layer to draw on
     transparent_layer = np.zeros((h, w, 4), dtype=np.uint8)
 
-    for index, track in enumerate(tracking_points
-        if trackings_input_label
+    for index, track in enumerate(tracking_points):
+        if trackings_input_label[index] == 1:
             cv2.circle(transparent_layer, track, radius, (0, 255, 0, 255), -1)
         else:
             cv2.circle(transparent_layer, track, radius, (255, 0, 0, 255), -1)
 
-    # Convert the transparent layer back to an image
     transparent_layer = Image.fromarray(transparent_layer, 'RGBA')
     selected_point_map = Image.alpha_composite(transparent_background, transparent_layer)
 
     return tracking_points, trackings_input_label, selected_point_map
-
+
 # use bfloat16 for the entire notebook
 torch.autocast(device_type="cuda", dtype=torch.bfloat16).__enter__()
 
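For context: get_point is the click handler for an image component in this Gradio app. The fix replaces the truncated list updates with plain append calls on tracking_points and trackings_input_label, folds the radius computation into one line, and keeps the logic that composites the clicked points onto the first frame. Below is a minimal sketch of how a handler with this signature is typically wired into a Gradio Blocks UI; the component names, default values, and the use of gr.State are illustrative assumptions, not taken from this diff.

import gradio as gr

with gr.Blocks() as demo:
    # Assumed state holders; the real app may name or initialize them differently.
    tracking_points = gr.State([])            # clicked (x, y) coordinates
    trackings_input_label = gr.State([])      # 1 = include, 0 = exclude
    first_frame_path = gr.State("frame.png")  # hypothetical path to the extracted first frame

    point_type = gr.Radio(["include", "exclude"], value="include", label="Point type")
    input_image = gr.Image(type="filepath", label="First frame")
    points_map = gr.Image(label="Selected points")

    # Gradio fills the gr.SelectData argument (evt) automatically because it is
    # type-annotated in the handler signature, so it is not listed in inputs.
    input_image.select(
        get_point,
        inputs=[point_type, tracking_points, trackings_input_label, first_frame_path],
        outputs=[tracking_points, trackings_input_label, points_map],
    )

demo.launch()

Returning tracking_points and trackings_input_label into the corresponding gr.State outputs keeps the accumulated clicks across selections, which is why the handler returns them alongside selected_point_map.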