Commit: debug

Changed files:
- examples/blobctrl/blobctrl_app.py (+10 -7)
- requirements.txt (+2 -2)
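In short, the commit comments out mobile_sam.eval(), labels the debug prints inside segmentation(), comments out the per-click prints in get_point(), and completes the version pins in requirements.txt.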
examples/blobctrl/blobctrl_app.py

@@ -114,7 +114,7 @@ blobnet = BlobNetModel.from_pretrained(blobnet_path, ignore_mismatched_sizes=Tru
 ## sam
 print(f"Loading SAM...")
 mobile_sam = sam_model_registry['vit_h'](checkpoint=sam_path).to(device)
-mobile_sam.eval()
+# mobile_sam.eval()
 mobile_predictor = SamPredictor(mobile_sam)
 colors = [(255, 0, 0), (0, 255, 0)]
 markers = [1, 5]
@@ -1030,14 +1030,17 @@ def segmentation(img, sel_pix):
     mobile_predictor.set_image(img if isinstance(img, np.ndarray) else np.array(img))
     with torch.no_grad():
         masks, _, _ = mobile_predictor.predict(point_coords=np.array(points), point_labels=np.array(labels), multimask_output=False)
-    print("
+    print("=======img=========")
     print(img)
     print(img.shape)
+    print("=======points and labels=========")
     print(points)
     print(labels)
+    print("=======masks=========")
     print(masks)
     print(np.unique(masks))
     print("================")
+    print(mobile_predictor)
     output_mask = np.ones((masks.shape[1], masks.shape[2], 3))*255
     for i in range(3):
         output_mask[masks[0] == True, i] = 0.0
@@ -1059,11 +1062,11 @@ def get_point(img, sel_pix, evt: gr.SelectData):
     # online show seg mask
     print(evt.index)
     masked_img, output_mask = segmentation(img, sel_pix)
-    print(masked_img.shape)
-    print(output_mask.shape)
-    print(masked_img)
-    print(output_mask)
-    print(np.unique(output_mask))
+    # print(masked_img.shape)
+    # print(output_mask.shape)
+    # print(masked_img)
+    # print(output_mask)
+    # print(np.unique(output_mask))

     return masked_img.astype(np.uint8), output_mask
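For context, here is a minimal sketch of the SAM point-prompt flow that segmentation() drives, using the same segment_anything calls as the diff. The checkpoint path, device, image, and click coordinates are placeholders, not values from the app.

# Sketch of the point-prompt flow used by segmentation() above.
# Assumptions: sam_path points at a ViT-H SAM checkpoint, image is an
# HWC uint8 RGB array, and the click coordinate is a placeholder.
import numpy as np
import torch
from segment_anything import sam_model_registry, SamPredictor

sam = sam_model_registry["vit_h"](checkpoint=sam_path).to("cuda")
sam.eval()  # inference mode; the commit comments this call out in the app
predictor = SamPredictor(sam)

predictor.set_image(image)  # computes the image embedding once

points = np.array([[320, 240]])  # one (x, y) click, placeholder values
labels = np.array([1])           # 1 = foreground, 0 = background
with torch.no_grad():
    masks, scores, _ = predictor.predict(
        point_coords=points,
        point_labels=labels,
        multimask_output=False,  # a single mask, as in the app
    )

# masks is a (1, H, W) boolean array; the app turns it into a
# white-background / black-object RGB mask:
output_mask = np.ones((masks.shape[1], masks.shape[2], 3)) * 255
output_mask[masks[0]] = 0.0

One caveat about the eval() change: commenting it out leaves the model in training mode, so any dropout or batch-norm layers behave differently at inference. torch.no_grad() only disables gradient tracking; it does not switch the train/eval mode.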
requirements.txt

@@ -4,10 +4,10 @@ accelerate==1.5.2
 huggingface_hub==0.29.3
 gradio==5.21.0
 opencv-python==4.8.1.78
-numpy==1.26.
+numpy==1.26.2
 einops==0.8.1
 matplotlib==3.10.1
-segment_anything
+segment_anything==1.0
 torch==2.2.0
 torchvision==0.17.0
 torchaudio==2.2.0
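Pinning every dependency to an exact version keeps the Space build reproducible. A quick sketch for confirming what actually resolved at startup (distribution names taken from the requirements file):

# Print the installed versions of the pinned packages.
from importlib.metadata import version

for dist in ("numpy", "segment_anything", "torch", "gradio"):
    print(dist, version(dist))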