Commit 0e5e403
JeffLiang committed
1 Parent(s): a0a0769

bug fix
Files changed:
- app.py +1 -1
- open_vocab_seg/utils/predictor.py +3 -3
- resources/demo_samples/sample_04.png +3 -0
app.py
CHANGED
@@ -54,7 +54,7 @@ def inference(class_names, proposal_gen, granularity, input_img):
 
 
 examples = [['Saturn V, toys, desk, sunflowers, white roses, chrysanthemums, carnations, green dianthus', 'Segment_Anything', 0.8, './resources/demo_samples/sample_01.jpeg'],
-            ['red bench, yellow bench,
+            ['red bench, yellow bench, blue bench, brown bench, green bench, blue chair, yellow chair, green chair', 'Segment_Anything', 0.8, './resources/demo_samples/sample_04.png'],
             ['Saturn V, toys, blossom', 'MaskFormer', 1.0, './resources/demo_samples/sample_01.jpeg'],
             ['Oculus, Ukulele', 'MaskFormer', 1.0, './resources/demo_samples/sample_03.jpeg'],
             ['Golden gate, yacht', 'MaskFormer', 1.0, './resources/demo_samples/sample_02.jpeg'],]
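For context on what this list feeds: in a Gradio demo, each row of examples must line up positionally with the interface's inputs, which is why every entry pairs class names, a proposal generator, a granularity value, and an image path. Below is a minimal runnable sketch of that wiring; the component labels and the stub inference body are invented here for illustration and are not taken from app.py.

import gradio as gr

def inference(class_names, proposal_gen, granularity, input_img):
    # Stub standing in for the real app.py function; returns the input
    # unchanged so the sketch runs end to end.
    return input_img

examples = [
    ['Saturn V, toys, blossom', 'MaskFormer', 1.0,
     './resources/demo_samples/sample_01.jpeg'],
]

# Each example row maps positionally onto `inputs` below.
demo = gr.Interface(
    fn=inference,
    inputs=[
        gr.Textbox(label="Class names (comma-separated)"),
        gr.Radio(["Segment_Anything", "MaskFormer"], label="Proposal generator"),
        gr.Slider(0.0, 1.0, label="Granularity"),
        gr.Image(type="filepath", label="Input image"),
    ],
    outputs=gr.Image(label="Segmentation"),
    examples=examples,
)

if __name__ == "__main__":
    demo.launch()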
open_vocab_seg/utils/predictor.py
CHANGED
@@ -150,7 +150,7 @@ class SAMVisualizationDemo(object):
 
         self.parallel = parallel
         self.granularity = granularity
-        sam = sam_model_registry["vit_h"](checkpoint=sam_path)
+        sam = sam_model_registry["vit_h"](checkpoint=sam_path).cuda()
         self.predictor = SamAutomaticMaskGenerator(sam)
         self.clip_model, _, _ = open_clip.create_model_and_transforms('ViT-L-14', pretrained=ovsegclip_path)
         self.clip_model.cuda()
@@ -190,8 +190,8 @@ class SAMVisualizationDemo(object):
         text = open_clip.tokenize(txts)
 
         with torch.no_grad(), torch.cuda.amp.autocast():
-            image_features = self.clip_model.encode_image(imgs)
-            text_features = self.clip_model.encode_text(text)
+            image_features = self.clip_model.encode_image(imgs.half())
+            text_features = self.clip_model.encode_text(text.half())
             image_features /= image_features.norm(dim=-1, keepdim=True)
             text_features /= text_features.norm(dim=-1, keepdim=True)
 
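As far as the diff shows, the fix has two parts: SAM's weights are moved to the GPU before SamAutomaticMaskGenerator runs (the .cuda() call), and the image and text tensors are cast with .half(), presumably to match fp16 CLIP weights. Inside torch.cuda.amp.autocast() many ops are cast automatically, but explicit casts keep dtypes aligned for calls autocast leaves alone. A minimal sketch of the underlying dtype/device mismatch pattern, using a toy linear layer rather than the actual OVSeg model:

import torch

# Toy stand-in for a model whose weights are fp16 on the GPU. Assumption:
# this mirrors the failure mode the commit fixes; it is not the OVSeg code.
model = torch.nn.Linear(4, 2).cuda().half()

x = torch.randn(1, 4)  # fp32 on CPU: model(x) would raise a dtype/device error

with torch.no_grad(), torch.cuda.amp.autocast():
    # Matching the commit's pattern: move and cast the input explicitly so
    # every op sees half-precision CUDA tensors, whether or not autocast
    # covers it.
    y = model(x.cuda().half())

print(y.dtype)  # torch.float16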
resources/demo_samples/sample_04.png
ADDED
Git LFS Details