Update app.py
app.py CHANGED
@@ -8,8 +8,8 @@ from transformers import (
     AutoTokenizer,
     BlipForConditionalGeneration,
     BlipProcessor,
-
-
+    Owlv2ForObjectDetection,
+    Owlv2Processor,
     SamModel,
     SamProcessor,
 )
@@ -85,8 +85,8 @@ def run_image_captioner(image, device):
 def run_segmentation(image, object_to_segment, device):
     # OWL-V2 for object detection
     owl_v2_model_id = "google/owlv2-base-patch16-ensemble"
-    processor = Owlv2Processor.from_pretrained(
-    od_model = Owlv2ForObjectDetection.from_pretrained(
+    processor = Owlv2Processor.from_pretrained(owl_v2_model_id)
+    od_model = Owlv2ForObjectDetection.from_pretrained(owl_v2_model_id).to(device)
     text_queries = [object_to_segment]
     inputs = processor(text=text_queries, images=image, return_tensors="pt").to(device)
     with torch.no_grad():
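For context, here is how the detection step this change sets up typically finishes and hands off to SAM. This is a minimal sketch rather than the Space's actual code: the function name detect_and_segment, the 0.3 score threshold, the facebook/sam-vit-base checkpoint, and the assumption that image is a PIL.Image are all mine; the post-processing calls (post_process_object_detection, post_process_masks) are the standard transformers APIs for these models.

import torch
from transformers import (
    Owlv2ForObjectDetection,
    Owlv2Processor,
    SamModel,
    SamProcessor,
)

def detect_and_segment(image, object_to_segment, device):
    # OWL-V2 for open-vocabulary detection, as in the diff above.
    owl_v2_model_id = "google/owlv2-base-patch16-ensemble"
    processor = Owlv2Processor.from_pretrained(owl_v2_model_id)
    od_model = Owlv2ForObjectDetection.from_pretrained(owl_v2_model_id).to(device)

    text_queries = [object_to_segment]
    inputs = processor(text=text_queries, images=image, return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = od_model(**inputs)

    # Turn raw logits into (x0, y0, x1, y1) boxes in image coordinates.
    # threshold=0.3 is an assumed value, not taken from the Space.
    target_sizes = torch.tensor([image.size[::-1]], device=device)  # PIL size -> (h, w)
    results = processor.post_process_object_detection(
        outputs=outputs, threshold=0.3, target_sizes=target_sizes
    )[0]
    boxes = results["boxes"].cpu().tolist()
    if not boxes:
        return []  # nothing detected for this query

    # SAM takes the detected boxes as prompts and returns one mask set per box.
    sam_model_id = "facebook/sam-vit-base"  # assumed checkpoint
    sam_processor = SamProcessor.from_pretrained(sam_model_id)
    sam_model = SamModel.from_pretrained(sam_model_id).to(device)

    sam_inputs = sam_processor(image, input_boxes=[boxes], return_tensors="pt").to(device)
    with torch.no_grad():
        sam_outputs = sam_model(**sam_inputs)

    masks = sam_processor.image_processor.post_process_masks(
        sam_outputs.pred_masks.cpu(),
        sam_inputs["original_sizes"].cpu(),
        sam_inputs["reshaped_input_sizes"].cpu(),
    )
    return masks

Loading both model pairs inside the function keeps the sketch self-contained; a deployed Space would normally load them once at module level so each request does not re-instantiate the weights.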