Update app.py
app.py
CHANGED
@@ -19,7 +19,7 @@ def segment_everything(image):
     if isinstance(image, np.ndarray):
         image = Image.fromarray(image)
 
-    inputs = processor(text=["object"], images=
+    inputs = processor(text=["object"], images=image, padding="max_length", return_tensors="pt").to(device)
     with torch.no_grad():
         outputs = model(**inputs)
     preds = outputs.logits.squeeze().sigmoid().cpu()
@@ -35,7 +35,7 @@ def segment_box(image, x1, y1, x2, y2):
 
     x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
     cropped_image = image[y1:y2, x1:x2]
-    inputs = processor(text=["object"], images=
+    inputs = processor(text=["object"], images=Image.fromarray(cropped_image), padding="max_length", return_tensors="pt").to(device)
     with torch.no_grad():
         outputs = model(**inputs)
     preds = outputs.logits.squeeze().sigmoid().cpu()
@@ -57,7 +57,7 @@ def update_image(image, segmentation):
 
     seg_pil = Image.fromarray(segmentation).convert('RGBA')
 
-    if image_pil.size
+    if image_pil.size != seg_pil.size:
         seg_pil = seg_pil.resize(image_pil.size, Image.NEAREST)
 
     blended = Image.blend(image_pil.convert('RGBA'), seg_pil, 0.5)
@@ -96,4 +96,4 @@ with gr.Blocks() as demo:
     outputs=[output_image]
 )
 
-demo.launch()
+demo.launch()
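The substantive change in the first two hunks is the processor call: the text prompt is now padded to max_length and the resulting batch is moved to the model's device before inference. Below is a minimal, self-contained sketch of how that call is typically wired up with CLIPSeg from transformers; the checkpoint name, the segment function name, and the 0.5 binarization threshold are assumptions, since the commit only shows the changed lines.

```python
# Minimal sketch (not the Space's full app.py) of the updated CLIPSeg call.
# Checkpoint name and the 0.5 threshold are assumptions.
import numpy as np
import torch
from PIL import Image
from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation

device = "cuda" if torch.cuda.is_available() else "cpu"
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")  # assumed checkpoint
model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined").to(device)

def segment(image):
    # Accept numpy arrays (as Gradio delivers them) and convert to PIL, as in the diff.
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)

    # The updated call: padding="max_length" pads the text prompt to a fixed length,
    # and .to(device) moves the whole BatchEncoding onto the model's device.
    inputs = processor(text=["object"], images=image, padding="max_length", return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    preds = outputs.logits.squeeze().sigmoid().cpu()

    # Binarize into a displayable uint8 mask (threshold is an assumption).
    return (preds > 0.5).numpy().astype(np.uint8) * 255
```

The second hunk applies the same call to a box selection, slicing the numpy array (`image[y1:y2, x1:x2]`) and wrapping the crop with `Image.fromarray` before handing it to the processor.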
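The third hunk guards `update_image` against a size mismatch before blending, which matters because `Image.blend` requires both inputs to have the same size and mode. A small sketch of that overlay step, assuming the inputs arrive as numpy arrays; the `overlay` function name is made up for illustration.

```python
# Sketch of the overlay step from update_image: resize the mask to match the
# image before alpha-blending, as the added size check does.
import numpy as np
from PIL import Image

def overlay(image: np.ndarray, segmentation: np.ndarray) -> Image.Image:
    image_pil = Image.fromarray(image).convert('RGBA')
    seg_pil = Image.fromarray(segmentation).convert('RGBA')

    # Nearest-neighbour resize keeps the mask's hard edges intact.
    if image_pil.size != seg_pil.size:
        seg_pil = seg_pil.resize(image_pil.size, Image.NEAREST)

    # 50/50 blend, matching the alpha used in the diff.
    return Image.blend(image_pil, seg_pil, 0.5)
```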