jadechoghari committed
Commit a4a6a96 · verified · 1 Parent(s): c73b59b

Update app.py

Files changed (1):
  1. app.py +33 -24
app.py CHANGED

@@ -15,7 +15,7 @@ sam_processor = SamProcessor.from_pretrained("jadechoghari/robustsam-vit-base")
 
 def apply_mask(image, mask, color):
     """Apply a mask to an image with a specific color."""
-    for c in range(3): # iterate over rgb channels
+    for c in range(3): # Iterate over RGB channels
         image[:, :, c] = np.where(mask, color[c], image[:, :, c])
     return image
 
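A note on the helper touched by this first hunk: `apply_mask` overwrites the masked pixels channel by channel with `np.where`. A minimal usage sketch with a hypothetical toy image and mask (neither is from this repo):

```python
import numpy as np

# Toy 4x4 RGB image and a boolean mask, for illustration only.
image = np.zeros((4, 4, 3), dtype=np.uint8)
mask = np.zeros((4, 4), dtype=bool)
mask[1:3, 1:3] = True  # mark a 2x2 region

red = (255, 0, 0)
for c in range(3):  # same channel loop as apply_mask in app.py
    image[:, :, c] = np.where(mask, red[c], image[:, :, c])

assert (image[1, 1] == np.array([255, 0, 0])).all()
```

The per-channel loop is equivalent to the single boolean-indexing assignment `image[mask] = red`, which NumPy broadcasts across the three channels.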
@@ -59,33 +59,42 @@ def query(image, texts, threshold):
         inputs["reshaped_input_sizes"].cpu()
         )[0][0][0].numpy()
 
-        # we apply the mask with the corresponding color
-        color = colors[i % len(colors)] # we cycle through colors
+        color = colors[i % len(colors)] # cycle through colors
         image = apply_mask(image, mask > 0.5, color)
 
     result_image = Image.fromarray(image)
 
     return result_image
 
-description = (
-    "Welcome to RobustSAM by Snap Research."
-    "This Space uses RobustSAM, a robust version of the Segment Anything Model (SAM) with improved performance on low-quality images while maintaining zero-shot segmentation capabilities. "
-    "Thanks to its integration with OWLv2, RobustSAM becomes text-promptable, allowing for flexible and accurate segmentation, even with degraded image quality. Try the example or input an image with comma-separated candidate labels to see the enhanced segmentation results."
-)
-
-demo = gr.Interface(
-    query,
-    inputs=[gr.Image(type="pil", label="Image Input"), gr.Textbox(label="Candidate Labels"), gr.Slider(0, 1, value=0.05, label="Confidence Threshold")],
-    outputs=gr.Image(type="pil", label="Segmented Image"),
-    title="RobustSAM",
-    description=description,
-    examples=[
+title = """
+# RobustSAM
+"""
+
+description = """
+**Welcome to RobustSAM by Snap Research.**
+
+This Space uses **RobustSAM**, a robust version of the Segment Anything Model (SAM) with improved performance on low-quality images while maintaining zero-shot segmentation capabilities.
+
+Thanks to its integration with **OWLv2**, RobustSAM becomes text-promptable, allowing for flexible and accurate segmentation, even with degraded image quality.
+
+Try the example or input an image with comma-separated candidate labels to see the enhanced segmentation results.
+
+For better results, please check the [GitHub repository](https://github.com/robustsam/RobustSAM).
+"""
+
+with gr.Blocks() as demo:
+    gr.Markdown(title)
+    gr.Markdown(description)
+
+    gr.Interface(
+        query,
+        inputs=[gr.Image(type="pil", label="Image Input"), gr.Textbox(label="Candidate Labels"), gr.Slider(0, 1, value=0.05, label="Confidence Threshold")],
+        outputs=gr.Image(type="pil", label="Segmented Image"),
+        examples=[
         ["./blur.jpg", "insect", 0.1],
-        ["./lowlight.jpg", "bus, window", 0.1],
-        ["./rain.jpg", "tree, leafs", 0.1],
-        ["./haze.jpg", "", 0.1],
-    ],
-    cache_examples=True
-)
-
-demo.launch()
+            ["./lowlight.jpg", "bus, window", 0.1]
+        ],
+        cache_examples=True
+    )
+
+demo.launch()
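For context on the second hunk: per the Space description, OWLv2 turns the comma-separated labels into boxes and RobustSAM segments each box; the hunk shows only the tail of `query`, where each mask is thresholded at 0.5 and painted with a cycling color. Below is a minimal, self-contained sketch of that detect-then-segment flow. The OWLv2 checkpoint, the `segment` name, and the single fixed color are assumptions for illustration; only the RobustSAM checkpoint and the `post_process_masks` call pattern come from the diff.

```python
import numpy as np
import torch
from PIL import Image
from transformers import Owlv2ForObjectDetection, Owlv2Processor, SamModel, SamProcessor

# RobustSAM checkpoint as in the hunk header; the detector checkpoint is assumed.
sam_model = SamModel.from_pretrained("jadechoghari/robustsam-vit-base")
sam_processor = SamProcessor.from_pretrained("jadechoghari/robustsam-vit-base")
det_processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
det_model = Owlv2ForObjectDetection.from_pretrained("google/owlv2-base-patch16-ensemble")

def segment(image: Image.Image, texts: str, threshold: float) -> Image.Image:
    labels = [t.strip() for t in texts.split(",") if t.strip()]
    # 1) Text-conditioned detection: box proposals for the candidate labels.
    det_inputs = det_processor(text=[labels], images=image, return_tensors="pt")
    with torch.no_grad():
        det_outputs = det_model(**det_inputs)
    target_sizes = torch.tensor([image.size[::-1]])  # (height, width)
    detections = det_processor.post_process_object_detection(
        det_outputs, threshold=threshold, target_sizes=target_sizes
    )[0]

    arr = np.array(image).copy()
    for box in detections["boxes"]:
        # 2) Box-prompted segmentation with RobustSAM.
        sam_inputs = sam_processor(image, input_boxes=[[box.tolist()]], return_tensors="pt")
        with torch.no_grad():
            sam_outputs = sam_model(**sam_inputs)
        mask = sam_processor.image_processor.post_process_masks(
            sam_outputs.pred_masks.cpu(),
            sam_inputs["original_sizes"].cpu(),
            sam_inputs["reshaped_input_sizes"].cpu(),
        )[0][0][0].numpy()
        # app.py cycles through a color palette here; a fixed color keeps the sketch short.
        arr[mask > 0.5] = (255, 0, 0)
    return Image.fromarray(arr)
```

Design-wise this mirrors the Space: detection misses at a high threshold propagate straight into the segmentation output, which is why the confidence slider in the interface defaults to a low 0.05.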