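"""Gradio demo: automatic image segmentation with Segment Anything (SAM).

Upload an image, pick a SAM backbone (vit_h / vit_l / vit_b), and get back the
input image overlaid with the automatically generated masks, each painted in a
distinct colour.
"""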
import numpy as np
import torch
from PIL import Image
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
import matplotlib.pyplot as plt
import gradio as gr
# import segmentation_models_pytorch as smp
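
# Run SAM on a GPU when one is available (a reasonable default; SAM otherwise
# falls back to the much slower CPU path).
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
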
def get_masks(image, model_type):
    """Segment `image` with the selected SAM backbone and return an overlay."""
    # Checkpoint filenames follow the official segment-anything release.
    checkpoints = {
        'vit_h': 'sam_vit_h_4b8939.pth',
        'vit_l': 'sam_vit_l_0b3195.pth',
        'vit_b': 'sam_vit_b_01ec64.pth',
    }
    sam = sam_model_registry[model_type](checkpoint=checkpoints[model_type])
    sam.to(device=DEVICE)

    # Gradio passes a PIL image; SAM expects an HxWx3 uint8 RGB array.
    image = np.array(image.convert('RGB'))

    mask_generator = SamAutomaticMaskGenerator(sam)
    masks = mask_generator.generate(image)

    # Paint each mask in a distinct colour on a blank canvas.
    composite_image = np.zeros_like(image)
    colors = plt.cm.hsv(np.linspace(0, 1, max(len(masks), 1)))
    for i, mask_data in enumerate(masks):
        mask = mask_data['segmentation']  # boolean HxW array
        color = colors[i]
        composite_image[mask] = (color[:3] * 255).astype(np.uint8)  # Apply color to mask

    # Blend the coloured masks with the original image (50/50).
    overlayed_image = (composite_image * 0.5 + image * 0.5).astype(np.uint8)
    return Image.fromarray(overlayed_image)
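
# Quick local test (a sketch: assumes the vit_b checkpoint has been downloaded
# and 'example.png' is a hypothetical image file in the working directory):
#
#   result = get_masks(Image.open('example.png'), 'vit_b')
#   result.save('overlay.png')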



iface = gr.Interface(
    fn=get_masks,
    inputs=[gr.Image(type="pil"), gr.Dropdown(['vit_h', 'vit_b', 'vit_l'], label="Model Type")],
    outputs=gr.Image(type="pil"),
    title="SAM Automatic Segmentation",
    description="Upload an image and select a SAM model variant to receive the image overlaid with the automatically generated segmentation masks.",
)


iface.launch()
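
# To expose the demo via a temporary public URL (e.g. for testing from another
# machine), launch with iface.launch(share=True) instead.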