File size: 3,736 Bytes
e36cc5b
 
 
 
e9de875
e36cc5b
 
 
e9de875
 
c7c02f9
7d4df53
 
 
e9de875
 
 
 
e36cc5b
 
 
c7c02f9
e36cc5b
 
c7c02f9
e36cc5b
 
 
 
 
c7c02f9
 
e36cc5b
 
 
 
 
 
c7c02f9
e36cc5b
7d4df53
 
c7c02f9
 
 
 
 
 
 
 
 
e9de875
c7c02f9
e36cc5b
 
 
 
 
 
 
 
7d4df53
 
e36cc5b
 
 
 
 
 
 
 
 
 
 
7d4df53
e36cc5b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import gradio as gr
from datasets import load_dataset
from PIL import Image, ImageDraw
import numpy as np
from rdp import rdp

# Load the test split once at import time.
# NOTE(review): this downloads/caches the dataset from the Hugging Face Hub
# on first run — the script blocks here until the data is available.
dataset = load_dataset("dwb2023/brain-tumor-image-dataset-semantic-segmentation", split="test")

def simplify_segmentation(segmentation, max_points=20):
    """Reduce a polygon to at most ``max_points`` vertices.

    Applies the Ramer-Douglas-Peucker algorithm repeatedly, growing the
    tolerance by 50% each pass until the result is small enough.

    Args:
        segmentation: Sequence of vertices (assumed (x, y) pairs — the
            shape ``rdp`` expects; verify against the dataset format).
        max_points: Maximum number of vertices to keep.

    Returns:
        The simplified vertices as a plain Python list; ``[]`` when the
        input is empty.
    """
    if not segmentation or len(segmentation) == 0:
        return []
    points = np.array(segmentation)
    tolerance = 1.0
    reduced = rdp(points, epsilon=tolerance)
    # Keep loosening the tolerance until the polygon is small enough.
    while len(reduced) > max_points:
        tolerance *= 1.5
        reduced = rdp(points, epsilon=tolerance)
    return reduced.tolist()

def draw_annotations(index):
    """Render one dataset record with its annotations drawn on top.

    Draws the bounding box (red), each original segmentation polygon
    (blue), and its RDP-simplified version (green) onto the image, and
    builds a human-readable summary of the record.

    Args:
        index: Integer index into the module-level ``dataset`` split.

    Returns:
        Tuple of (annotated PIL.Image in RGB mode, info string). On any
        failure, returns a gray placeholder image and an error message
        instead of raising, so the Gradio UI stays responsive.
    """
    try:
        # Fetch the image and annotations from the dataset
        record = dataset[index]

        # Convert image to PIL Image if it's a numpy array
        if isinstance(record['image'], np.ndarray):
            img = Image.fromarray(record['image'])
        else:
            img = record['image']

        img = img.convert("RGB")  # Ensure the image is in RGB mode

        draw = ImageDraw.Draw(img)

        # Draw bounding box — assumes COCO-style [x, y, width, height].
        bbox = record["bbox"]
        draw.rectangle([bbox[0], bbox[1], bbox[0] + bbox[2], bbox[1] + bbox[3]], outline="red", width=2)

        # Draw original and simplified segmentation polygons.
        segmentation = record["segmentation"]
        original_points = 0
        simplified_points = 0
        for seg in segmentation:
            if seg:  # Skip empty segmentations
                draw.polygon(seg, outline="blue", width=2)
                original_points += len(seg)

                # Simplify and draw the simplified segmentation.
                # RDP can collapse a polygon to 2 (or fewer) vertices;
                # drawing such a degenerate polygon raises ValueError,
                # which would gray out the whole image via the except
                # below — skip drawing it instead.
                simplified_seg = simplify_segmentation(seg)
                simplified_points += len(simplified_seg)
                if len(simplified_seg) >= 3:
                    draw.polygon(simplified_seg, outline="green", width=2)

        # Prepare additional information (joined once, not += in a loop).
        bbox_str = f"[{bbox[0]:.2f}, {bbox[1]:.2f}, {bbox[2]:.2f}, {bbox[3]:.2f}]"
        info = "\n".join([
            f"File Name: {record['file_name']}",
            f"Image ID: {record['id']}",
            f"Category ID: {record['category_id']}",
            f"Bounding Box: {bbox_str}",
            f"Original Segmentation Points: {original_points}",
            f"Simplified Segmentation Points: {simplified_points}",
            f"Area: {record['area']:.2f}",
        ])

        return img, info
    except Exception as e:
        # Best-effort fallback: never let one bad record crash the UI.
        print(f"Error processing image at index {index}: {e}")
        return Image.new('RGB', (300, 300), color='gray'), f"Error loading image information: {str(e)}"

# --- Gradio interface ------------------------------------------------
with gr.Blocks() as demo:
    # Header / usage notes.
    gr.Markdown("# Brain Tumor Image Dataset Viewer")
    gr.Markdown("## Refer to the [dwb2023/brain-tumor-image-dataset-semantic-segmentation](https://huggingface.co/datasets/dwb2023/brain-tumor-image-dataset-semantic-segmentation/viewer/default/test) dataset for more information")
    gr.Markdown("### Red: Bounding Box, Blue: Original Segmentation, Green: Simplified Segmentation (max 20 points)")

    # Two-column layout: annotated image on the left, controls on the right.
    with gr.Row():
        with gr.Column(scale=1):
            annotated_view = gr.Image(label="Annotated Image")
        with gr.Column(scale=1):
            index_slider = gr.Slider(minimum=0, maximum=len(dataset) - 1, step=1, value=0, label="Image ID Slider")
            details_box = gr.Textbox(label="Image Information", lines=10)

    render_outputs = [annotated_view, details_box]

    # Re-render whenever the slider moves, and once on page load.
    index_slider.change(draw_annotations, inputs=index_slider, outputs=render_outputs)
    demo.load(draw_annotations, inputs=index_slider, outputs=render_outputs)

demo.launch(debug=True)