# Copyright 2020 The HuggingFace Datasets Authors and the current dataset
# script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic Segmentation of Underwater IMagery (SUIM) dataset"""


import os

import datasets


_CITATION = """\
@inproceedings{islam2020suim,
  title={{Semantic Segmentation of Underwater Imagery: Dataset and Benchmark}},
  author={Islam, Md Jahidul and Edge, Chelsey and Xiao, Yuyang and Luo, Peigen and Mehtaz, Muntaqim and Morse, Christopher and Enan, Sadman Sakib and Sattar, Junaed},
  booktitle={IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)},
  year={2020},
  organization={IEEE/RSJ}
}
"""

_DESCRIPTION = """\
SUIM is a dataset for semantic segmentation of underwater imagery.

The dataset consists of 1,525 annotated images for training/validation and
110 images for testing.

| Object category                  | Symbol | RGB color code |
|----------------------------------|--------|----------------|
| Background (waterbody)           | BW     | 000 (black)    |
| Human divers                     | HD     | 001 (blue)     |
| Aquatic plants and sea-grass     | PF     | 010 (green)    |
| Wrecks and ruins                 | WR     | 011 (sky)      |
| Robots (AUVs/ROVs/instruments)   | RO     | 100 (red)      |
| Reefs and invertebrates          | RI     | 101 (pink)     |
| Fish and vertebrates             | FV     | 110 (yellow)   |
| Sea-floor and rocks              | SR     | 111 (white)    |


For more information about the original SUIM dataset,
please visit the official dataset page:

https://irvlab.cs.umn.edu/resources/suim-dataset

Please refer to the original dataset source for any additional details,
citations, or specific usage guidelines provided by the dataset creators.
"""

_HOMEPAGE = "https://irvlab.cs.umn.edu/resources/suim-dataset"

_LICENSE = "mit"


class Suim(datasets.GeneratorBasedBuilder):
    """Semantic Segmentation of Underwater IMagery (SUIM) dataset"""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="suim",
            version=VERSION,
            description="Semantic Segmentation of Underwater IMagery (SUIM) dataset",
        ),
    ]

    DEFAULT_CONFIG_NAME = "suim"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "img": datasets.Image(),
                    "mask": datasets.Image(),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract("SUIM.zip")
        train_dir = os.path.join(data_dir, "SUIM", "train_val")
        test_dir = os.path.join(data_dir, "SUIM", "TEST")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_dir": train_dir,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_dir": test_dir,
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, data_dir, split):
        img_dir = os.path.join(data_dir, "images")
        masks_dir = os.path.join(data_dir, "masks")
        # Sort the directory listing so example ordering is deterministic.
        img_files = sorted(os.listdir(img_dir))

        for idx, img_file in enumerate(img_files):
            img_path = os.path.join(img_dir, img_file)
            mask_path = os.path.join(
                masks_dir,
                img_file.replace(".jpg", ".bmp"),
            )
            record = {
                "img": img_path,
                "mask": mask_path,
            }
            yield idx, record
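

# Minimal usage sketch. Assumptions (not fixed by the script itself): the file
# is saved as "suim.py" and "SUIM.zip" sits in the same working directory.
if __name__ == "__main__":
    from datasets import load_dataset

    # Build both splits from the local loading script and archive.
    suim = load_dataset("suim.py")
    print(suim)

    # Each example decodes to a PIL image and its color-coded segmentation mask.
    sample = suim["train"][0]
    print(sample["img"].size, sample["mask"].size)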