# Source: https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py

import json

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {Boat dataset},
author={Tzu-Chi Chen, Inc.},
year={2024}
}
"""

_DESCRIPTION = """\
This dataset is designed to solve an object detection task with images of boats.
"""

_HOMEPAGE = "https://huggingface.co/datasets/zhuchi76/Boat_dataset"

_LICENSE = ""

_BASE_URL = f"{_HOMEPAGE}/resolve/main"

_URLS = {
    "classes": f"{_BASE_URL}/data/classes.txt",
    "train": f"{_BASE_URL}/data/instances_train2023.jsonl",
    "val": f"{_BASE_URL}/data/instances_val2023.jsonl",
    "test": f"{_BASE_URL}/data/instances_val2023r.jsonl",
}
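
# For reference, each line of the instances_*.jsonl files is expected to look like
# the record sketched below. This is illustrative only, inferred from the features
# declared in `_info` (field names and values are assumptions, not verbatim data):
#
# {"image_id": 1, "image_path": "images/train2023/000001.jpg",
#  "width": 1920, "height": 1080,
#  "objects": {"id": [0], "area": [523.5],
#              "bbox": [[100.0, 200.0, 30.0, 17.5]],  # [x, y, width, height]
#              "category": [2]}}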

class BoatDataset(datasets.GeneratorBasedBuilder):
    """Object detection dataset of boat images, with annotations distributed as JSONL files."""

    VERSION = datasets.Version("1.1.0")
    
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Boat_dataset", version=VERSION, description="Dataset for detecting boats in aerial images."),
    ]

    DEFAULT_CONFIG_NAME = "Boat_dataset"  # Provide a default configuration

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'image_id': datasets.Value('int32'),
                'image_path': datasets.Value('string'),
                'width': datasets.Value('int32'),
                'height': datasets.Value('int32'),
                'objects': datasets.Features({
                    'id': datasets.Sequence(datasets.Value('int32')),
                    'area': datasets.Sequence(datasets.Value('float32')),
                    'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('float32'), length=4)),  # [x, y, width, height]
                    'category': datasets.Sequence(datasets.Value('int32'))
                }),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the annotation files and the class list.
        downloaded_files = dl_manager.download_and_extract(_URLS)

        # Load class labels from the downloaded classes file (one name per line).
        with open(downloaded_files["classes"], "r", encoding="utf-8") as file:
            classes = [line.strip() for line in file if line.strip()]
        
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "annotations_file": downloaded_files["train"],
                    "classes": classes,
                    "split": "train",
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "annotations_file": downloaded_files["val"],
                    "classes": classes,
                    "split": "val",
                }
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "annotations_file": downloaded_files["test"],
                    "classes": classes,
                    "split": "val_real",
                }
            ),
        ]

    def _generate_examples(self, annotations_file, classes, split):
        # `classes` and `split` are accepted for symmetry with the split generators;
        # categories are yielded as integer ids, so the class names are not used here.
        # Each line of the annotations file is one JSON record describing an image.
        with open(annotations_file, encoding="utf-8") as f:
            for key, row in enumerate(f):
                try:
                    data = json.loads(row.strip())
                    yield key, {
                        "image_id": data["image_id"],
                        "image_path": data["image_path"],
                        "width": data["width"],
                        "height": data["height"],
                        "objects": data["objects"],
                    }
                except json.JSONDecodeError:
                    print(f"Skipping invalid JSON at line {key + 1}: {row}")
                    continue
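
# Minimal usage sketch (illustrative; assumes this script is hosted in the
# zhuchi76/Boat_dataset repo on the Hugging Face Hub):
#
#   from datasets import load_dataset
#   ds = load_dataset("zhuchi76/Boat_dataset", trust_remote_code=True)
#   print(ds["train"][0]["objects"]["bbox"])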