kumapo committed
Commit 46539c5 · 1 Parent(s): 643af36

Upload coco_dataset_script

Files changed (1)
  1. coco_dataset_script.py +207 -0
coco_dataset_script.py ADDED
@@ -0,0 +1,207 @@
# copied from https://huggingface.co/datasets/ydshieh/coco_dataset_script/blob/main/coco_dataset_script.py
import json
import os

import datasets


class COCOBuilderConfig(datasets.BuilderConfig):

    def __init__(self, name, splits, **kwargs):
        super().__init__(name, **kwargs)
        self.splits = splits


# Add BibTeX citation
# Find, for instance, the citation on arXiv or on the dataset repo/website
_CITATION = """\
@article{DBLP:journals/corr/LinMBHPRDZ14,
  author    = {Tsung{-}Yi Lin and
               Michael Maire and
               Serge J. Belongie and
               Lubomir D. Bourdev and
               Ross B. Girshick and
               James Hays and
               Pietro Perona and
               Deva Ramanan and
               Piotr Doll{\'{a}}r and
               C. Lawrence Zitnick},
  title     = {Microsoft {COCO:} Common Objects in Context},
  journal   = {CoRR},
  volume    = {abs/1405.0312},
  year      = {2014},
  url       = {http://arxiv.org/abs/1405.0312},
  archivePrefix = {arXiv},
  eprint    = {1405.0312},
  timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
  biburl    = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

# Add a description of the dataset here
# You can copy an official description
_DESCRIPTION = """\
COCO is a large-scale object detection, segmentation, and captioning dataset.
"""

# Add a link to an official homepage for the dataset here
_HOMEPAGE = "http://cocodataset.org/#home"

# Add the license for the dataset here if you can find it
_LICENSE = ""

# Add links to the official dataset URLs here.
# The Hugging Face datasets library doesn't host the datasets but only points to the original files.
# This can be an arbitrary nested dict/list of URLs (see the `_split_generators` method below).

# This script is meant to work with a local (downloaded) COCO dataset, so no URLs are needed.
_URLs = {}


# The name of the dataset usually matches the script name, with CamelCase instead of snake_case
class COCODataset(datasets.GeneratorBasedBuilder):
    """An example dataset script to work with a local (downloaded) COCO dataset."""

    VERSION = datasets.Version("0.0.0")

    BUILDER_CONFIG_CLASS = COCOBuilderConfig
    BUILDER_CONFIGS = [
        COCOBuilderConfig(name='2017', splits=['train', 'valid', 'test']),
    ]
    DEFAULT_CONFIG_NAME = "2017"

    def _info(self):
        # This method specifies the datasets.DatasetInfo object, which contains information and typings for the dataset

        feature_dict = {
            "image_id": datasets.Value("int64"),
            "caption_id": datasets.Value("int64"),
            "caption": datasets.Value("string"),
            "height": datasets.Value("int64"),
            "width": datasets.Value("int64"),
            "file_name": datasets.Value("string"),
            "coco_url": datasets.Value("string"),
            "image_path": datasets.Value("string"),
        }

        features = datasets.Features(feature_dict)

        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # This defines the different columns of the dataset and their types
            features=features,  # defined above
            # If there's a common (input, target) tuple from the features,
            # specify it here. It will be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage=_HOMEPAGE,
            # License for the dataset if available
            license=_LICENSE,
            # Citation for the dataset
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # This method is tasked with downloading/extracting the data and defining the splits, depending on the configuration.
        # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name.

        data_dir = self.config.data_dir
        if not data_dir:
            raise ValueError(
                "This script is meant to work with a local (downloaded) COCO dataset. The argument `data_dir` in `load_dataset()` is required."
            )

        splits = []
        for split in self.config.splits:
            if split == 'train':
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "json_path": os.path.join(data_dir, "annotations", "captions_train2017.json"),
                        "image_dir": os.path.join(data_dir, "train2017"),
                        "split": "train",
                    },
                )
            elif split in ['val', 'valid', 'validation', 'dev']:
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.VALIDATION,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "json_path": os.path.join(data_dir, "annotations", "captions_val2017.json"),
                        "image_dir": os.path.join(data_dir, "val2017"),
                        "split": "valid",
                    },
                )
            elif split == 'test':
                dataset = datasets.SplitGenerator(
                    name=datasets.Split.TEST,
                    # These kwargs will be passed to _generate_examples
                    gen_kwargs={
                        "json_path": os.path.join(data_dir, "annotations", "image_info_test2017.json"),
                        "image_dir": os.path.join(data_dir, "test2017"),
                        "split": "test",
                    },
                )
            else:
                continue

            splits.append(dataset)

        return splits

    def _generate_examples(
        # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
        self, json_path, image_dir, split
    ):
        """Yields examples as (key, example) tuples."""
        # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
        # The `key` is here for legacy reasons (tfds) and is not important in itself.

        _features = ["image_id", "caption_id", "caption", "height", "width", "file_name", "coco_url", "image_path", "id"]
        features = list(_features)

        if split == "valid":
            split = "val"

        with open(json_path, 'r', encoding='UTF-8') as fp:
            data = json.load(fp)

        # list of dicts
        images = data["images"]
        entries = images

        # build a dict of image_id -> image info dict
        d = {image["id"]: image for image in images}

        # list of dicts
        if split in ["train", "val"]:
            annotations = data["annotations"]

            # merge each annotation with its image info, keeping the annotation's own id
            for annotation in annotations:
                _id = annotation["id"]
                image_info = d[annotation["image_id"]]
                annotation.update(image_info)
                annotation["id"] = _id

            entries = annotations

        for id_, entry in enumerate(entries):

            entry = {k: v for k, v in entry.items() if k in features}

            if split == "test":
                # the test split has no captions: entries are images, so `id` is the image id
                entry["image_id"] = entry["id"]
                entry["id"] = -1
                entry["caption"] = -1

            entry["caption_id"] = entry.pop("id")
            entry["image_path"] = os.path.join(image_dir, entry["file_name"])

            entry = {k: entry[k] for k in _features if k in entry}

            yield str((entry["image_id"], entry["caption_id"])), entry
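
For reference, a minimal usage sketch (not part of the commit): since the script only reads local files, it assumes the COCO 2017 annotations and image folders have already been downloaded and unpacked under one directory, and a `datasets` version that still supports loading dataset scripts. The local path below is hypothetical.

from datasets import load_dataset

# `data_dir` is expected to contain annotations/ (with captions_train2017.json,
# captions_val2017.json, image_info_test2017.json) plus train2017/, val2017/ and test2017/
ds = load_dataset(
    "coco_dataset_script.py",   # path to this script
    "2017",                     # config defined in BUILDER_CONFIGS
    data_dir="/path/to/coco",   # hypothetical local path
)
print(ds["train"][0]["caption"])

Note that the script never opens the image files themselves; it only records `image_path`, so decoding images (e.g. with PIL) is left to downstream code.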