whyen-wang committed
Commit 162bd9a · 1 Parent(s): b95b1c3
Files changed (6)
  1. .gitignore +3 -0
  2. README.md +228 -0
  3. coco_stuff.py +143 -0
  4. data/stuff_train.zip +3 -0
  5. data/stuff_validation.zip +3 -0
  6. prepare.ipynb +155 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
+ annotations/
+ stuff_annotations_trainval2017.zip
+ *.jsonl
README.md CHANGED
@@ -1,3 +1,231 @@
  ---
  license: cc-by-4.0
+ size_categories:
+ - 100K<n<1M
+ task_categories:
+ - image-segmentation
+ language:
+ - en
+ pretty_name: COCO Stuff
  ---
+
+ # Dataset Card for "COCO Stuff"
+
+ ## Quick Start
+ ### Usage
+ ```python
+ >>> from datasets import load_dataset
+
+ >>> dataset = load_dataset('whyen-wang/coco_stuff')
+ >>> example = dataset['train'][500]
+ >>> print(example)
+ {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=640x426>,
+  'categories': [29, 73, 91],
+  'sem.rles': {
+      'size': [[426, 640], [426, 640], [426, 640]],
+      'counts': [
+          'S=7T=O1O0000000000...', 'c1Y3P:10O1O010O100...',
+          'n:U2o8W2N1O1O2M2N2...'
+      ]}}
+ ```
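+
+ The integer `categories` can be mapped back to stuff class names through the dataset features, e.g. `dataset['train'].features['categories'].feature.names`.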
+
+ ### Visualization
+ ```python
+ >>> import cv2
+ >>> import numpy as np
+ >>> from PIL import Image
+ >>> from pycocotools import mask as maskUtils
+
+ >>> def transforms(examples):
+         sem_rles = examples.pop('sem.rles')
+         annotation = []
+         for i in sem_rles:
+             rles = [
+                 {'size': size, 'counts': counts}
+                 for size, counts in zip(i['size'], i['counts'])
+             ]
+             annotation.append(maskUtils.decode(rles))
+         examples['annotation'] = annotation
+         return examples
+
+ >>> def visualize(example, colors):
+         image = np.array(example['image'])
+         categories = example['categories']
+         masks = example['annotation']
+         n = len(categories)
+         for i in range(n):
+             c = categories[i]
+             color = colors[c]
+             image[masks[..., i] == 1] = image[masks[..., i] == 1] // 2 + color // 2
+         return image
+
+ >>> dataset.set_transform(transforms)
+
+ >>> names = dataset['train'].features['categories'].feature.names
+
+ >>> colors = np.ones((92, 3), np.uint8) * 255
+ >>> colors[:, 0] = np.linspace(0, 255, 92)
+ >>> colors = cv2.cvtColor(colors[None], cv2.COLOR_HSV2RGB)[0]
+
+ >>> example = dataset['train'][500]
+ >>> Image.fromarray(visualize(example, colors))
+ ```
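+
+ The palette assigns each of the 92 stuff categories a distinct hue by sweeping the HSV hue channel and converting to RGB, so neighboring masks stay distinguishable when blended into the image at half opacity.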
+
+ ## Table of Contents
+ - [Table of Contents](#table-of-contents)
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+   - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ - **Homepage:** https://cocodataset.org/
+ - **Repository:** None
+ - **Paper:** [Microsoft COCO: Common Objects in Context](https://arxiv.org/abs/1405.0312)
+ - **Leaderboard:** [Papers with Code](https://paperswithcode.com/dataset/coco)
+ - **Point of Contact:** None
+
+ ### Dataset Summary
+
+ COCO is a large-scale object detection, segmentation, and captioning dataset. This repository packages the COCO Stuff annotations: pixel-level run-length-encoded masks covering 91 stuff categories plus an `other` class.
+
+ ### Supported Tasks and Leaderboards
+
+ [Image Segmentation](https://huggingface.co/tasks/image-segmentation)
+
+ ### Languages
+
+ en
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ An example looks as follows.
+
+ ```
+ {
+     "image": PIL.Image(mode="RGB"),
+     "categories": [29, 73, 91],
+     "sem.rles": {
+         "size": [[426, 640], [426, 640], [426, 640]],
+         "counts": [
+             "S=7T=O1O0000000000...",
+             "c1Y3P:10O1O010O100...",
+             "n:U2o8W2N1O1O2M2N2..."
+         ]
+     }
+ }
+ ```
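+
+ The `counts` strings are COCO-compressed run-length encodings. As a minimal sketch (assuming `pycocotools` is installed), they can be decoded into a stack of binary masks:
+
+ ```python
+ >>> from pycocotools import mask as maskUtils
+
+ >>> rles = example['sem.rles']
+ >>> masks = maskUtils.decode([
+         {'size': size, 'counts': counts}
+         for size, counts in zip(rles['size'], rles['counts'])
+     ])
+ >>> masks.shape  # (height, width, number of masks)
+ (426, 640, 3)
+ ```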
+
+ ### Data Fields
+
+ - `image`: a `PIL.Image` in RGB mode.
+ - `categories`: a sequence of `ClassLabel` ids, one per mask, over the 92 stuff category names (91 stuff classes plus `other`).
+ - `sem.rles`: a sequence of COCO run-length encodings, each a dict with a `size` (`[height, width]`) and a compressed `counts` string.
+
+ ### Data Splits
+
+ | name    |   train | validation |
+ | ------- | ------: | ---------: |
+ | default | 118,287 |      5,000 |
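+
+ A single split can be loaded on its own, e.g. `load_dataset('whyen-wang/coco_stuff', split='validation')`.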
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ Creative Commons Attribution 4.0 License
+
+ ### Citation Information
+
+ ```
+ @article{cocodataset,
+   author    = {Tsung{-}Yi Lin and Michael Maire and Serge J. Belongie and Lubomir D. Bourdev and Ross B. Girshick and James Hays and Pietro Perona and Deva Ramanan and Piotr Doll{\'{a}}r and C. Lawrence Zitnick},
+   title     = {Microsoft {COCO:} Common Objects in Context},
+   journal   = {CoRR},
+   volume    = {abs/1405.0312},
+   year      = {2014},
+   url       = {http://arxiv.org/abs/1405.0312},
+   archivePrefix = {arXiv},
+   eprint    = {1405.0312},
+   timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
+   biburl    = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ ```
+
+ ### Contributions
+
+ Thanks to [@whyen-wang](https://github.com/whyen-wang) for adding this dataset.
coco_stuff.py ADDED
@@ -0,0 +1,143 @@
+ import json
+ import datasets
+ from pathlib import Path
+
+ _HOMEPAGE = 'https://cocodataset.org/'
+ _LICENSE = 'Creative Commons Attribution 4.0 License'
+ _DESCRIPTION = 'COCO is a large-scale object detection, segmentation, and captioning dataset.'
+ _CITATION = r'''
+ @article{cocodataset,
+   author    = {Tsung{-}Yi Lin and Michael Maire and Serge J. Belongie and Lubomir D. Bourdev and Ross B. Girshick and James Hays and Pietro Perona and Deva Ramanan and Piotr Doll{\'{a}}r and C. Lawrence Zitnick},
+   title     = {Microsoft {COCO:} Common Objects in Context},
+   journal   = {CoRR},
+   volume    = {abs/1405.0312},
+   year      = {2014},
+   url       = {http://arxiv.org/abs/1405.0312},
+   archivePrefix = {arXiv},
+   eprint    = {1405.0312},
+   timestamp = {Mon, 13 Aug 2018 16:48:13 +0200},
+   biburl    = {https://dblp.org/rec/bib/journals/corr/LinMBHPRDZ14},
+   bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ '''
+ _NAMES = [
+     'banner', 'blanket', 'branch', 'bridge', 'building-other', 'bush',
+     'cabinet', 'cage', 'cardboard', 'carpet', 'ceiling-other', 'ceiling-tile',
+     'cloth', 'clothes', 'clouds', 'counter', 'cupboard', 'curtain',
+     'desk-stuff', 'dirt', 'door-stuff', 'fence', 'floor-marble',
+     'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', 'flower', 'fog',
+     'food-other', 'fruit', 'furniture-other', 'grass', 'gravel', 'ground-other',
+     'hill', 'house', 'leaves', 'light', 'mat', 'metal', 'mirror-stuff',
+     'moss', 'mountain', 'mud', 'napkin', 'net', 'paper', 'pavement',
+     'pillow', 'plant-other', 'plastic', 'platform', 'playingfield',
+     'railing', 'railroad', 'river', 'road', 'rock', 'roof', 'rug', 'salad',
+     'sand', 'sea', 'shelf', 'sky-other', 'skyscraper', 'snow', 'solid-other',
+     'stairs', 'stone', 'straw', 'structural-other', 'table', 'tent',
+     'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', 'wall-concrete',
+     'wall-other', 'wall-panel', 'wall-stone', 'wall-tile', 'wall-wood',
+     'water-other', 'waterdrops', 'window-blind', 'window-other', 'wood',
+     'other'
+ ]
+
+
+ class COCOStuffConfig(datasets.BuilderConfig):
+     '''BuilderConfig for COCO Stuff.'''
+
+     def __init__(
+         self, description, homepage,
+         annotation_urls, **kwargs
+     ):
+         super(COCOStuffConfig, self).__init__(
+             version=datasets.Version('1.0.0', ''),
+             **kwargs
+         )
+         self.description = description
+         self.homepage = homepage
+         url = 'http://images.cocodataset.org/zips/'
+         self.train_image_url = url + 'train2017.zip'
+         self.val_image_url = url + 'val2017.zip'
+         self.train_annotation_urls = annotation_urls['train']
+         self.val_annotation_urls = annotation_urls['validation']
+
+
64
+ BUILDER_CONFIGS = [
65
+ COCOStuffConfig(
66
+ description=_DESCRIPTION,
67
+ homepage=_HOMEPAGE,
68
+ annotation_urls={
69
+ 'train': 'data/stuff_train.zip',
70
+ 'validation': 'data/stuff_validation.zip'
71
+ },
72
+ )
73
+ ]
74
+
75
+ def _info(self):
76
+ features = datasets.Features({
77
+ 'image': datasets.Image(mode='RGB', decode=True, id=None),
78
+ 'categories': datasets.Sequence(
79
+ feature=datasets.ClassLabel(names=_NAMES),
80
+ length=-1, id=None
81
+ ),
82
+ 'sem.rles': datasets.Sequence(
83
+ feature={
84
+ 'size': datasets.Sequence(
85
+ feature=datasets.Value(dtype='int32', id=None),
86
+ length=2, id=None
87
+ ),
88
+ 'counts': datasets.Value(dtype='string', id=None)
89
+ },
90
+ length=-1, id=None
91
+ ),
92
+ })
93
+ return datasets.DatasetInfo(
94
+ description=_DESCRIPTION,
95
+ features=features,
96
+ homepage=_HOMEPAGE,
97
+ license=_LICENSE,
98
+ citation=_CITATION
99
+ )
100
+
101
+ def _split_generators(self, dl_manager):
102
+ train_image_path = dl_manager.download_and_extract(
103
+ self.config.train_image_url
104
+ )
105
+ val_image_path = dl_manager.download_and_extract(
106
+ self.config.val_image_url
107
+ )
108
+ train_annotation_paths = dl_manager.download_and_extract(
109
+ self.config.train_annotation_urls
110
+ )
111
+ val_annotation_paths = dl_manager.download_and_extract(
112
+ self.config.val_annotation_urls
113
+ )
114
+ return [
115
+ datasets.SplitGenerator(
116
+ name=datasets.Split.TRAIN,
117
+ gen_kwargs={
118
+ 'image_path': f'{train_image_path}/train2017',
119
+ 'annotation_path': f'{train_annotation_paths}/stuff_train.jsonl'
120
+ }
121
+ ),
122
+ datasets.SplitGenerator(
123
+ name=datasets.Split.VALIDATION,
124
+ gen_kwargs={
125
+ 'image_path': f'{val_image_path}/val2017',
126
+ 'annotation_path': f'{val_annotation_paths}/stuff_validation.jsonl'
127
+ }
128
+ )
129
+ ]
130
+
131
+ def _generate_examples(self, image_path, annotation_path):
132
+ idx = 0
133
+ image_path = Path(image_path)
134
+ with open(annotation_path, 'r', encoding='utf-8') as f:
135
+ for line in f:
136
+ obj = json.loads(line.strip())
137
+ example = {
138
+ 'image': str(image_path / obj['image']),
139
+ 'categories': obj['categories'],
140
+ 'sem.rles': obj['sem.rles']
141
+ }
142
+ yield idx, example
143
+ idx += 1
data/stuff_train.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3d9b7449692790a259c85805b5f329b7968ec6ad9cfe8115536df136999ff36a
+ size 498017066
data/stuff_validation.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8b5ec49fd61b87659eb473d7d4e42ab09ce6449b8eca95839098402799249e1
+ size 21404718
prepare.ipynb ADDED
@@ -0,0 +1,155 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 22,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "import cv2\n",
+ "import json\n",
+ "import numpy as np\n",
+ "import zipfile\n",
+ "import requests\n",
+ "import jsonlines\n",
+ "from tqdm import tqdm\n",
+ "from pathlib import Path\n",
+ "from pycocotools.coco import COCO\n",
+ "from pycocotools import mask as maskUtils"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Download Annotations"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "url = 'http://images.cocodataset.org/annotations/'\n",
+ "file = 'stuff_annotations_trainval2017.zip'\n",
+ "if not Path(f'./{file}').exists():\n",
+ "    response = requests.get(url + file)\n",
+ "    with open(file, 'wb') as f:\n",
+ "        f.write(response.content)\n",
+ "\n",
+ "    with zipfile.ZipFile(file, 'r') as zipf:\n",
+ "        zipf.extractall(Path())\n",
+ "\n",
+ "for split in ['train', 'val']:\n",
+ "    file = f'./annotations/stuff_{split}2017_pixelmaps'\n",
+ "    if not Path(file).exists():\n",
+ "        with zipfile.ZipFile(file + '.zip', 'r') as zipf:\n",
+ "            zipf.extractall(Path('./annotations'))\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Stuff Segmentation Task"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "loading annotations into memory...\n",
+ "Done (t=6.97s)\n",
+ "creating index...\n",
+ "index created!\n",
+ "loading annotations into memory...\n",
+ "Done (t=0.40s)\n",
+ "creating index...\n",
+ "index created!\n"
+ ]
+ }
+ ],
+ "source": [
+ "train_data = COCO('annotations/stuff_train2017.json')\n",
+ "val_data = COCO('annotations/stuff_val2017.json')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for split, data in zip(['train', 'validation'], [train_data, val_data]):\n",
+ "    with jsonlines.open(f'data/stuff_{split}.jsonl', mode='w') as writer:\n",
+ "        for image_id, image_info in tqdm(data.imgs.items()):\n",
+ "            categories, sem_rles = [], []\n",
+ "            anns = data.imgToAnns[image_id]\n",
+ "            file_name = image_info['file_name']\n",
+ "            height, width = image_info['height'], image_info['width']\n",
+ "            for ann in anns:\n",
+ "                # stuff category ids run 92-183; shift to 0-based label indices\n",
+ "                categories.append(ann['category_id'] - 92)\n",
+ "                segm = ann['segmentation']\n",
+ "                if isinstance(segm, list):\n",
+ "                    # polygons: rasterize and merge into a single RLE\n",
+ "                    rles = maskUtils.frPyObjects(segm, height, width)\n",
+ "                    rle = maskUtils.merge(rles)\n",
+ "                    rle['counts'] = rle['counts'].decode()\n",
+ "                elif isinstance(segm['counts'], list):\n",
+ "                    # uncompressed RLE: compress, then decode bytes for JSON\n",
+ "                    rle = maskUtils.frPyObjects(segm, height, width)\n",
+ "                    rle['counts'] = rle['counts'].decode()\n",
+ "                else:\n",
+ "                    rle = segm\n",
+ "                sem_rles.append(rle)\n",
+ "            writer.write({\n",
+ "                'image': file_name, 'categories': categories, 'sem.rles': sem_rles\n",
+ "            })"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 23,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "for split in ['train', 'validation']:\n",
+ "    file_path = f'data/stuff_{split}.jsonl'\n",
+ "    with zipfile.ZipFile(f'data/stuff_{split}.zip', 'w', zipfile.ZIP_DEFLATED) as zipf:\n",
+ "        zipf.write(file_path, os.path.basename(file_path))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": ".venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.2"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }