import os
import json
import logging
from collections import defaultdict

import datasets


_CITATION = """\
MINICOCO2017
"""

_DESCRIPTION = """\
MINICOCO2017: a small COCO-2017-style dataset with annotations for object
detection, instance segmentation, image captioning and person keypoints.
"""

# The archive is expected to unpack into `annotations/`, `train2017/`,
# `val2017/` and `test2017/` directories, mirroring the COCO 2017 layout.
_URLS = {
    "minicoco2017": "minicoco.tar.gz",
}

# The 80 COCO object categories as (name, COCO category id, contiguous index).
# COCO category ids are not contiguous (e.g. id 12 is unused), so the third
# field provides a dense 0-based label index.
CLASS_INFOS = [
    ('person', 1, 0),
    ('bicycle', 2, 1),
    ('car', 3, 2),
    ('motorcycle', 4, 3),
    ('airplane', 5, 4),
    ('bus', 6, 5),
    ('train', 7, 6),
    ('truck', 8, 7),
    ('boat', 9, 8),
    ('traffic light', 10, 9),
    ('fire hydrant', 11, 10),
    ('stop sign', 13, 11),
    ('parking meter', 14, 12),
    ('bench', 15, 13),
    ('bird', 16, 14),
    ('cat', 17, 15),
    ('dog', 18, 16),
    ('horse', 19, 17),
    ('sheep', 20, 18),
    ('cow', 21, 19),
    ('elephant', 22, 20),
    ('bear', 23, 21),
    ('zebra', 24, 22),
    ('giraffe', 25, 23),
    ('backpack', 27, 24),
    ('umbrella', 28, 25),
    ('handbag', 31, 26),
    ('tie', 32, 27),
    ('suitcase', 33, 28),
    ('frisbee', 34, 29),
    ('skis', 35, 30),
    ('snowboard', 36, 31),
    ('sports ball', 37, 32),
    ('kite', 38, 33),
    ('baseball bat', 39, 34),
    ('baseball glove', 40, 35),
    ('skateboard', 41, 36),
    ('surfboard', 42, 37),
    ('tennis racket', 43, 38),
    ('bottle', 44, 39),
    ('wine glass', 46, 40),
    ('cup', 47, 41),
    ('fork', 48, 42),
    ('knife', 49, 43),
    ('spoon', 50, 44),
    ('bowl', 51, 45),
    ('banana', 52, 46),
    ('apple', 53, 47),
    ('sandwich', 54, 48),
    ('orange', 55, 49),
    ('broccoli', 56, 50),
    ('carrot', 57, 51),
    ('hot dog', 58, 52),
    ('pizza', 59, 53),
    ('donut', 60, 54),
    ('cake', 61, 55),
    ('chair', 62, 56),
    ('couch', 63, 57),
    ('potted plant', 64, 58),
    ('bed', 65, 59),
    ('dining table', 67, 60),
    ('toilet', 70, 61),
    ('tv', 72, 62),
    ('laptop', 73, 63),
    ('mouse', 74, 64),
    ('remote', 75, 65),
    ('keyboard', 76, 66),
    ('cell phone', 77, 67),
    ('microwave', 78, 68),
    ('oven', 79, 69),
    ('toaster', 80, 70),
    ('sink', 81, 71),
    ('refrigerator', 82, 72),
    ('book', 84, 73),
    ('clock', 85, 74),
    ('vase', 86, 75),
    ('scissors', 87, 76),
    ('teddy bear', 88, 77),
    ('hair drier', 89, 78),
    ('toothbrush', 90, 79),
]

# COCO person keypoints as (name, id, contiguous index). The placeholder
# 'none' entry offsets the list so the 17 real keypoints start at index 1.
KEYPOINTS_INFOS = [
    ('none', 1, 0),
    ('nose', 2, 1),
    ('left_eye', 3, 2),
    ('right_eye', 4, 3),
    ('left_ear', 5, 4),
    ('right_ear', 6, 5),
    ('left_shoulder', 7, 6),
    ('right_shoulder', 8, 7),
    ('left_elbow', 9, 8),
    ('right_elbow', 10, 9),
    ('left_wrist', 11, 10),
    ('right_wrist', 12, 11),
    ('left_hip', 13, 12),
    ('right_hip', 14, 13),
    ('left_knee', 15, 14),
    ('right_knee', 16, 15),
    ('left_ankle', 17, 16),
    ('right_ankle', 18, 17),
]

CLASS_NAMES = [class_info[0] for class_info in CLASS_INFOS]
KEYPOINTS_NAMES = [keypoint_info[0] for keypoint_info in KEYPOINTS_INFOS]

# class name -> contiguous label index
CLASS_DICT = {class_info[0]: class_info[2] for class_info in CLASS_INFOS}
# COCO category id -> class name
CATEGORY_ID2CLASS_NAMES = {class_info[1]: class_info[0] for class_info in CLASS_INFOS}
# keypoint name -> keypoint id (note: the id, not the contiguous index)
KEYPOINTS_DICT = {keypoint_info[0]: keypoint_info[1] for keypoint_info in KEYPOINTS_INFOS}
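
# A quick sanity example; the values follow directly from CLASS_INFOS above:
# >>> CATEGORY_ID2CLASS_NAMES[1]
# 'person'
# >>> CLASS_DICT['person']
# 0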

detection_features = datasets.Features(
    {
        "id": datasets.Value("int32"),
        "image": datasets.Value("string"),
        "height": datasets.Value("int32"),
        "width": datasets.Value("int32"),
        "objects": datasets.features.Sequence(
            {
                "bboxes": datasets.Sequence(datasets.Value("float32")),
                "classes": datasets.features.ClassLabel(names=CLASS_NAMES),
            }
        ),
    }
)

segmentation_features = datasets.Features(
    {
        "id": datasets.Value("int32"),
        "image": datasets.Value("string"),
        "height": datasets.Value("int32"),
        "width": datasets.Value("int32"),
        "objects": datasets.features.Sequence(
            {
                "bboxes": datasets.Sequence(datasets.Value("float32")),
                "classes": datasets.features.ClassLabel(names=CLASS_NAMES),
                "segmentation": datasets.Sequence(datasets.Value("float32")),
                "iscrowd": datasets.Value("int32"),
            }
        ),
    }
)

captions_features = datasets.Features(
    {
        "id": datasets.Value("int32"),
        "image": datasets.Value("string"),
        "height": datasets.Value("int32"),
        "width": datasets.Value("int32"),
        "captions": datasets.features.Sequence(datasets.Value("string")),
    }
)

keypoint_features = datasets.Features(
    {
        "id": datasets.Value("int32"),
        "image": datasets.Value("string"),
        "height": datasets.Value("int32"),
        "width": datasets.Value("int32"),
        "objects": datasets.features.Sequence(
            {
                "bboxes": datasets.Sequence(datasets.Value("float32")),
                "classes": datasets.features.ClassLabel(names=CLASS_NAMES),
                "keypoints": datasets.Sequence(datasets.Value("float32")),
                "num_keypoints": datasets.Value("int32"),
            }
        ),
    }
)

_DATASET_FEATURES = {
    "detection": detection_features,
    "segmentation": segmentation_features,
    "caption": captions_features,
    "keypoint": keypoint_features,
}
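
# Shape of a generated detection example (illustrative values only; bboxes use
# the COCO [x, y, width, height] convention):
# {
#     "id": 0,
#     "image": "/abs/path/train2017/000000000009.jpg",
#     "height": 480,
#     "width": 640,
#     "objects": [{"bboxes": [1.0, 2.0, 3.0, 4.0], "classes": "person"}, ...],
# }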


def get_captions_annotation(captions_path):
    """Parse a COCO-style captions JSON into per-image captions and image infos."""
    with open(captions_path, "r") as f:
        anno_captions = json.load(f)

    anno_infos = defaultdict(list)
    images_infos = []

    # Each annotation carries one caption for one image; group them by image id.
    for caption_info in anno_captions["annotations"]:
        anno_infos[caption_info["image_id"]].append(caption_info["caption"])

    for image in anno_captions["images"]:
        images_infos.append(
            {
                "image_name": image["file_name"],
                "height": image["height"],
                "width": image["width"],
                "image_id": image["id"],
            }
        )

    return anno_infos, images_infos


def get_instances_annotation(instances_path):
    """Parse a COCO-style instances JSON into per-image objects and image infos."""
    with open(instances_path, "r") as f:
        anno_instances = json.load(f)

    anno_infos = defaultdict(list)
    images_infos = []

    for instance_info in anno_instances["annotations"]:
        # Polygon segmentations are lists of point lists, while crowd
        # (iscrowd=1) annotations store RLE dicts instead, so guard before
        # taking the first polygon.
        segmentation = instance_info["segmentation"]
        segmentation = segmentation[0] if isinstance(segmentation, list) else []

        anno_infos[instance_info["image_id"]].append(
            {
                "segmentation": segmentation,
                "bbox": instance_info["bbox"],
                "iscrowd": instance_info["iscrowd"],
                "classes": CATEGORY_ID2CLASS_NAMES[instance_info["category_id"]],
            }
        )

    for image in anno_instances["images"]:
        images_infos.append(
            {
                "image_name": image["file_name"],
                "height": image["height"],
                "width": image["width"],
                "image_id": image["id"],
            }
        )

    return anno_infos, images_infos
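
# Note on the stored polygon: COCO polygons are flat coordinate lists
# [x1, y1, x2, y2, ...] in pixel space, which is what the float32 Sequence in
# `segmentation_features` expects.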


def get_keypoints_annotation(keypoints_path):
    """Parse a COCO-style person-keypoints JSON into per-image objects and image infos."""
    with open(keypoints_path, "r") as f:
        anno_keypoints = json.load(f)

    anno_infos = defaultdict(list)
    images_infos = []

    for keypoint_info in anno_keypoints["annotations"]:
        anno_infos[keypoint_info["image_id"]].append(
            {
                "bbox": keypoint_info["bbox"],
                "classes": CATEGORY_ID2CLASS_NAMES[keypoint_info["category_id"]],
                "keypoints": keypoint_info["keypoints"],
                "num_keypoints": keypoint_info["num_keypoints"],
            }
        )

    for image in anno_keypoints["images"]:
        images_infos.append(
            {
                "image_name": image["file_name"],
                "height": image["height"],
                "width": image["width"],
                "image_id": image["id"],
            }
        )

    return anno_infos, images_infos
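
# COCO keypoints come as a flat [x1, y1, v1, x2, y2, v2, ...] list of 17
# triplets, where v is a visibility flag (0 = not labeled, 1 = labeled but
# not visible, 2 = labeled and visible).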


class MINICOCOConfig(datasets.BuilderConfig):
    def __init__(self, data_name, task_name, **kwargs):
        """BuilderConfig for a MINICOCO2017 task.

        Args:
            data_name: dataset key in `_URLS`; only "minicoco2017" is supported.
            task_name: one of "detection", "segmentation", "caption", "keypoint".
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        assert data_name in ["minicoco2017"] and task_name in [
            "detection",
            "segmentation",
            "caption",
            "keypoint",
        ]
        self.data_name = data_name
        self.task_name = task_name


class MINICOCODataset(datasets.GeneratorBasedBuilder):
    """Builder exposing one config per MINICOCO2017 task."""

    BUILDER_CONFIGS = [
        MINICOCOConfig(
            name="minicoco2017_detection",
            version=datasets.Version("1.0.0", ""),
            description="minicoco2017 detection dataset",
            data_name="minicoco2017",
            task_name="detection",
        ),
        MINICOCOConfig(
            name="minicoco2017_segmentation",
            version=datasets.Version("1.0.0", ""),
            description="minicoco2017 segmentation dataset",
            data_name="minicoco2017",
            task_name="segmentation",
        ),
        MINICOCOConfig(
            name="minicoco2017_caption",
            version=datasets.Version("1.0.0", ""),
            description="minicoco2017 caption dataset",
            data_name="minicoco2017",
            task_name="caption",
        ),
        MINICOCOConfig(
            name="minicoco2017_keypoint",
            version=datasets.Version("1.0.0", ""),
            description="minicoco2017 keypoint dataset",
            data_name="minicoco2017",
            task_name="keypoint",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_DATASET_FEATURES[self.config.task_name],
            supervised_keys=None,
            homepage="https://fuliucansheng.github.io/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS[self.config.data_name])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files, "split": "val"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files, "split": "test"},
            ),
        ]
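
    # Note: the test split assumes the archive ships `*_test2017.json`
    # annotation files. Official COCO 2017 test annotations are not public,
    # so a mini dataset must provide its own or the test split will not load.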

    def _generate_examples(self, filepath, split):
        """Yields examples in raw (text) form for the configured task."""
        logging.info("generating examples from = %s, split = %s", filepath, split)
        task_name = self.config.task_name

        if task_name == "caption":
            captions_path = os.path.join(filepath, "annotations", "captions_" + split + "2017.json")
            anno_infos, images_infos = get_captions_annotation(captions_path)

            for id_, image in enumerate(images_infos):
                image_path = os.path.join(filepath, split + "2017", image["image_name"])
                # Skip annotation entries whose image file is missing from the archive.
                if not os.path.exists(image_path):
                    continue
                example = {
                    "id": id_,
                    "image": os.path.abspath(image_path),
                    "height": image["height"],
                    "width": image["width"],
                    # anno_infos is a defaultdict, so unannotated images yield [].
                    "captions": anno_infos[image["image_id"]],
                }
                yield id_, example

        elif task_name == "detection":
            instances_path = os.path.join(filepath, "annotations", "instances_" + split + "2017.json")
            anno_infos, images_infos = get_instances_annotation(instances_path)

            for id_, image in enumerate(images_infos):
                image_path = os.path.join(filepath, split + "2017", image["image_name"])
                if not os.path.exists(image_path):
                    continue
                example = {
                    "id": id_,
                    "image": os.path.abspath(image_path),
                    "height": image["height"],
                    "width": image["width"],
                    "objects": [
                        {
                            "bboxes": object_info["bbox"],
                            "classes": object_info["classes"],
                        }
                        for object_info in anno_infos[image["image_id"]]
                    ],
                }
                yield id_, example

        elif task_name == "segmentation":
            instances_path = os.path.join(filepath, "annotations", "instances_" + split + "2017.json")
            anno_infos, images_infos = get_instances_annotation(instances_path)

            for id_, image in enumerate(images_infos):
                image_path = os.path.join(filepath, split + "2017", image["image_name"])
                if not os.path.exists(image_path):
                    continue
                example = {
                    "id": id_,
                    "image": os.path.abspath(image_path),
                    "height": image["height"],
                    "width": image["width"],
                    "objects": [
                        {
                            "bboxes": object_info["bbox"],
                            "classes": object_info["classes"],
                            "segmentation": object_info["segmentation"],
                            "iscrowd": object_info["iscrowd"],
                        }
                        for object_info in anno_infos[image["image_id"]]
                    ],
                }
                yield id_, example

        elif task_name == "keypoint":
            keypoints_path = os.path.join(filepath, "annotations", "person_keypoints_" + split + "2017.json")
            anno_infos, images_infos = get_keypoints_annotation(keypoints_path)

            for id_, image in enumerate(images_infos):
                image_path = os.path.join(filepath, split + "2017", image["image_name"])
                if not os.path.exists(image_path):
                    continue
                example = {
                    "id": id_,
                    "image": os.path.abspath(image_path),
                    "height": image["height"],
                    "width": image["width"],
                    "objects": [
                        {
                            "bboxes": object_info["bbox"],
                            "classes": object_info["classes"],
                            "keypoints": object_info["keypoints"],
                            "num_keypoints": object_info["num_keypoints"],
                        }
                        for object_info in anno_infos[image["image_id"]]
                    ],
                }
                yield id_, example
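

# Usage sketch (assumes this script is saved locally as `minicoco2017.py` with
# `minicoco.tar.gz` next to it; the config names come from BUILDER_CONFIGS):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("minicoco2017.py", name="minicoco2017_detection")
#     print(ds["train"][0]["objects"])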