|
|
|
import contextlib |
|
import datetime |
|
import io |
|
import json |
|
import logging |
|
import os |
|
import shutil |
|
|
|
import numpy as np |
|
import pycocotools.mask as mask_util |
|
|
|
from fvcore.common.timer import Timer
from iopath.common.file_io import file_lock
from PIL import Image

from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
from detectron2.utils.file_io import PathManager
|
|
|
from .. import DatasetCatalog, MetadataCatalog |
|
|
|
""" |
|
This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format". |
|
""" |
|
|
|
|
|
logger = logging.getLogger(__name__) |
|
|
|
__all__ = [ |
|
"load_coco_json", |
|
"load_sem_seg", |
|
"convert_to_coco_json", |
|
"register_coco_instances", |
|
] |
|
|
|
|
|
def load_coco_json( |
|
json_file, image_root, dataset_name=None, extra_annotation_keys=None |
|
): |
|
""" |
|
Load a json file with COCO's instances annotation format. |
|
Currently supports instance detection, instance segmentation, |
|
and person keypoints annotations. |
|
|
|
Args: |
|
json_file (str): full path to the json file in COCO instances annotation format. |
|
        image_root (str or path-like): the directory where the images in this json file exist.
|
dataset_name (str or None): the name of the dataset (e.g., coco_2017_train). |
|
When provided, this function will also do the following: |
|
|
|
* Put "thing_classes" into the metadata associated with this dataset. |
|
* Map the category ids into a contiguous range (needed by standard dataset format), |
|
and add "thing_dataset_id_to_contiguous_id" to the metadata associated |
|
with this dataset. |
|
|
|
This option should usually be provided, unless users need to load |
|
the original json content and apply more processing manually. |
|
extra_annotation_keys (list[str]): list of per-annotation keys that should also be |
|
loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints", |
|
"category_id", "segmentation"). The values for these keys will be returned as-is. |
|
For example, the densepose annotations are loaded in this way. |
|
|
|
Returns: |
|
list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See |
|
`Using Custom Datasets </tutorials/datasets.html>`_ ) when `dataset_name` is not None. |
|
If `dataset_name` is None, the returned `category_ids` may be |
|
        non-contiguous and may not conform to the Detectron2 standard format.
|
|
|
Notes: |
|
1. This function does not read the image files. |
|
The results do not have the "image" field. |
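    Example::

        # Illustrative paths following the builtin datasets layout; adjust to your setup.
        dicts = load_coco_json(
            "datasets/coco/annotations/instances_val2017.json",
            "datasets/coco/val2017",
            "coco_2017_val",
        )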
|
""" |
|
from pycocotools.coco import COCO |
|
|
|
timer = Timer() |
|
json_file = PathManager.get_local_path(json_file) |
|
with contextlib.redirect_stdout(io.StringIO()): |
|
coco_api = COCO(json_file) |
|
if timer.seconds() > 1: |
|
logger.info( |
|
"Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()) |
|
) |
|
|
|
id_map = None |
|
if dataset_name is not None: |
|
meta = MetadataCatalog.get(dataset_name) |
|
cat_ids = sorted(coco_api.getCatIds()) |
|
cats = coco_api.loadCats(cat_ids) |
|
|
|
thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])] |
|
meta.thing_classes = thing_classes |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
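        # In COCO, certain category ids are artificially removed,
        # and by convention they are always ignored.
        # We deal with COCO's id issue and translate
        # the category ids to contiguous ids in [0, 80).
        # It works by looking at the "categories" field in the json; therefore,
        # if users' own json also has non-contiguous ids, we apply this
        # mapping as well, but print a warning.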
if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)): |
|
if "coco" not in dataset_name: |
|
logger.warning( |
|
""" |
|
Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you. |
|
""" |
|
) |
|
id_map = {v: i for i, v in enumerate(cat_ids)} |
|
meta.thing_dataset_id_to_contiguous_id = id_map |
|
|
|
|
|
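    # sort indices for reproducible results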
img_ids = sorted(coco_api.imgs.keys()) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
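    # imgs is a list of dicts, each looks something like:
    # {'license': 4,
    #  'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
    #  'file_name': 'COCO_val2014_000000001268.jpg',
    #  'height': 427,
    #  'width': 640,
    #  'date_captured': '2013-11-17 05:57:24',
    #  'id': 1268}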
imgs = coco_api.loadImgs(img_ids) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
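    # anns is a list[list[dict]], where each dict is an annotation
    # record for an object. The inner list enumerates the objects in an image
    # and the outer list enumerates over images. Example of anns[0]:
    # [{'segmentation': [[192.81, 247.09, ..., 219.03, 249.06]],
    #   'area': 1035.749,
    #   'iscrowd': 0,
    #   'image_id': 1268,
    #   'bbox': [192.81, 224.8, 74.73, 33.43],
    #   'category_id': 16,
    #   'id': 42986},
    #  ...]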
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids] |
|
total_num_valid_anns = sum([len(x) for x in anns]) |
|
total_num_anns = len(coco_api.anns) |
|
if total_num_valid_anns < total_num_anns: |
|
logger.warning( |
|
f"{json_file} contains {total_num_anns} annotations, but only " |
|
f"{total_num_valid_anns} of them match to images in the file." |
|
) |
|
|
|
if "minival" not in json_file: |
|
|
|
|
|
|
|
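        # The popular valminusminival & minival annotations for COCO2014 contain
        # duplicate annotation ids. The ratio of buggy annotations there is tiny
        # and does not affect accuracy, so they are explicitly white-listed
        # from the uniqueness check below.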
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image] |
|
assert len(set(ann_ids)) == len( |
|
ann_ids |
|
), "Annotation ids in '{}' are not unique!".format(json_file) |
|
|
|
imgs_anns = list(zip(imgs, anns)) |
|
logger.info( |
|
"Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file) |
|
) |
|
|
|
dataset_dicts = [] |
|
|
|
ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + ( |
|
extra_annotation_keys or [] |
|
) |
|
|
|
num_instances_without_valid_segmentation = 0 |
|
|
|
for (img_dict, anno_dict_list) in imgs_anns: |
|
record = {} |
|
record["file_name"] = os.path.join(image_root, img_dict["file_name"]) |
|
record["height"] = img_dict["height"] |
|
record["width"] = img_dict["width"] |
|
image_id = record["image_id"] = img_dict["id"] |
|
|
|
objs = [] |
|
for anno in anno_dict_list: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
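            # Check that the image_id in this annotation is the same as the
            # image_id we're currently looking at. This fails only when the
            # data parsing logic or the annotation file is buggy.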
assert anno["image_id"] == image_id |
|
|
|
assert ( |
|
anno.get("ignore", 0) == 0 |
|
), '"ignore" in COCO json file is not supported.' |
|
|
|
obj = {key: anno[key] for key in ann_keys if key in anno} |
|
if "bbox" in obj and len(obj["bbox"]) == 0: |
|
raise ValueError( |
|
f"One annotation of image {image_id} contains empty 'bbox' value! " |
|
"This json does not have valid COCO format." |
|
) |
|
|
|
segm = anno.get("segmentation", None) |
|
            if segm:  # either list[list[float]] or dict(RLE)
|
if isinstance(segm, dict): |
|
if isinstance(segm["counts"], list): |
|
|
|
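                        # convert to compressed RLE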
segm = mask_util.frPyObjects(segm, *segm["size"]) |
|
                else:  # filter out invalid polygons (< 3 points)
|
|
|
segm = [ |
|
poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6 |
|
] |
|
if len(segm) == 0: |
|
num_instances_without_valid_segmentation += 1 |
|
                        continue  # ignore this instance
|
obj["segmentation"] = segm |
|
|
|
keypts = anno.get("keypoints", None) |
|
            if keypts:  # list[int]
|
for idx, v in enumerate(keypts): |
|
if idx % 3 != 2: |
|
|
|
|
|
|
|
|
|
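                        # COCO's segmentation coordinates are floating points in [0, H or W],
                        # but keypoint coordinates are integers in [0, H-1 or W-1].
                        # Therefore we assume the coordinates are "pixel indices" and
                        # add 0.5 to convert to floating point coordinates.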
keypts[idx] = v + 0.5 |
|
obj["keypoints"] = keypts |
|
|
|
obj["bbox_mode"] = BoxMode.XYWH_ABS |
|
if id_map: |
|
annotation_category_id = obj["category_id"] |
|
try: |
|
obj["category_id"] = id_map[annotation_category_id] |
|
except KeyError as e: |
|
raise KeyError( |
|
f"Encountered category_id={annotation_category_id} " |
|
"but this id does not exist in 'categories' of the json file." |
|
) from e |
|
objs.append(obj) |
|
record["annotations"] = objs |
|
dataset_dicts.append(record) |
|
|
|
if num_instances_without_valid_segmentation > 0: |
|
logger.warning( |
|
"Filtered out {} instances without valid segmentation. ".format( |
|
num_instances_without_valid_segmentation |
|
) |
|
+ "There might be issues in your dataset generation process. Please " |
|
"check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully" |
|
) |
|
return dataset_dicts |
|
|
|
|
|
def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"): |
|
""" |
|
Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are |
|
treated as ground truth annotations and all files under "image_root" with "image_ext" extension |
|
as input images. Ground truth and input images are matched using file paths relative to |
|
"gt_root" and "image_root" respectively without taking into account file extensions. |
|
This works for COCO as well as some other datasets. |
|
|
|
Args: |
|
gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation |
|
annotations are stored as images with integer values in pixels that represent |
|
corresponding semantic labels. |
|
image_root (str): the directory where the input images are. |
|
gt_ext (str): file extension for ground truth annotations. |
|
image_ext (str): file extension for input images. |
|
|
|
Returns: |
|
list[dict]: |
|
a list of dicts in detectron2 standard format without instance-level |
|
annotation. |
|
|
|
Notes: |
|
1. This function does not read the image and ground truth files. |
|
The results do not have the "image" and "sem_seg" fields. |
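    Example::

        # Illustrative paths for the COCO-stuff builtin layout; adjust to your setup.
        dicts = load_sem_seg(
            "datasets/coco/stuffthingmaps/train2017", "datasets/coco/train2017"
        )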
|
""" |
|
|
|
|
|
|
|
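    # We match input images with ground truth based on their relative filepaths (without
    # file extensions) starting from "image_root" and "gt_root" respectively.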
def file2id(folder_path, file_path): |
|
|
|
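        # extract relative path starting from `folder_path`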
image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path)) |
|
|
|
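        # remove file extension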
image_id = os.path.splitext(image_id)[0] |
|
return image_id |
|
|
|
input_files = sorted( |
|
( |
|
os.path.join(image_root, f) |
|
for f in PathManager.ls(image_root) |
|
if f.endswith(image_ext) |
|
), |
|
key=lambda file_path: file2id(image_root, file_path), |
|
) |
|
gt_files = sorted( |
|
( |
|
os.path.join(gt_root, f) |
|
for f in PathManager.ls(gt_root) |
|
if f.endswith(gt_ext) |
|
), |
|
key=lambda file_path: file2id(gt_root, file_path), |
|
) |
|
|
|
assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root) |
|
|
|
|
|
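    # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images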
if len(input_files) != len(gt_files): |
|
        logger.warning(
            "Directories {} and {} have {} and {} files, respectively.".format(
                image_root, gt_root, len(input_files), len(gt_files)
            )
        )
|
input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files] |
|
gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files] |
|
intersect = list(set(input_basenames) & set(gt_basenames)) |
|
|
|
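    # sort, otherwise each worker may obtain a list[dict] in a different order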
intersect = sorted(intersect) |
|
        logger.warning("Will use their intersection of {} files.".format(len(intersect)))
|
input_files = [os.path.join(image_root, f + image_ext) for f in intersect] |
|
gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect] |
|
|
|
logger.info( |
|
"Loaded {} images with semantic segmentation from {}".format( |
|
len(input_files), image_root |
|
) |
|
) |
|
|
|
dataset_dicts = [] |
|
for (img_path, gt_path) in zip(input_files, gt_files): |
|
record = {} |
|
record["file_name"] = img_path |
|
record["sem_seg_file_name"] = gt_path |
|
dataset_dicts.append(record) |
|
|
|
return dataset_dicts |
|
|
|
|
|
def convert_to_coco_dict(dataset_name): |
|
""" |
|
Convert an instance detection/segmentation or keypoint detection dataset |
|
in detectron2's standard format into COCO json format. |
|
|
|
Generic dataset description can be found here: |
|
https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset |
|
|
|
COCO data format description can be found here: |
|
http://cocodataset.org/#format-data |
|
|
|
Args: |
|
dataset_name (str): |
|
name of the source dataset |
|
            Must be registered in DatasetCatalog and in detectron2's standard format.
|
Must have corresponding metadata "thing_classes" |
|
Returns: |
|
coco_dict: serializable dict in COCO json format |
|
""" |
|
|
|
dataset_dicts = DatasetCatalog.get(dataset_name) |
|
metadata = MetadataCatalog.get(dataset_name) |
|
|
|
|
|
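    # unmap the category mapping ids for COCO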
if hasattr(metadata, "thing_dataset_id_to_contiguous_id"): |
|
reverse_id_mapping = { |
|
v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items() |
|
} |
|
reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] |
|
else: |
|
reverse_id_mapper = lambda contiguous_id: contiguous_id |
|
|
|
categories = [ |
|
{"id": reverse_id_mapper(id), "name": name} |
|
for id, name in enumerate(metadata.thing_classes) |
|
] |
|
|
|
logger.info("Converting dataset dicts into COCO format") |
|
coco_images = [] |
|
coco_annotations = [] |
|
|
|
for image_id, image_dict in enumerate(dataset_dicts): |
|
coco_image = { |
|
"id": image_dict.get("image_id", image_id), |
|
"width": int(image_dict["width"]), |
|
"height": int(image_dict["height"]), |
|
"file_name": str(image_dict["file_name"]), |
|
} |
|
coco_images.append(coco_image) |
|
|
|
anns_per_image = image_dict.get("annotations", []) |
|
for annotation in anns_per_image: |
|
|
|
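            # create a new dict with only COCO fields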
coco_annotation = {} |
|
|
|
|
|
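            # COCO requirement: XYWH box format for axis-aligned and XYWHA for rotated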
bbox = annotation["bbox"] |
|
if isinstance(bbox, np.ndarray): |
|
if bbox.ndim != 1: |
|
raise ValueError( |
|
f"bbox has to be 1-dimensional. Got shape={bbox.shape}." |
|
) |
|
bbox = bbox.tolist() |
|
if len(bbox) not in [4, 5]: |
|
                raise ValueError(f"bbox has to have length 4 or 5. Got {bbox}.")
|
from_bbox_mode = annotation["bbox_mode"] |
|
to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS |
|
bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode) |
|
|
|
|
|
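            # COCO requirement: instance area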
if "segmentation" in annotation: |
|
|
|
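                # Computing areas for instances by counting the pixels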
segmentation = annotation["segmentation"] |
|
|
|
if isinstance(segmentation, list): |
|
polygons = PolygonMasks([segmentation]) |
|
area = polygons.area()[0].item() |
|
elif isinstance(segmentation, dict): |
|
area = mask_util.area(segmentation).item() |
|
else: |
|
raise TypeError(f"Unknown segmentation type {type(segmentation)}!") |
|
else: |
|
|
|
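                # Computing areas using bounding boxes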
if to_bbox_mode == BoxMode.XYWH_ABS: |
|
bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS) |
|
area = Boxes([bbox_xy]).area()[0].item() |
|
else: |
|
area = RotatedBoxes([bbox]).area()[0].item() |
|
|
|
if "keypoints" in annotation: |
|
keypoints = annotation["keypoints"] |
|
for idx, v in enumerate(keypoints): |
|
if idx % 3 != 2: |
|
|
|
|
|
|
|
|
|
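                        # COCO stores keypoint coordinates as integer "pixel indices",
                        # while detectron2 uses floating point coordinates.
                        # Subtract the 0.5 that load_coco_json added when loading.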
keypoints[idx] = v - 0.5 |
|
if "num_keypoints" in annotation: |
|
num_keypoints = annotation["num_keypoints"] |
|
else: |
|
num_keypoints = sum(kp > 0 for kp in keypoints[2::3]) |
|
|
|
|
|
|
|
|
|
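            # COCO requirement:
            #   linking annotations to images
            #   "id" field must start with 1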
coco_annotation["id"] = len(coco_annotations) + 1 |
|
coco_annotation["image_id"] = coco_image["id"] |
|
coco_annotation["bbox"] = [round(float(x), 3) for x in bbox] |
|
coco_annotation["area"] = float(area) |
|
coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0)) |
|
coco_annotation["category_id"] = int( |
|
reverse_id_mapper(annotation["category_id"]) |
|
) |
|
|
|
|
|
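            # Add optional fields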
if "keypoints" in annotation: |
|
coco_annotation["keypoints"] = keypoints |
|
coco_annotation["num_keypoints"] = num_keypoints |
|
|
|
if "segmentation" in annotation: |
|
seg = coco_annotation["segmentation"] = annotation["segmentation"] |
|
if isinstance(seg, dict): |
|
counts = seg["counts"] |
|
if not isinstance(counts, str): |
|
|
|
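                        # make it json-serializable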
seg["counts"] = counts.decode("ascii") |
|
|
|
coco_annotations.append(coco_annotation) |
|
|
|
logger.info( |
|
"Conversion finished, " |
|
f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}" |
|
) |
|
|
|
info = { |
|
"date_created": str(datetime.datetime.now()), |
|
"description": "Automatically generated COCO json file for Detectron2.", |
|
} |
|
coco_dict = { |
|
"info": info, |
|
"images": coco_images, |
|
"categories": categories, |
|
"licenses": None, |
|
} |
|
    if len(coco_annotations) > 0:  # avoid producing a json that has no annotations
|
coco_dict["annotations"] = coco_annotations |
|
return coco_dict |
|
|
|
|
|
def convert_to_coco_json(dataset_name, output_file, allow_cached=True): |
|
""" |
|
Converts dataset into COCO format and saves it to a json file. |
|
dataset_name must be registered in DatasetCatalog and in detectron2's standard format. |
|
|
|
Args: |
|
dataset_name: |
|
reference from the config file to the catalogs |
|
must be registered in DatasetCatalog and in detectron2's standard format |
|
output_file: path of json file that will be saved to |
|
allow_cached: if json file is already present then skip conversion |
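
    Example::

        # Hypothetical names; any dataset registered in DatasetCatalog works.
        convert_to_coco_json("my_dataset_train", "output/my_dataset_train_coco.json")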
|
""" |
|
|
|
|
|
|
|
|
|
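    # TODO: The dataset or the conversion script *may* change;
    # a checksum would be useful for validating the cached data.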
PathManager.mkdirs(os.path.dirname(output_file)) |
|
with file_lock(output_file): |
|
if PathManager.exists(output_file) and allow_cached: |
|
logger.warning( |
|
f"Using previously cached COCO format annotations at '{output_file}'. " |
|
"You need to clear the cache file if your dataset has been modified." |
|
) |
|
else: |
|
            logger.info(
                f"Converting annotations of dataset '{dataset_name}' to COCO format ..."
            )
|
coco_dict = convert_to_coco_dict(dataset_name) |
|
|
|
logger.info(f"Caching COCO format annotations at '{output_file}' ...") |
|
tmp_file = output_file + ".tmp" |
|
with PathManager.open(tmp_file, "w") as f: |
|
json.dump(coco_dict, f) |
|
shutil.move(tmp_file, output_file) |
|
|
|
|
|
def register_coco_instances(name, metadata, json_file, image_root): |
|
""" |
|
Register a dataset in COCO's json annotation format for |
|
instance detection, instance segmentation and keypoint detection. |
|
(i.e., Type 1 and 2 in http://cocodataset.org/#format-data. |
|
`instances*.json` and `person_keypoints*.json` in the dataset). |
|
|
|
This is an example of how to register a new dataset. |
|
You can do something similar to this function, to register new datasets. |
|
|
|
Args: |
|
name (str): the name that identifies a dataset, e.g. "coco_2014_train". |
|
metadata (dict): extra metadata associated with this dataset. You can |
|
leave it as an empty dict. |
|
json_file (str): path to the json instance annotation file. |
|
image_root (str or path-like): directory which contains all the images. |
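
    Example::

        # The dataset name and paths are placeholders for your own data.
        register_coco_instances(
            "my_dataset_train", {}, "path/to/annotations.json", "path/to/image/dir"
        )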
|
""" |
|
assert isinstance(name, str), name |
|
assert isinstance(json_file, (str, os.PathLike)), json_file |
|
assert isinstance(image_root, (str, os.PathLike)), image_root |
|
|
|
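    # 1. register a function which returns dicts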
DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name)) |
|
|
|
|
|
|
|
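    # 2. Optionally, add metadata about this dataset,
    # since they might be useful in evaluation, visualization or logging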
MetadataCatalog.get(name).set( |
|
json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata |
|
) |
|
|
|
|
|
def main() -> None: |
|
    """
|
Test the COCO json dataset loader. |
|
|
|
Usage: |
|
python -m detectron2.data.datasets.coco \ |
|
path/to/json path/to/image_root dataset_name |
|
|
|
"dataset_name" can be "coco_2014_minival_100", or other |
|
pre-registered ones |
|
""" |
|
import sys |
|
|
|
import detectron2.data.datasets |
|
from detectron2.utils.logger import setup_logger |
|
from detectron2.utils.visualizer import Visualizer |
|
|
|
logger = setup_logger(name=__name__) |
|
assert sys.argv[3] in DatasetCatalog.list() |
|
meta = MetadataCatalog.get(sys.argv[3]) |
|
|
|
dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3]) |
|
logger.info("Done loading {} samples.".format(len(dicts))) |
|
|
|
dirname = "coco-data-vis" |
|
os.makedirs(dirname, exist_ok=True) |
|
for d in dicts: |
|
img = np.array(Image.open(d["file_name"])) |
|
visualizer = Visualizer(img, metadata=meta) |
|
vis = visualizer.draw_dataset_dict(d) |
|
fpath = os.path.join(dirname, os.path.basename(d["file_name"])) |
|
vis.save(fpath) |
|
|
|
|
|
if __name__ == "__main__": |
|
main() |
|
|