File size: 3,740 Bytes
7d5136d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
# This code is an adaptation of https://huggingface.co/spaces/ybelkada/cocoevaluate

from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from datasets import Dataset
from PIL import Image
from torchvision.datasets.vision import VisionDataset

_TYPING_BOXES = Tuple[float, float, float, float]
_TYPING_ANNOTS = Dict[str, Union[int, str, _TYPING_BOXES]]
_TYPING_LABELS = Dict[str, torch.Tensor]

class COCODataset(VisionDataset):
    """
    A class that extends VisionDataset and represents a COCO detection dataset.

    Wraps a Hugging Face ``datasets.Dataset`` of images together with
    COCO-format annotation JSON, exposing ``__getitem__`` items of the form
    ``{"image": PIL.Image.Image, "target": {"image_id": ..., "annotations": [...]}}``.
    """

    def __init__(
        self,
        loaded_json: _TYPING_ANNOTS,
        ids_mapping: Dict[int, int],
        dataset: Dataset,
        transforms: Optional[Callable] = None,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
    ) -> None:
        """
        Arguments:
            loaded_json: A COCO-format dictionary containing at least the
                "images" and "annotations" keys.
            ids_mapping (Dict[int, int]): A dictionary that maps a COCO image
                id to the corresponding row index in ``dataset``.
            dataset (Dataset): The data which is going to be used.
            transforms (Optional): A function/transform that takes in an PIL image
                and returns a transformed version.
            transform (Optional): A function/transform that takes in an PIL image
                and returns a transformed version. E.g, ``transforms.RandomCrop``.
            target_transform (Optional): A function/transform that takes in the
                target and transforms it.
        """
        root = ""
        super().__init__(root, transforms, transform, target_transform)

        self.ids_mapping = ids_mapping
        self.dataset = dataset

        # Index image records by their COCO id; sorted ids give a
        # deterministic iteration order for __getitem__.
        self.images = {img["id"]: img for img in loaded_json["images"]}
        self.ids = sorted(self.images)
        # Group annotations by image id once, so per-image lookup is O(1).
        self.annotations: Dict[int, List[_TYPING_ANNOTS]] = {}
        for annot in loaded_json["annotations"]:
            img_id = annot["image_id"]
            self.annotations.setdefault(img_id, []).append(annot)

    def _load_image(self, idx: int) -> Image.Image:
        """
        Load an image given its id.

        Arguments:
            idx: COCO image id of the image to be loaded (despite the name,
                this is an id, not a positional index — see ``__getitem__``).

        Returns:
            PIL Image instance, converted to RGB.
        """
        # Translate the COCO image id into the dataset row index.
        # (Renamed from `id` to avoid shadowing the builtin.)
        dataset_idx = self.ids_mapping[idx]
        img = self.dataset[dataset_idx]["image"].convert("RGB")
        return img

    def _load_target(self, idx: int) -> List[Any]:
        """
        Load the annotations of an image given its id.

        Arguments:
            idx: COCO image id of the image to load its annotations.

        Returns:
            List containing the annotations of the image; empty list if the
            image has no annotations.
        """
        return self.annotations.get(idx, [])

    def __len__(self) -> int:
        """
        Returns the number of elements in the dataset.

        Returns:
            int: Number of images in the dataset.
        """
        return len(self.ids)

    def __getitem__(self, index: int) -> Dict[str, Union[torch.Tensor, _TYPING_LABELS]]:
        """
        Given an index, it preprocesses and returns the image and its associated annotations \
            at a that index.

        Arguments:
            index: Index of the image.

        Returns:
            Dictionary containing preprocessed image as pixel values and its associated \
                annotations as labels.
        """
        # NOTE(review): self.transforms / self.transform / self.target_transform
        # accepted in __init__ are never applied here — confirm whether callers
        # apply them externally before "fixing" this.
        image_id = self.ids[index]
        # PIL Image
        image = self._load_image(image_id)
        # List of annotation dicts 'id', 'category_id', 'iscrowd', 'imageid', 'area', 'bbox'
        annot_dicts = self._load_target(image_id)

        target = {"image_id": image_id, "annotations": annot_dicts}
        return {"image": image, "target": target}