import os

import datasets
import numpy as np
from PIL import Image

_SHAPES3D_URL = "https://huggingface.co/datasets/randall-lab/shapes3d/resolve/main/shapes3d.npz"


class Shapes3D(datasets.GeneratorBasedBuilder):
    """Shapes3D dataset: 480,000 64x64 RGB images covering all 10x10x10x8x4x15 factor combinations."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=(
                "Shapes3D dataset: procedurally generated images of 3D shapes with 6 independent factors of variation. "
                "Commonly used for disentangled representation learning. "
                "Factors: floor hue (10), wall hue (10), object hue (10), scale (8), shape (4), orientation (15). "
                "Images are ordered as the Cartesian product of these factors in row-major order "
                "(floor hue varies slowest, orientation fastest)."
            ),
            features=datasets.Features(
                {
                    "image": datasets.Image(),  # (64, 64, 3) RGB image
                    "index": datasets.Value("int32"),  # flat index of the image in the archive
                    "label": datasets.Sequence(datasets.Value("float64")),  # 6 factor values (continuous)
                    "label_index": datasets.Sequence(datasets.Value("int64")),  # 6 factor indices
                    "floor": datasets.Value("float64"),  # floor hue value (0-1)
                    "wall": datasets.Value("float64"),  # wall hue value (0-1)
                    "object": datasets.Value("float64"),  # object hue value (0-1)
                    "scale": datasets.Value("float64"),  # object scale value (0.75-1.25)
                    "shape": datasets.Value("float64"),  # shape value (0-3)
                    "orientation": datasets.Value("float64"),  # orientation value (-30 to 30)
                    "floor_idx": datasets.Value("int32"),  # factor index of floor hue
                    "wall_idx": datasets.Value("int32"),  # factor index of wall hue
                    "object_idx": datasets.Value("int32"),  # factor index of object hue
                    "scale_idx": datasets.Value("int32"),  # factor index of scale
                    "shape_idx": datasets.Value("int32"),  # factor index of shape
                    "orientation_idx": datasets.Value("int32"),  # factor index of orientation
                }
            ),
            supervised_keys=("image", "label"),
            homepage="https://github.com/google-deepmind/3dshapes-dataset/",
            license="apache-2.0",
            citation="""@InProceedings{pmlr-v80-kim18b,
  title = {Disentangling by Factorising},
  author = {Kim, Hyunjik and Mnih, Andriy},
  booktitle = {Proceedings of the 35th International Conference on Machine Learning},
  pages = {2649--2658},
  year = {2018},
  editor = {Dy, Jennifer and Krause, Andreas},
  volume = {80},
  series = {Proceedings of Machine Learning Research},
  month = {10--15 Jul},
  publisher = {PMLR},
  pdf = {http://proceedings.mlr.press/v80/kim18b/kim18b.pdf},
  url = {https://proceedings.mlr.press/v80/kim18b.html}
}""",
        )

    def _split_generators(self, dl_manager):
        npz_path = dl_manager.download(_SHAPES3D_URL)
        # Shapes3D ships no canonical train/test split, so everything goes into a single train split.
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"npz_path": npz_path},
            ),
        ]

    def _generate_examples(self, npz_path):
        # Load the npz archive.
        data = np.load(npz_path)
        images = data["images"]  # (480000, 64, 64, 3)
        labels = data["labels"]  # (480000, 6)
        # Factor sizes (from README / paper), in order:
        # floor hue, wall hue, object hue, scale, shape, orientation.
        factor_sizes = np.array([10, 10, 10, 8, 4, 15])
        factor_bases = np.cumprod([1] + list(factor_sizes[::-1]))[::-1][1:]
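        # Resulting row-major strides, derived from the sizes above:
        # factor_bases == [48000, 4800, 480, 60, 15, 1], so orientation varies fastest.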
        def index_to_factors(index):
            factors = []
            for base, size in zip(factor_bases, factor_sizes):
                factor = (index // base) % size
                factors.append(int(factor))
            return factors
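
        # Worked example of the decoding (follows directly from the strides above):
        #   index_to_factors(0)  -> [0, 0, 0, 0, 0, 0]
        #   index_to_factors(1)  -> [0, 0, 0, 0, 0, 1]   (orientation advances first)
        #   index_to_factors(15) -> [0, 0, 0, 0, 1, 0]   (shape advances after 15 orientations)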
        # Iterate over images
        for idx in range(len(images)):
            img = images[idx]
            img_pil = Image.fromarray(img)
            label_value = labels[idx].tolist()
            label_index = index_to_factors(idx)
            yield idx, {
                "image": img_pil,
                "index": idx,
                "label": label_value,
                "label_index": label_index,
                "floor": label_value[0],
                "wall": label_value[1],
                "object": label_value[2],
                "scale": label_value[3],
                "shape": label_value[4],
                "orientation": label_value[5],
                "floor_idx": label_index[0],
                "wall_idx": label_index[1],
                "object_idx": label_index[2],
                "scale_idx": label_index[3],
                "shape_idx": label_index[4],
                "orientation_idx": label_index[5],
            }
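

# Minimal usage sketch (an assumption, not part of the loader itself): it presumes this
# script is hosted as the loading script of the "randall-lab/shapes3d" repo referenced in
# _SHAPES3D_URL above, and that your `datasets` version accepts trust_remote_code.
if __name__ == "__main__":
    ds = datasets.load_dataset("randall-lab/shapes3d", split="train", trust_remote_code=True)
    example = ds[0]
    print(example["label_index"])  # factor indices, e.g. [0, 0, 0, 0, 0, 0] for the first image
    print(example["image"].size)   # (64, 64)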