import glob
from pathlib import Path

import datasets
import pandas as pd
from PIL import Image, ImageOps

_DESCRIPTION = """Photos of various plants with their major, above ground organs labeled. Includes labels for stem, leafs, fruits and flowers.""" |

_HOMEPAGE = "https://huggingface.co/datasets/jpodivin/plantorgans"

_CITATION = """"""

_LICENSE = "MIT"

_BASE_URL = "https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/"

_TRAIN_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(0, 8)]
_TEST_URLS = [_BASE_URL + f"sourcedata_labeled.tar.{i:02}" for i in range(8, 12)]
_MASKS_URLS = [_BASE_URL + f"masks.tar.{i:02}" for i in range(0, 2)]
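
# The labeled photos and segmentation masks ship as multi-part tar archives:
# sourcedata_labeled.tar.00-.07 (train), sourcedata_labeled.tar.08-.11 (test)
# and masks.tar.00-.01, all resolved against _BASE_URL.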

_METADATA_URLS = {
    'train': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/metadata_train.csv',
    'test': 'https://huggingface.co/datasets/jpodivin/plantorgans/resolve/main/metadata_test.csv'
}
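
# Each metadata CSV is expected to provide at least the columns consumed in
# _generate_examples below: 'image' (archive-relative photo path), 'mask'
# (archive-relative mask path) and 'score' (per-example float).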


class PlantOrgansConfig(datasets.BuilderConfig):
    """BuilderConfig for PlantOrgans."""

    def __init__(self, data_url, metadata_urls, splits, **kwargs):
        """BuilderConfig for PlantOrgans.
        Args:
            data_url: `string`, base URL the source-data archives are downloaded from.
            metadata_urls: dictionary with keys 'train' and 'test' containing the metadata CSV URLs.
            splits: list of split names provided by this configuration.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.metadata_urls = metadata_urls
        self.splits = splits


class PlantOrgans(datasets.GeneratorBasedBuilder):
    """PlantOrgans dataset."""

    BUILDER_CONFIGS = [
        PlantOrgansConfig(
            name="semantic_segmentation_full",
            description="This configuration contains segmentation masks.",
            data_url=_BASE_URL,
            metadata_urls=_METADATA_URLS,
            splits=['train', 'test'],
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "mask": datasets.Image(),
                    "score": datasets.Value(dtype='double'),
                    "image_name": datasets.Value(dtype="string"),
                }
            ),
            supervised_keys=("image", "mask"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )
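    # The (input, target) pair exposed through `supervised_keys` is the photo and
    # its segmentation mask; 'score' and 'image_name' ride along as extra columns.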

    def _split_generators(self, dl_manager):
        # Download and unpack the photo archives for each split.
        train_archives_paths = dl_manager.download_and_extract(_TRAIN_URLS)
        test_archives_paths = dl_manager.download_and_extract(_TEST_URLS)

        # Collect the extracted .jpg files from every archive shard.
        train_paths = []
        test_paths = []
        for p in train_archives_paths:
            train_paths.extend(glob.glob(str(p) + '/sourcedata/labeled/**.jpg'))
        for p in test_archives_paths:
            test_paths.extend(glob.glob(str(p) + '/sourcedata/labeled/**.jpg'))

        split_metadata_paths = dl_manager.download(_METADATA_URLS)

        # Masks for both splits are stored in the same archives.
        mask_archives_paths = dl_manager.download_and_extract(_MASKS_URLS)
        mask_paths = []
        for p in mask_archives_paths:
            mask_paths.extend(glob.glob(str(p) + '/masks/**.png'))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": train_paths,
                    "metadata_path": split_metadata_paths["train"],
                    "masks_path": mask_paths,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": test_paths,
                    "metadata_path": split_metadata_paths["test"],
                    "masks_path": mask_paths,
                },
            ),
        ]
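
    # Both SplitGenerators receive the complete mask list; the inner joins on the
    # split's metadata CSV in _generate_examples decide which (image, mask) pairs
    # actually end up in each split.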

    def _generate_examples(self, images, metadata_path, masks_path):
        """
        images: list of paths to the extracted source images
        metadata_path: path to the metadata CSV of the current split
        masks_path: list of paths to the extracted segmentation masks
        """
        # Map the relative paths recorded in the metadata CSV to the locations the
        # archives were actually extracted to.
        image_paths = pd.DataFrame(
            [(str(Path(*Path(e).parts[-3:])), e) for e in images], columns=['image', 'image_path'])
        masks_paths = pd.DataFrame(
            [(str(Path(*Path(e).parts[-2:])), e) for e in masks_path], columns=['mask', 'mask_path'])

        metadata = pd.read_csv(metadata_path)

        # Keep only rows whose mask and image were both found on disk.
        metadata = metadata.merge(masks_paths, on='mask', how='inner')
        metadata = metadata.merge(image_paths, on='image', how='inner')

        for i, r in metadata.iterrows():
            example = {
                'mask': r['mask_path'],
                'image': ImageOps.exif_transpose(Image.open(r['image_path'])),
                'image_name': Path(r['image_path']).parts[-1],
                'score': r['score'],
            }
            yield i, example
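

# Example usage (a sketch, assuming the repository id and config name defined above;
# recent versions of `datasets` may additionally require trust_remote_code=True for
# script-based datasets):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("jpodivin/plantorgans", "semantic_segmentation_full")
#   sample = ds["train"][0]
#   sample["image"]       # PIL image of the photo
#   sample["mask"]        # segmentation mask image
#   sample["score"]       # per-example float taken from the metadata CSV
#   sample["image_name"]  # file name of the source photo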