import datasets
import pandas as pd

logger = datasets.logging.get_logger(__name__)

# Multi-label tags for each training image; the 'tags' column holds a
# space-separated tag string per image, in the same order as the archive.
labels = pd.read_csv('/Users/Atharv/All scripts/Project/Multi label classification/train_classes.csv')

_DESCRIPTION = "Planet Dataset: Understanding Amazon Rainforest from Space"

_URL = "https://huggingface.co/datasets/subhuatharva/amazon_from_space/resolve/main/train-jpg.tar.gz?download=true"


class amazon_from_space(datasets.GeneratorBasedBuilder):
    """Planet Dataset: Understanding Amazon Rainforest from Space"""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "labels": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://huggingface.co/datasets/subhuatharva/amazon_from_space",
        )

    def _split_generators(self, dl_manager):
        # Download without extracting: iter_archive streams files straight
        # out of the tar archive.
        path = dl_manager.download(_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"images": dl_manager.iter_archive(path)},
            ),
        ]

    def _generate_examples(self, images):
        """Yields (key, example) pairs with the raw image bytes and the tag string."""
        idx = 0
        # Iterate through the archive; each item is a (filepath, file object) pair.
        # Assumes the archive order matches the row order of the labels CSV.
        for filepath, image in images:
            yield idx, {
                # The Image() feature expects an encoded dict with "path" and "bytes".
                "image": {"path": filepath, "bytes": image.read()},
                "labels": labels["tags"][idx],
            }
            idx += 1
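
# Usage sketch (illustrative, not part of the builder): assuming this file is
# saved locally as amazon_from_space.py, the script can be exercised with
# datasets.load_dataset. The split name "train" matches the single
# SplitGenerator defined above; the example tag string is hypothetical.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("amazon_from_space.py", split="train", trust_remote_code=True)
    print(ds[0]["labels"])  # e.g. a space-separated tag string such as "haze primary"
    print(ds[0]["image"])   # decoded as a PIL image via the Image() feature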