|
|
|
import datasets |
|
from huggingface_hub import HfApi |
|
from datasets import DownloadManager, DatasetInfo |
|
from datasets.data_files import DataFilesDict |
|
import os |
|
import json |
|
from os.path import dirname, basename |
|
from pathlib import Path |
|
|
|
|
|
|
|
# Hub repository id of the dataset this script loads.
_NAME = "mb23/GraySpectrogram"

# Image file extension(s) stored inside the data archives.
# NOTE(review): not referenced anywhere in this file — possibly dead code.
_EXTENSION = [".png"]

# Git revision of the Hub repo used when resolving file listings.
_REVISION = "main"

# NOTE(review): points at a different repo than _NAME — confirm this is intended.
_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"

# Human-readable description surfaced through DatasetInfo.
_DESCRIPTION = f"""\

{_NAME} Datasets including spectrogram.png file from Google MusicCaps Datasets!

Using for Project Learning...

"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_information():
    """Resolve metadata (.jsonl) and archive (.zip) URLs for both splits.

    Queries the Hub API once for the repo file listing, then filters it per
    split and per extension.

    Returns:
        A ``(metadata_urls, data_urls)`` pair of dicts, each mapping
        ``"train"`` / ``"test"`` to the list of matching file URLs.
    """
    # Single Hub API call; the listing is reused for all four filters below.
    hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)

    def _resolve(split, split_name, allowed_extensions):
        # One DataFilesDict per (split, extension) combination; previously this
        # was four near-identical call sites.
        files = DataFilesDict.from_hf_repo(
            {split: [f"data/{split_name}/**"]},
            dataset_info=hfh_dataset_info,
            allowed_extensions=allowed_extensions,
        )
        return files[split_name]

    splits = {
        "train": datasets.Split.TRAIN,
        "test": datasets.Split.TEST,
    }
    metadata_urls = {
        name: _resolve(split, name, ["jsonl", ".jsonl"])
        for name, split in splits.items()
    }
    data_urls = {
        name: _resolve(split, name, ["zip", ".zip"])
        for name, split in splits.items()
    }
    return (metadata_urls, data_urls)
|
|
|
|
|
|
|
class GraySpectrogramConfig(datasets.BuilderConfig):
    """BuilderConfig for the GraySpectrogram dataset."""

    def __init__(self, data_url, metadata_url, **kwargs):
        """BuilderConfig for the GraySpectrogram dataset.

        Args:
            data_url: dict with "train"/"test" keys mapping to archive URLs.
            metadata_url: dict with "train"/"test" keys mapping to the
                .jsonl metadata URL paired with each archive.
            **kwargs: keyword arguments forwarded to ``datasets.BuilderConfig``.
        """
        # Pin the config version; everything else passes straight through.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.metadata_url = metadata_url
|
|
|
class GraySpectrogram(datasets.GeneratorBasedBuilder):
    """Loader for the mb23/GraySpectrogram dataset.

    Each config covers one numeric slice of the dataset (e.g. "data 0-200")
    and provides a zip archive plus a .jsonl metadata file per split.
    """

    # NOTE(review): this hits the Hub API at class-definition (import) time;
    # a network failure makes the module unimportable. Consider deferring.
    metadata_urls, data_urls = get_information()

    # One config per archive slice; index i selects the i-th zip/jsonl pair.
    _CONFIG_NAMES = [
        "data 0-200",
        "data 200-600",
        "data 600-1000",
        "data 1000-1300",
        "data 1300-1600",
    ]

    # A plain loop (not a comprehension) is required here: class-scope
    # comprehensions cannot see class attributes such as data_urls.
    BUILDER_CONFIGS = []
    for _idx, _cfg_name in enumerate(_CONFIG_NAMES):
        BUILDER_CONFIGS.append(
            GraySpectrogramConfig(
                name=_cfg_name,
                description=_DESCRIPTION,
                data_url={
                    "train": data_urls["train"][_idx],
                    "test": data_urls["test"][_idx],
                },
                metadata_url={
                    "train": metadata_urls["train"][_idx],
                    "test": metadata_urls["test"][_idx],
                },
            )
        )
    del _idx, _cfg_name  # keep loop temporaries off the class namespace

    def _info(self) -> DatasetInfo:
        """Declare the feature schema shared by every config."""
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "caption": datasets.Value("string"),
                    "data_idx": datasets.Value("int32"),
                    "number": datasets.Value("int32"),
                    # GTZAN-style genre labels.
                    "label": datasets.ClassLabel(
                        names=[
                            "blues",
                            "classical",
                            "country",
                            "disco",
                            "hiphop",
                            "metal",
                            "pop",
                            "reggae",
                            "rock",
                            "jazz",
                        ]
                    ),
                }
            ),
            supervised_keys=("image", "caption"),
            homepage=_HOMEPAGE,
            citation="",
        )

    def _split_generators(self, dl_manager: DownloadManager):
        """Download metadata and archives, returning one generator per split.

        Metadata files are downloaded (and extracted if compressed); the zip
        archives are only downloaded, then streamed via ``iter_archive`` so
        they never need to be fully extracted on disk.
        """
        train_metadata_path = dl_manager.download_and_extract(self.config.metadata_url["train"])
        test_metadata_path = dl_manager.download_and_extract(self.config.metadata_url["test"])
        train_data_path = dl_manager.download(self.config.data_url["train"])
        test_data_path = dl_manager.download(self.config.data_url["test"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(train_data_path),
                    "metadata_path": train_metadata_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(test_data_path),
                    "metadata_path": test_metadata_path,
                },
            ),
        ]

    def _generate_examples(self, images, metadata_path):
        """Yield (key, example) pairs joining archive images with metadata rows.

        NOTE(review): images and metadata rows are matched purely by position —
        the i-th .jsonl line is assumed to describe the i-th archive member.
        Confirm the archive and metadata were written in the same order;
        matching on the metadata "file_name" field would be more robust.
        """
        # One dict per metadata line, replacing the previous five parallel lists.
        with open(metadata_path, encoding="utf-8") as fin:
            records = [json.loads(line) for line in fin]

        for idx, (file_path, file_obj) in enumerate(images):
            data = records[idx]
            yield file_path, {
                "image": {
                    "path": file_path,
                    "bytes": file_obj.read(),
                },
                "caption": data["caption"],
                "data_idx": data["data_idx"],
                "number": data["number"],
                "label": data["label"],
            }
|
|