GraySpectrogram / GraySpectrogram.py
import json
from os.path import basename

import datasets
from datasets import DownloadManager, DatasetInfo
from datasets.data_files import DataFilesDict
from huggingface_hub import HfApi
# Dataset settings go here.
_NAME = "mb23/GraySpectrogram"
_EXTENSION = [".png"]
_REVISION = "main"
# Attach the homepage URL here once the script's final location is decided.
_HOMEPAGE = "https://huggingface.co/datasets/mb23/GraySpectrogram"
_DESCRIPTION = f"""\
{_NAME}: a dataset of spectrogram .png files derived from Google's MusicCaps dataset.
Built for Project Learning.
"""
# Reference URLs
# https://huggingface.co/docs/datasets/v1.1.1/_modules/datasets/utils/download_manager.html
# https://huggingface.co/docs/datasets/package_reference/builder_classes
# https://huggingface.co/datasets/animelover/danbooru2022/blob/main/danbooru2022.py
# https://huggingface.co/datasets/food101/blob/main/food101.py
# https://huggingface.co/docs/datasets/about_dataset_load
# https://huggingface.co/datasets/frgfm/imagenette/blob/main/imagenette.py
# https://huggingface.co/docs/datasets/v1.2.1/add_dataset.html
# DatasetInfo : https://huggingface.co/docs/datasets/package_reference/main_classes
def get_information():
    """Resolve the metadata (.jsonl) and image-archive (.zip) URLs for each split."""
    hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
    # Get the URLs of the .jsonl metadata files.
    # TODO: is there a cleaner way to extract these?
train_metadata_url = DataFilesDict.from_hf_repo(
{datasets.Split.TRAIN: ["data/train/**"]},
dataset_info=hfh_dataset_info,
allowed_extensions=["jsonl", ".jsonl"],
)
test_metadata_url = DataFilesDict.from_hf_repo(
{datasets.Split.TEST: ["data/test/**"]},
dataset_info=hfh_dataset_info,
allowed_extensions=["jsonl", ".jsonl"],
)
    metadata_urls = {
        "train": train_metadata_url["train"],
        "test": test_metadata_url["test"],
    }
    # Get the image archives' **.zip URLs as a dict.
train_data_url = DataFilesDict.from_hf_repo(
{datasets.Split.TRAIN: ["data/train/**"]},
dataset_info=hfh_dataset_info,
allowed_extensions=["zip", ".zip"],
)
test_data_url = DataFilesDict.from_hf_repo(
{datasets.Split.TEST: ["data/test/**"]},
dataset_info=hfh_dataset_info,
allowed_extensions=["zip", ".zip"]
)
    data_urls = {
        "train": train_data_url["train"],
        "test": test_data_url["test"],
    }
    return metadata_urls, data_urls
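# A minimal sanity-check sketch (hypothetical usage, not part of the loader):
# the two dicts hold parallel per-subset URL lists, so each split's archive
# list and metadata list should line up one-to-one.
#
#     metadata_urls, data_urls = get_information()
#     for split in ("train", "test"):
#         assert len(data_urls[split]) == len(metadata_urls[split])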
class GraySpectrogramConfig(datasets.BuilderConfig):
    """BuilderConfig for GraySpectrogram."""

    def __init__(self, data_url, metadata_url, **kwargs):
        """BuilderConfig for GraySpectrogram.

        Args:
            data_url: dict with keys 'train' and 'test' mapping to the image-archive (zip) URLs.
            metadata_url: dict with keys 'train' and 'test' mapping to the metadata (jsonl) URLs.
            **kwargs: keyword arguments forwarded to super.
        """
        super(GraySpectrogramConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.metadata_url = metadata_url
class GraySpectrogram(datasets.GeneratorBasedBuilder):
    # The dataset subsets are defined here.
    metadata_urls, data_urls = get_information()
subset_name_list = [
"data 0-200",
"data 200-600",
"data 600-1000",
"data 1000-1300",
"data 1300-1600",
"data 1600-2000",
]
for i in range(2000, 2800, 200):
subset_name_list.append(f"data {i}-{i+200}")
for i in range(3000, 5200, 200):
subset_name_list.append(f"data {i}-{i+200}")
subset_name_list.append("data 5200-5520")
    config_list = []
    for i, subset_name in enumerate(subset_name_list):
        config_list.append(
            GraySpectrogramConfig(
                name=subset_name,
                description=_DESCRIPTION,
                data_url={
                    "train": data_urls["train"][i],
                    "test": data_urls["test"][i],
                },
                metadata_url={
                    "train": metadata_urls["train"][i],
                    "test": metadata_urls["test"][i],
                },
            )
        )
    BUILDER_CONFIGS = config_list
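    # Loading a single subset then looks like this (usage sketch; the config
    # names are exactly the strings built in subset_name_list above):
    #
    #     from datasets import load_dataset
    #     ds = load_dataset("mb23/GraySpectrogram", "data 0-200")
    #     print(ds["train"][0]["caption"])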
def _info(self) -> DatasetInfo:
return datasets.DatasetInfo(
description = self.config.description,
features=datasets.Features(
{
"image": datasets.Image(),
"caption": datasets.Value("string"),
"data_idx": datasets.Value("int32"),
"number" : datasets.Value("int32"),
"label" : datasets.ClassLabel(
names=[
"blues",
"classical",
"country",
"disco",
"hiphop",
"metal",
"pop",
"reggae",
"rock",
"jazz"
]
)
}
),
supervised_keys=("image", "caption"),
homepage=_HOMEPAGE,
            citation="",
)
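    # Note: ClassLabel accepts the genre strings yielded by _generate_examples
    # and encodes them to integer ids automatically, so metadata.jsonl can
    # store labels as plain text.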
    def _split_generators(self, dl_manager: DownloadManager):
        # Metadata (.jsonl) files are downloaded and extracted directly; the
        # image zips are only downloaded and streamed later via iter_archive.
        train_metadata_path = dl_manager.download_and_extract(self.config.metadata_url["train"])
        test_metadata_path = dl_manager.download_and_extract(self.config.metadata_url["test"])
        train_data_path = dl_manager.download(self.config.data_url["train"])
        test_data_path = dl_manager.download(self.config.data_url["test"])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={
"images": dl_manager.iter_archive(train_data_path),
"metadata_path": train_metadata_path,
}
),
datasets.SplitGenerator(
name=datasets.Split.TEST,
gen_kwargs={
"images": dl_manager.iter_archive(test_data_path),
"metadata_path": test_metadata_path,
}
),
]
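    # dl_manager.iter_archive streams (path-in-archive, file-object) pairs
    # straight from the downloaded zip without unpacking it to disk, which is
    # why the archives use download() rather than download_and_extract().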
    def _generate_examples(self, images, metadata_path):
        """Generate image/caption examples for a split."""
        # Index the metadata rows by file name; this assumes each archive
        # member's base name matches a "file_name" entry in metadata.jsonl.
        metadata = {}
        with open(metadata_path, encoding="utf-8") as fin:
            for line in fin:
                data = json.loads(line)
                metadata[basename(data["file_name"])] = data

        for file_path, file_obj in images:
            data = metadata[basename(file_path)]
            yield file_path, {
                "image": {
                    "path": file_path,
                    "bytes": file_obj.read(),
                },
                "caption": data["caption"],
                "data_idx": data["data_idx"],
                "number": data["number"],
                "label": data["label"],
            }
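    # Each metadata.jsonl line is expected to carry the fields read above;
    # an illustrative (hypothetical) row:
    #   {"file_name": "0.png", "caption": "a mellow piano trio",
    #    "data_idx": 0, "number": 0, "label": "jazz"}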