import json
import os
from os.path import basename, dirname
from pathlib import Path

import datasets
from datasets import DatasetInfo, DownloadManager
from datasets.data_files import DataFilesDict
from huggingface_hub import HfApi

# --- Repository configuration -------------------------------------------------
_NAME = "mb23/GraySpectrogram"
_EXTENSION = [".png"]
_REVISION = "main"

# _HOMEPAGE = "https://github.com/fastai/imagenette"
# Once the program has a permanent home, point _HOMEPAGE there.
_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"

_DESCRIPTION = f"""\
{_NAME} Datasets including spectrogram.png file from Google MusicCaps Datasets!
Using for Project Learning...
"""

# Useful references:
# https://huggingface.co/docs/datasets/package_reference/builder_classes
# https://huggingface.co/datasets/food101/blob/main/food101.py
# https://huggingface.co/docs/datasets/about_dataset_load
# DatasetInfo : https://huggingface.co/docs/datasets/package_reference/main_classes

# Display names of the five data slices; index i selects the i-th metadata/zip
# file of each split in the lists returned by get_information().
_SUBSET_NAMES = [
    "data 0-200",
    "data 200-600",
    "data 600-1000",
    "data 1000-1300",
    "data 1300-1600",
]


def get_information():
    """Resolve metadata (.jsonl) and archive (.zip) URLs from the Hub repo.

    Returns:
        (metadata_urls, data_urls): two dicts with keys ``"train"`` / ``"test"``,
        each mapping to the list of resolved file URLs for that split.

    NOTE(review): this performs a network request; it runs at import time
    because the class body below needs the URLs to build BUILDER_CONFIGS.
    """
    hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)

    def _split_urls(allowed_extensions):
        # One DataFilesDict per split, filtered to the given extensions.
        train = DataFilesDict.from_hf_repo(
            {datasets.Split.TRAIN: ["data/train/**"]},
            dataset_info=hfh_dataset_info,
            allowed_extensions=allowed_extensions,
        )
        test = DataFilesDict.from_hf_repo(
            {datasets.Split.TEST: ["data/test/**"]},
            dataset_info=hfh_dataset_info,
            allowed_extensions=allowed_extensions,
        )
        return {"train": train["train"], "test": test["test"]}

    metadata_urls = _split_urls(["jsonl", ".jsonl"])
    data_urls = _split_urls(["zip", ".zip"])
    return (metadata_urls, data_urls)


def _build_configs(metadata_urls, data_urls):
    """Create one GraySpectrogramConfig per entry of _SUBSET_NAMES.

    Module-level helper (rather than a class-body comprehension) because class
    attributes are not visible inside comprehensions in a class body.
    """
    return [
        GraySpectrogramConfig(
            name=subset_name,
            description=_DESCRIPTION,
            data_url={
                "train": data_urls["train"][i],
                "test": data_urls["test"][i],
            },
            metadata_url={
                "train": metadata_urls["train"][i],
                "test": metadata_urls["test"][i],
            },
        )
        for i, subset_name in enumerate(_SUBSET_NAMES)
    ]


class GraySpectrogramConfig(datasets.BuilderConfig):
    """BuilderConfig for one GraySpectrogram data slice.

    Args:
        data_url: dict with 'train'/'test' keys mapping to zip archive URLs.
        metadata_url: dict with 'train'/'test' keys mapping to .jsonl URLs.
        **kwargs: forwarded to datasets.BuilderConfig.
    """

    def __init__(self, data_url, metadata_url, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_url = data_url
        self.metadata_url = metadata_url


class GraySpectrogram(datasets.GeneratorBasedBuilder):
    """Spectrogram image/caption dataset builder with five index-ranged subsets."""

    # Resolved once at class-creation time; BUILDER_CONFIGS depends on it.
    metadata_urls, data_urls = get_information()

    BUILDER_CONFIGS = _build_configs(metadata_urls, data_urls)

    def _info(self) -> DatasetInfo:
        return datasets.DatasetInfo(
            description=self.config.description,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "caption": datasets.Value("string"),
                    "data_idx": datasets.Value("int32"),
                    "number": datasets.Value("int32"),
                    "label": datasets.ClassLabel(
                        names=[
                            "blues",
                            "classical",
                            "country",
                            "disco",
                            "hiphop",
                            "metal",
                            "pop",
                            "reggae",
                            "rock",
                            "jazz",
                        ]
                    ),
                }
            ),
            supervised_keys=("image", "caption"),
            homepage=_HOMEPAGE,
            citation="",
            # license=_LICENSE,
            # task_templates=[ImageClassification(image_column="image", label_column="label")],
        )

    def _split_generators(self, dl_manager: DownloadManager):
        # Metadata files are extracted; zip archives are only downloaded and
        # then streamed via iter_archive (no extraction to disk needed).
        train_metadata_path = dl_manager.download_and_extract(self.config.metadata_url["train"])
        test_metadata_path = dl_manager.download_and_extract(self.config.metadata_url["test"])
        train_data_path = dl_manager.download(self.config.data_url["train"])
        test_data_path = dl_manager.download(self.config.data_url["test"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(train_data_path),
                    "metadata_path": train_metadata_path,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images": dl_manager.iter_archive(test_data_path),
                    "metadata_path": test_metadata_path,
                },
            ),
        ]

    def _generate_examples(self, images, metadata_path):
        """Yield (key, example) pairs joining archived images with jsonl metadata.

        Each metadata line must contain "file_name", "caption", "data_idx",
        "number" and "label". Images are matched to their metadata row by file
        name (falling back to archive order, the previous behavior, when a
        name is not found in the metadata).
        """
        by_name = {}   # file_name -> metadata record
        in_order = []  # records in file order, for the positional fallback
        with open(metadata_path, encoding="utf-8") as fin:
            for line in fin:
                record = json.loads(line)
                by_name[record["file_name"]] = record
                in_order.append(record)

        for idx, (file_path, file_obj) in enumerate(images):
            # Prefer an exact path match, then the bare file name; only if the
            # archive member names don't appear in the metadata at all do we
            # fall back to pairing by position.
            record = by_name.get(file_path) or by_name.get(basename(file_path))
            if record is None:
                record = in_order[idx]
            yield file_path, {
                "image": {"path": file_path, "bytes": file_obj.read()},
                "caption": record["caption"],
                "data_idx": record["data_idx"],
                "number": record["number"],
                "label": record["label"],
            }