Datasets: mb23/

Languages: English
License:
mickylan2367 committed
Commit d335f0f · 1 Parent(s): 3d5ac26

changed repository

GraySpectrogram2.py ADDED
@@ -0,0 +1,167 @@
+import datasets
+from huggingface_hub import HfApi
+from datasets import DownloadManager, DatasetInfo
+from datasets.data_files import DataFilesDict
+import json
+from os.path import dirname, basename
+
+
+# Fill in the configuration here.
+_NAME = "mickylan2367/LoadingScriptPractice"
+_EXTENSION = [".png"]
+_REVISION = "main"
+
+# _HOMEPAGE = "https://github.com/fastai/imagenette"
+# Once the script's final home is decided, point _HOMEPAGE at it.
+_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"
+
+_DESCRIPTION = f"""\
+{_NAME}: spectrogram .png files derived from the Google MusicCaps dataset.
+Used for project-based learning.
+"""
+
+# Useful reference URLs:
+# https://huggingface.co/docs/datasets/v1.1.1/_modules/datasets/utils/download_manager.html
+# https://huggingface.co/docs/datasets/package_reference/builder_classes
+# https://huggingface.co/datasets/animelover/danbooru2022/blob/main/danbooru2022.py
+# https://huggingface.co/datasets/food101/blob/main/food101.py
+# https://huggingface.co/docs/datasets/about_dataset_load
+# https://huggingface.co/datasets/frgfm/imagenette/blob/main/imagenette.py
+# https://huggingface.co/docs/datasets/v1.2.1/add_dataset.html
+# DatasetInfo : https://huggingface.co/docs/datasets/package_reference/main_classes
+
+
+class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
+
+    # Dataset subsets (configs) are defined here.
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name="train",
+            description=_DESCRIPTION,
+            # data_url = train_data_url["train"][0],
+            # metadata_urls = {
+            #     "train": train_metadata_paths["train"][0]
+            # }
+        )
+    ]
+
+    def _info(self) -> DatasetInfo:
+        return datasets.DatasetInfo(
+            description=self.config.description,
+            features=datasets.Features(
+                {
+                    "image": datasets.Image(),
+                    "caption": datasets.Value("string"),
+                    "data_idx": datasets.Value("int32"),
+                    "number": datasets.Value("int32"),
+                    "label": datasets.ClassLabel(
+                        names=[
+                            "blues",
+                            "classical",
+                            "country",
+                            "disco",
+                            "hiphop",
+                            "metal",
+                            "pop",
+                            "reggae",
+                            "rock",
+                            "jazz"
+                        ]
+                    )
+                }
+            ),
+            supervised_keys=("image", "caption"),
+            homepage=_HOMEPAGE,
+            citation="",
+            # license=_LICENSE,
+            # task_templates=[ImageClassification(image_column="image", label_column="label")],
+        )
+
+    def _split_generators(self, dl_manager: DownloadManager):
+        # Fetch the repository's file listing from the Hugging Face Hub.
+        hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
+
+        # Collect the metadata (*.jsonl) URLs as a DataFilesDict.
+        metadata_urls = DataFilesDict.from_hf_repo(
+            {datasets.Split.TRAIN: ["**"]},
+            dataset_info=hfh_dataset_info,
+            allowed_extensions=["jsonl", ".jsonl"],
+        )
+
+        # Collect the archive (*.zip) URLs the same way.
+        data_urls = DataFilesDict.from_hf_repo(
+            {datasets.Split.TRAIN: ["**"]},
+            dataset_info=hfh_dataset_info,
+            allowed_extensions=["zip", ".zip"],
+        )
+
+        # Map each containing folder name (e.g. "train", "test") to its file URL.
+        # NOTE: if a folder holds several zips, later paths overwrite earlier
+        # ones here, so only one archive per split is kept.
+        data_paths = dict()
+        for path in data_urls["train"]:
+            folder = basename(dirname(path))
+            data_paths[folder] = path
+
+        metadata_paths = dict()
+        for path in metadata_urls["train"]:
+            folder = basename(dirname(path))
+            metadata_paths[folder] = path
+
+        gs = []
+        for split, files in data_paths.items():
+            # split: "train", "test", or "val"
+            # files: the zip archive URL for that split
+            # Download the files from the repository and get the cached local paths.
+            metadata_path = dl_manager.download_and_extract(metadata_paths[split])
+            downloaded_files_path = dl_manager.download(files)
+
+            # Hand the archive iterator straight to _generate_examples, the way
+            # the reference scripts pass the zip contents along as "filepath".
+            gs.append(
+                datasets.SplitGenerator(
+                    name=split,
+                    gen_kwargs={
+                        "images": dl_manager.iter_archive(downloaded_files_path),
+                        "metadata_path": metadata_path
+                    }
+                )
+            )
+        return gs
+
+    def _generate_examples(self, images, metadata_path):
+        """Generate images and captions for splits."""
+        file_list = list()
+        caption_list = list()
+        dataIDX_list = list()
+        num_list = list()
+        label_list = list()
+
+        with open(metadata_path, encoding="utf-8") as fin:
+            for line in fin:
+                data = json.loads(line)
+                file_list.append(data["file_name"])
+                caption_list.append(data["caption"])
+                dataIDX_list.append(data["data_idx"])
+                num_list.append(data["number"])
+                label_list.append(data["label"])
+
+        # NOTE: pairing by index assumes the archive yields files in the same
+        # order as the rows of metadata.jsonl.
+        for idx, (file_path, file_obj) in enumerate(images):
+            yield file_path, {
+                "image": {
+                    "path": file_path,
+                    "bytes": file_obj.read()
+                },
+                "caption": caption_list[idx],
+                "data_idx": dataIDX_list[idx],
+                "number": num_list[idx],
+                "label": label_list[idx]
+            }
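For quick reference, here is a minimal usage sketch. It assumes the script is loadable under the repo id hard-coded in _NAME above (this commit actually lives under the mb23 namespace, so the id may need adjusting) and that a "train" split resolves from the folder layout.

from datasets import load_dataset

# Repo id taken from _NAME in the script above; adjust if the script is
# hosted elsewhere. Recent datasets versions also require
# trust_remote_code=True to run a loading script from the Hub.
ds = load_dataset("mickylan2367/LoadingScriptPractice", split="train")

example = ds[0]
print(example["caption"], example["data_idx"], example["number"], example["label"])
example["image"].show()  # the Image feature decodes the stored PNG bytes to a PIL image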
data/test/metadata.jsonl ADDED
The diff for this file is too large to render. See raw diff
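Although the metadata diff is not rendered, _generate_examples above fixes the schema: one JSON object per line with file_name, caption, data_idx, number, and label keys. A sketch of one row, with placeholder values:

import json

# All field values below are illustrative placeholders, not rows from the
# actual metadata.jsonl; only the keys are dictated by the loading script.
row = {
    "file_name": "jazz_000123.png",
    "caption": "A mellow jazz piece with brushed drums and upright bass.",
    "data_idx": 123,
    "number": 0,
    "label": "jazz",  # must be one of the ClassLabel names in _info()
}
print(json.dumps(row))  # one line of metadata.jsonl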
 
train.zip → data/test/test_0000.zip RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:64babd81774429ab6e21398ebb878eb433ec5c4c252f52fe676869243cf8ed38
-size 1950997
+oid sha256:2712c77cf2d69eeb461aedee6a740819d39eca680891bdb94c2f1ce9f56b284d
+size 38553364
data/test/test_0001.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abf218a91459e737a4a32a9b013bdcb3aa507eed74b9cef604dcd692fda2cbbc
+size 83589703
data/train/data_0000.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fa69a62fe1813e0a19003d6032dbfab4fb0f6ea2711e91e236910ac87a98532
+size 42251910
data/train/data_0001.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d37f856fa5f3eaec7db47e2e6915744a7aa6140ecb9cb755661835dcb53307e
+size 82148422
data/train/metadata.jsonl ADDED
The diff for this file is too large to render. See raw diff
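Since _generate_examples pairs archive entries with metadata rows purely by position, a local sanity check that a split's zip yields files in the same order as its metadata.jsonl can catch silent mislabeling. The paths below are hypothetical local copies of the files added in this commit.

import json
import zipfile

# Hypothetical local copies of the files added in this commit.
zip_path = "data/train/data_0000.zip"
meta_path = "data/train/metadata.jsonl"

with open(meta_path, encoding="utf-8") as fin:
    meta_names = [json.loads(line)["file_name"] for line in fin]

with zipfile.ZipFile(zip_path) as zf:
    zip_names = [info.filename for info in zf.infolist() if not info.is_dir()]

# zip() truncates to the shorter list, since the metadata may cover
# more than one archive.
for i, (z, m) in enumerate(zip(zip_names, meta_names)):
    if z != m:
        print(f"order mismatch at row {i}: archive={z!r} metadata={m!r}")
        break
else:
    print(f"{min(len(zip_names), len(meta_names))} entries aligned")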