mickylan2367 committed
Commit f9a5794 · Parent(s): 890e907

changed loading script
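
For context, a minimal usage sketch (hypothetical, not part of this commit): it assumes the repo id matches the `_NAME` value set in the script below and requests the "train" config declared there.

    from datasets import load_dataset

    # Repo id assumed from _NAME in the script; "train" is the only config in BUILDER_CONFIGS
    ds = load_dataset("mickylan2367/GraySpectrogram", "train", split="train")
    print(ds[0])  # per the script, examples include at least "number" and "label"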

GraySpectrogram2.py → GraySpectrogram.py RENAMED
@@ -10,7 +10,7 @@ from pathlib import Path
 
 
 # Fill in the configuration here
-_NAME = "mickylan2367/LoadingScriptPractice"
+_NAME = "mickylan2367/GraySpectrogram"
 _EXTENSION = [".png"]
 _REVISION = "main"
 
@@ -35,17 +35,13 @@ Using for Project Learning...
 
 
 
-class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
+class GraySpectrogram2(datasets.GeneratorBasedBuilder):
 
     # Subsets of the data are prepared here
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
             name="train",
             description=_DESCRIPTION,
-            # data_url = train_data_url["train"][0],
-            # metadata_urls = {
-            #     "train" : train_metadata_paths["train"][0]
-            # }
         )
     ]
 
@@ -85,54 +81,82 @@ class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
         # Fetch the data from the Hugging Face repository
         hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
 
+        # Get the URLs of the jsonl metadata files
         metadata_urls = DataFilesDict.from_hf_repo(
             {datasets.Split.TRAIN: ["**"]},
             dataset_info=hfh_dataset_info,
             allowed_extensions=["jsonl", ".jsonl"],
         )
 
-        # Get the **.zip URLs as a Dict?
+        # Re-store the URLs as a dict <- may be unnecessary once the dict is built correctly
+        metadata_paths = dict()
+        for path in metadata_urls["train"]:
+            dname = dirname(path)
+            folder = basename(Path(dname))
+            # fname = basename(path)
+            metadata_paths[folder] = path
+
+        # For the image data, get the **.zip URLs as a Dict?
         data_urls = DataFilesDict.from_hf_repo(
             {datasets.Split.TRAIN: ["**"]},
             dataset_info=hfh_dataset_info,
             allowed_extensions=["zip", ".zip"],
         )
 
-        data_paths = dict()
+        data_url = dict()
+        train_data_url = list()
+        test_data_url = list()
         for path in data_urls["train"]:
             dname = dirname(path)
             folder = basename(Path(dname))
-            data_paths[folder] = path
-
-        metadata_paths = dict()
-        for path in metadata_urls["train"]:
-            dname = dirname(path)
-            folder = basename(Path(dname))
-            metadata_paths[folder] = path
-
+            # bucket the URLs by split folder
+            if folder == "train":
+                train_data_url.append(path)
+            if folder == "test":
+                test_data_url.append(path)
+
+        data_url["train"] = train_data_url
+        data_url["test"] = test_data_url
+
+        # iteration
+        iter_archive = dict()
+        for split, files in data_url.items():
+            file_name_obj = list()
+            for file_ in files:
+                downloaded_files_path = dl_manager.download(file_)
+                for file_obj in dl_manager.iter_archive(downloaded_files_path):
+                    # print(file_obj)
+                    if file_obj[0].startswith('content/'):
+                        fname = basename(file_obj[0])
+                        file_obj = (fname, file_obj[1])
+                        file_name_obj.append(file_obj)
+                        # print(file_obj)
+                    else:
+                        file_name_obj.append(file_obj)
+                        # print(file_obj)
+            iter_archive[split] = file_name_obj
 
         gs = []
-        for split, files in data_paths.items():
+        for split, files in iter_archive.items():
             '''
             split : "train" or "test" or "val"
             files : zip files
             '''
             # Get the list of URLs downloaded from the repository and cached for now
             metadata_path = dl_manager.download_and_extract(metadata_paths[split])
-            downloaded_files_path = dl_manager.download(files)
-
             # In the original code, the zip contents seem to be handed straight to _generate_examples as "filepath"?
             gs.append(
-                datasets.SplitGenerator(
-                    name = split,
-                    gen_kwargs={
-                        "images" : dl_manager.iter_archive(downloaded_files_path),
-                        "metadata_path": metadata_path
-                    }
-                )
+                datasets.SplitGenerator(
+                    name = split,
+                    gen_kwargs = {
+                        "images" : iter(iter_archive[split]),
+                        "metadata_path": metadata_path  # pass the metadata path
+                    }
+                )
             )
         return gs
 
+
     def _generate_examples(self, images, metadata_path):
         """Generate images and captions for splits."""
         # with open(metadata_path, encoding="utf-8") as f:
@@ -163,5 +187,3 @@ class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
                 "number" : num_list[idx],
                 "label": label_list[idx]
             }
-
-
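
In short, the new `_split_generators` buckets the zip URLs by their parent folder, downloads each archive, and pre-materializes the `(filename, file object)` pairs from `dl_manager.iter_archive`, stripping a leading `content/` prefix from member names. A condensed, self-contained sketch of that bucketing and renaming logic (hypothetical helper names; the `data/{train,test}/*.zip` layout is assumed from the file list below):

    from os.path import basename, dirname

    def bucket_by_split(zip_urls):
        """Group archive URLs into {"train": [...], "test": [...]} by parent folder name."""
        buckets = {"train": [], "test": []}
        for url in zip_urls:
            folder = basename(dirname(url))
            if folder in buckets:
                buckets[folder].append(url)
        return buckets

    def normalize_member_name(name):
        """Mirror the script's special case for members archived under 'content/'."""
        return basename(name) if name.startswith("content/") else name

    # Example:
    # bucket_by_split(["data/train/train_0000.zip", "data/test/test_0002.zip"])
    #   -> {"train": ["data/train/train_0000.zip"], "test": ["data/test/test_0002.zip"]}
    # normalize_member_name("content/img_001.png") -> "img_001.png"

One caveat with the pre-materializing approach: the file objects yielded by `iter_archive` are generally only valid while the archive is being iterated, so collecting them into a list up front may be fragile compared with iterating lazily inside `_generate_examples`.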
 
 
data/test/metadata.jsonl CHANGED
The diff for this file is too large to render. See raw diff
 
data/test/test_0002.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b799a2c436036f92650f195914f8fccf9fc6fb81378438f0f94a97828cff70d
+size 95043607

data/test/test_0003.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:145c674bf0d7f296b96a9795e406d870e465eb3269f12c6ab7e98db3e9ea8015
+size 53560142
data/train/metadata.jsonl CHANGED
The diff for this file is too large to render. See raw diff
 
data/train/{data_0000.zip → train_0000.zip} RENAMED
File without changes
data/train/{data_0001.zip → train_0001.zip} RENAMED
File without changes
data/train/train_0002.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38feda20bf89720100fd388ed7f78214177a958a4f6214c8460bc03bb21e3142
+size 71536214

data/train/train_0003.zip ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4391664cb29e687e63d591f5b9350883352ec0db0f32f1e8e0c780b463ef1532
+size 73587192
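
The `ADDED` entries above are Git LFS pointer files (version, oid, size) rather than the archives themselves. Purely as illustration, a tiny hypothetical parser for that pointer format:

    def parse_lfs_pointer(text: str) -> dict:
        """Split a Git LFS pointer file into its key/value fields."""
        return dict(line.strip().split(" ", 1) for line in text.strip().splitlines())

    pointer = "\n".join([
        "version https://git-lfs.github.com/spec/v1",
        "oid sha256:4b799a2c436036f92650f195914f8fccf9fc6fb81378438f0f94a97828cff70d",
        "size 95043607",
    ])
    fields = parse_lfs_pointer(pointer)
    print(fields["oid"], fields["size"])  # sha256:4b79... 95043607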