Datasets: mb23 / (Languages: English)

Commit 02219b3 · 1 Parent(s): 18b90aa
mickylan2367 committed: added another contructure
GraySpectrogram.py CHANGED
@@ -82,78 +82,65 @@ class GraySpectrogram2(datasets.GeneratorBasedBuilder):
         hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)

         # Get the URLs of the metadata jsonl files
-        metadata_urls = DataFilesDict.from_hf_repo(
-            {datasets.Split.TRAIN: ["**"]},
+        # Could this extraction be done differently?
+        train_metadata_url = DataFilesDict.from_hf_repo(
+            {datasets.Split.TRAIN: ["data/train/**"]},
             dataset_info=hfh_dataset_info,
             allowed_extensions=["jsonl", ".jsonl"],
         )

-        # Re-store the URLs in a dict <- may be unnecessary if a proper dict was built in the first place
-        metadata_paths = dict()
-        for path in metadata_urls["train"]:
-            dname = dirname(path)
-            folder = basename(Path(dname))
-            # fname = basename(path)
-            metadata_paths[folder] = path
+        test_metadata_url = DataFilesDict.from_hf_repo(
+            {datasets.Split.TEST: ["data/test/**"]},
+            dataset_info=hfh_dataset_info,
+            allowed_extensions=["jsonl", ".jsonl"],
+        )
+
+        metadata_urls = dict()
+        metadata_urls["train"] = train_metadata_url["train"]
+        metadata_urls["test"] = test_metadata_url["test"]

         # Get the image data (**.zip) URLs as a dict?
-        data_urls = DataFilesDict.from_hf_repo(
-            {datasets.Split.TRAIN: ["**"]},
+        # Get the **.zip URLs as a dict?
+        train_data_url = DataFilesDict.from_hf_repo(
+            {datasets.Split.TRAIN: ["data/train/**"]},
             dataset_info=hfh_dataset_info,
             allowed_extensions=["zip", ".zip"],
         )

-        data_url = dict()
-        train_data_url = list()
-        test_data_url = list()
-        for path in data_urls["train"]:
-            dname = dirname(path)
-            folder = basename(Path(dname))
-            # dict type
-            if folder=="train":
-                train_data_url.append(path)
-            if folder == "test":
-                test_data_url.append(path)
-
-        data_url["train"] = train_data_url
-        data_url["test"] = test_data_url
-
-        # iteration
-        iter_archive = dict()
-        for split, files in data_url.items():
-            file_name_obj = list()
-            for file_ in files:
-                downloaded_files_path = dl_manager.download(file_)
-                for file_obj in dl_manager.iter_archive(downloaded_files_path):
-                    # print(file_obj)
-                    if file_obj[0].startswith('content/'):
-                        fname = basename(file_obj[0])
-                        file_obj = (fname, file_obj[1])
-                        file_name_obj.append(file_obj)
-                        # print(file_obj)
-                    else:
-                        file_name_obj.append(file_obj)
-                        # print(file_obj)
-            iter_archive[split] = file_name_obj
+        test_data_url = DataFilesDict.from_hf_repo(
+            {datasets.Split.TEST: ["data/test/**"]},
+            dataset_info=hfh_dataset_info,
+            allowed_extensions=["zip", ".zip"]
+        )
+        data_urls = dict()
+        data_urls["train"] = train_data_url["train"]
+        data_urls["test"] = test_data_url["test"]

         gs = []
-        for split, files in iter_archive.items():
-            '''
-            split : "train" or "test" or "val"
-            files : zip files
-            '''
-            # Get the list of URLs downloaded from the repository and cached for now
-            metadata_path = dl_manager.download_and_extract(metadata_paths[split])
-            # In the original code the zip file contents seem to be passed to _generate_example directly as "filepath"?
-            gs.append(
-                datasets.SplitGenerator(
-                    name = split,
-                    gen_kwargs = {
-                        "images" : iter(iter_archive[split]),
-                        "metadata_path": metadata_path  # pass the metadata path
-                    }
+
+        for split, file_list in data_urls.items():
+            metadata_list = metadata_urls[split]
+            for i, file_ in enumerate(file_list):
+                '''
+                split : "train" or "test" or "val"
+                files : zip files
+                '''
+                # print(file_)
+                # print(metadata_list[0])
+                # # Get the list of URLs downloaded from the repository and cached for now
+                metadata_path = dl_manager.download_and_extract(metadata_list[i])
+                downloaded_files = dl_manager.download(file_)
+                # # In the original code the zip file contents seem to be passed to _generate_example directly as "filepath"?
+                gs.append(
+                    datasets.SplitGenerator(
+                        name = split,
+                        gen_kwargs = {
+                            # "images" : iter(iter_archive[split]),
+                            "images" : downloaded_files,
+                            "metadata_path": metadata_path  # pass the metadata path
+                        }
+                    )
                 )
-            )
         return gs
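The added comment asks whether this extraction step could be done differently. Since DataFilesDict.from_hf_repo already takes a mapping of splits to glob patterns (the removed code passed {datasets.Split.TRAIN: ["**"]}), one possible simplification is to resolve both splits in a single call per file type. This is only a sketch against the same hfh_dataset_info, not part of this commit:

    # Sketch only: one DataFilesDict per file type, covering both splits at once.
    metadata_urls = DataFilesDict.from_hf_repo(
        {
            datasets.Split.TRAIN: ["data/train/**"],
            datasets.Split.TEST: ["data/test/**"],
        },
        dataset_info=hfh_dataset_info,
        allowed_extensions=["jsonl", ".jsonl"],
    )
    data_urls = DataFilesDict.from_hf_repo(
        {
            datasets.Split.TRAIN: ["data/train/**"],
            datasets.Split.TEST: ["data/test/**"],
        },
        dataset_info=hfh_dataset_info,
        allowed_extensions=["zip", ".zip"],
    )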
 
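The comments carried over from the old code wonder how the zip contents reach _generate_example. Under the new gen_kwargs, each SplitGenerator receives the local path of one downloaded zip as "images" and one extracted metadata_*.jsonl shard as "metadata_path". Below is a minimal sketch of a _generate_examples that would consume those arguments; the "file_name" key (the usual imagefolder convention), the matching of rows to archive members, and the yielded fields are assumptions for illustration, not part of this commit:

    import json
    import zipfile
    from os.path import basename

    def _generate_examples(self, images, metadata_path):
        # Hypothetical counterpart to the new _split_generators (not in the commit).
        # "metadata_path" is one metadata_*.jsonl shard; index its rows by bare file name.
        metadata = {}
        with open(metadata_path, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                metadata[basename(row["file_name"])] = row  # "file_name" key is assumed

        # "images" is the cached local path of one zip returned by dl_manager.download().
        key = 0
        with zipfile.ZipFile(images) as zf:
            for member in zf.namelist():
                fname = basename(member)  # strips a possible "content/" prefix, as the old code did
                if fname in metadata:
                    yield key, {
                        "image": {"path": fname, "bytes": zf.read(member)},
                        **{k: v for k, v in metadata[fname].items() if k != "file_name"},
                    }
                    key += 1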
data/test/metadata_test/metadata_0000.jsonl ADDED
data/test/metadata_test/metadata_0001.jsonl ADDED
data/test/{metadata.jsonl → metadata_test/metadata_0002.jsonl} RENAMED
data/test/metadata_test/metadata_0003.jsonl ADDED
data/test/metadata_test/metadata_0004.jsonl ADDED
data/train/metadata_train/metadata_0000.jsonl ADDED
data/train/{metadata.jsonl → metadata_train/metadata_0001.jsonl} RENAMED
data/train/metadata_train/metadata_0002.jsonl ADDED
data/train/metadata_train/metadata_0003.jsonl ADDED
data/train/metadata_train/metadata_0004.jsonl ADDED

(The diffs for these jsonl files are too large to render; see the raw files.)
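The listing above shows the metadata split into five numbered shards per split (metadata_0000.jsonl through metadata_0004.jsonl), and the new loop in _split_generators pairs data_urls[split][i] with metadata_urls[split][i] purely by position. A small guard along these lines, hypothetical and not in the commit, would catch a count mismatch between zip archives and metadata shards before the splits are built:

    # Sketch only: the pairing is positional, so the counts must match per split.
    for split in data_urls:
        n_zip = len(data_urls[split])
        n_meta = len(metadata_urls[split])
        assert n_zip == n_meta, f"{split}: {n_zip} zip archives but {n_meta} metadata shards"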