varora committed
Commit 4452567 · 1 Parent(s): f2981b0
update hit.py
hit.py CHANGED
@@ -123,8 +123,10 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         with open(file_structure) as f:
             file_structure = json.load(f)
         print(file_structure)
-
-
+        if not gender is None:
+            data_urls = {split: [os.path.join(gender, split, filename) for filename in file_structure[gender][split]] for split in splits}
+        else:
+            data_urls = {gender: {split: [os.path.join(gender, split, filename) for filename in file_structure[gender][split]] for split in splits} for gender in ['male', 'female']}
         print(f"data url: {data_urls}")
         archive_paths = dl_manager.download(data_urls)
         print(archive_paths)
@@ -133,7 +135,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": archive_paths['train'],
                     "split": "train",
                 },
             ),
@@ -141,7 +143,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": archive_paths['val'],
                     "split": "validation",
                 },
             ),
@@ -149,7 +151,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath":
+                    "filepath": archive_paths['test'],
                     "split": "test"
                 },
             ),
@@ -161,12 +163,12 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         # List all files in the path .gz
         print(f"file path {filepath}")
-        file_paths = []
+        """file_paths = []
         for root, dirs, files in os.walk(filepath):
             for file in files:
                 if file.endswith('.gz'):
-                    file_paths.append(file)
-        for subject_path in
+                    file_paths.append(file)"""
+        for subject_path in filepath:
             with gzip.open(subject_path, 'rb') as f:
                 data = pickle.load(f)
                 key = data['subject_ID']
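For orientation, the following is a minimal sketch of how the pieces touched by this commit fit together inside a `datasets.GeneratorBasedBuilder`. The `gender`/`splits` variables, the `file_structure` JSON layout, the `archive_paths` keys (`train`/`val`/`test`), and the gzip-pickled per-subject files are taken from the diff above; the `file_structure.json` filename, the builder configs, the version, and the feature schema are placeholder assumptions, not the real hit.py definitions.

```python
import gzip
import json
import os
import pickle

import datasets


class NewDataset(datasets.GeneratorBasedBuilder):
    """Minimal sketch of the builder touched by this commit (configs and schema are placeholders)."""

    VERSION = datasets.Version("1.0.0")      # assumed
    BUILDER_CONFIGS = [                      # assumed: one config per gender
        datasets.BuilderConfig(name="male"),
        datasets.BuilderConfig(name="female"),
    ]
    DEFAULT_CONFIG_NAME = "male"

    def _info(self):
        # Placeholder schema; the real HIT features are not visible in this diff.
        return datasets.DatasetInfo(
            features=datasets.Features({"subject_ID": datasets.Value("string")})
        )

    def _split_generators(self, dl_manager):
        splits = ["train", "val", "test"]    # assumed; must match the archive_paths keys below
        gender = self.config.name            # assumed: the config name plays the role of `gender`

        # Assumed name/location for the JSON listing the per-gender, per-split archives.
        structure_path = dl_manager.download("file_structure.json")
        with open(structure_path) as f:
            file_structure = json.load(f)

        # Same logic the commit adds: per-split file lists for one gender, or a nested
        # {gender: {split: [...]}} mapping when no gender is selected. Note the nested
        # shape is not indexable as archive_paths['train'] below.
        if gender is not None:
            data_urls = {split: [os.path.join(gender, split, filename)
                                 for filename in file_structure[gender][split]]
                         for split in splits}
        else:
            data_urls = {g: {split: [os.path.join(g, split, filename)
                                     for filename in file_structure[g][split]]
                             for split in splits}
                         for g in ["male", "female"]}

        # download() mirrors the structure it is given, so archive_paths keeps the same keys.
        archive_paths = dl_manager.download(data_urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": archive_paths["train"], "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": archive_paths["val"], "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": archive_paths["test"], "split": "test"},
            ),
        ]

    def _generate_examples(self, filepath, split):
        # `filepath` is now the list of downloaded .gz archives for one split, so the
        # os.walk scan that the commit comments out is no longer needed.
        for subject_path in filepath:
            with gzip.open(subject_path, "rb") as f:
                data = pickle.load(f)
            key = data["subject_ID"]
            yield key, {"subject_ID": key}   # placeholder example dict
```

One thing the diff leaves open: when `gender` is None, `data_urls` (and therefore `archive_paths`) is nested by gender, so the `archive_paths['train']` lookups added to `gen_kwargs` only resolve on the single-gender path; the sketch mirrors that behaviour.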