Add files using upload-large-folder tool
- brain-structure.py +66 -82
- data.zip +3 -0
brain-structure.py
CHANGED
@@ -2,13 +2,13 @@ import os
 import json
 import logging
 import datasets
+from pathlib import Path
 
 logger = logging.getLogger(__name__)
 
 _DESCRIPTION = """
-…
-…
-… whether it's train, validation, or test.
+A collection of T1-weighted .nii.gz structural MRI scans in a BIDS-like arrangement,
+with JSON sidecar metadata indicating train/validation/test splits.
 """
 
 _CITATION = """
@@ -25,44 +25,40 @@ _CITATION = """
 _HOMEPAGE = "https://huggingface.co/datasets/radiata-ai/brain-structure"
 _LICENSE = "ODC-By v1.0"
 
+# The "resolve/main/data.zip" part ensures it grabs data.zip from your 'main' branch.
+_DATA_URL = "https://huggingface.co/datasets/radiata-ai/brain-structure/resolve/main/data.zip"
+
 
 class BrainStructureConfig(datasets.BuilderConfig):
-    """
-    Configuration class for the Brain-Structure dataset.
-    """
+    """Configuration for Brain-Structure dataset (if you need multiple, define them here)."""
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
 
 class BrainStructure(datasets.GeneratorBasedBuilder):
     """
-    A dataset loader for T1 .nii.gz files plus JSON sidecars
-    (train, validation, test).
+    A dataset loader for T1 .nii.gz files plus JSON sidecars stored in a single ZIP.
 
-    …
-    …
-    >>> from datasets import load_dataset
-    >>> ds_val = load_dataset("radiata-ai/brain-structure", split="validation", trust_remote_code=True)
-    >>> ds_train = load_dataset("./brain-structure", split="train")  # local clone
+    Usage:
+      ds_train = load_dataset("radiata-ai/brain-structure", split="train", trust_remote_code=True)
     """
 
     VERSION = datasets.Version("1.0.0")
-
     BUILDER_CONFIGS = [
         BrainStructureConfig(
-            name="…
+            name="default",
            version=VERSION,
-            description="…
-        )
+            description="Structural MRIs with sidecar metadata. Splits (train/val/test) indicated in the sidecars.",
+        )
    ]
-    DEFAULT_CONFIG_NAME = "…
+    DEFAULT_CONFIG_NAME = "default"
 
    def _info(self):
-        """
-        Returns DatasetInfo, including the feature schema and other metadata.
-        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
@@ -88,86 +84,74 @@ class BrainStructure(datasets.GeneratorBasedBuilder):
                },
            }
        ),
-        homepage=_HOMEPAGE,
-        license=_LICENSE,
-        citation=_CITATION,
    )
 
    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """
-        …
-        …
+        Downloads and extracts 'data.zip', then defines train/validation/test splits
+        by matching sidecars with 'split': 'train'/'validation'/'test'.
        """
-        …
-        …
-        …
+        # Download and extract your single ZIP containing all subfolders
+        extracted_dir = dl_manager.download_and_extract(_DATA_URL)
+        # The ZIP will typically unzip into a folder named "data" or similar. We'll just scan everything inside.
 
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
-                gen_kwargs={"data_dir": …
+                gen_kwargs={"data_dir": extracted_dir, "desired_split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
-                gen_kwargs={"data_dir": …
+                gen_kwargs={"data_dir": extracted_dir, "desired_split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
-                gen_kwargs={"data_dir": …
+                gen_kwargs={"data_dir": extracted_dir, "desired_split": "test"},
            ),
        ]
 
    def _generate_examples(self, data_dir, desired_split):
        """
-        Recursively …
-        …
-        if sidecar["split"] == desired_split.
-        …
-        The corresponding .nii.gz is located by prefix matching.
+        Recursively find sidecar JSONs with 'split' matching desired_split.
+        For each, yield an example containing the .nii.gz path + metadata.
        """
        id_ = 0
-        …
-                        "citation": sidecar.get("citation", ""),
-                        "t1_file_name": sidecar.get("t1_file_name", ""),
-                        "radiata_id": sidecar.get("radiata_id", 0),
-                    },
-                }
-                id_ += 1
+        data_path = Path(data_dir)
+        for json_path in data_path.rglob("*_scandata.json"):
+            with open(json_path, "r") as f:
+                sidecar = json.load(f)
+
+            if sidecar.get("split") == desired_split:
+                # Build the matching NIfTI path
+                possible_nii_name = json_path.name.replace("_scandata.json", "_T1w")
+                # Look in the same folder for .nii.gz
+                nii_path = json_path.parent / f"{possible_nii_name}.nii.gz"
+
+                if not nii_path.is_file():
+                    logger.warning(f"No .nii.gz found for {json_path}")
+                    continue
+
+                yield id_, {
+                    "id": str(id_),
+                    "nii_filepath": str(nii_path),
+                    "metadata": {
+                        "split": sidecar.get("split", ""),
+                        "participant_id": sidecar.get("participant_id", ""),
+                        "session_id": sidecar.get("session_id", ""),
+                        "study": sidecar.get("study", ""),
+                        "age": sidecar.get("age", 0),
+                        "sex": sidecar.get("sex", ""),
+                        "clinical_diagnosis": sidecar.get("clinical_diagnosis", ""),
+                        "scanner_manufacturer": sidecar.get("scanner_manufacturer", ""),
+                        "scanner_model": sidecar.get("scanner_model", ""),
+                        "field_strength": sidecar.get("field_strength", ""),
+                        "image_quality_rating": float(sidecar.get("image_quality_rating", 0.0)),
+                        "total_intracranial_volume": float(sidecar.get("total_intracranial_volume", 0.0)),
+                        "license": sidecar.get("license", ""),
+                        "website": sidecar.get("website", ""),
+                        "citation": sidecar.get("citation", ""),
+                        "t1_file_name": sidecar.get("t1_file_name", ""),
+                        "radiata_id": sidecar.get("radiata_id", 0),
+                    },
+                }
+                id_ += 1
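
`dl_manager.download_and_extract(_DATA_URL)` does the heavy lifting in `_split_generators`. Its effect is roughly the standalone sketch below; the real `DownloadManager` also caches under `~/.cache/huggingface` and can verify checksums, which this sketch does not:

```python
# Rough, standalone equivalent of dl_manager.download_and_extract(_DATA_URL).
# No caching, no checksum verification -- illustration only.
import pathlib
import tempfile
import urllib.request
import zipfile

def download_and_extract(url: str) -> str:
    work_dir = pathlib.Path(tempfile.mkdtemp())
    zip_path = work_dir / "data.zip"
    urllib.request.urlretrieve(url, zip_path)   # fetch the archive
    extract_dir = work_dir / "extracted"
    with zipfile.ZipFile(zip_path) as zf:
        zf.extractall(extract_dir)              # unpack every subfolder
    return str(extract_dir)
```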
data.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23e28ac334816528008cb0a3da4256439776b0793da4b7210a752a8feb07a8a4
+size 8431394741
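
Those three lines are a Git LFS pointer: the repository stores only this stub, while the Hub serves the actual ~8.4 GB blob. To grab the archive without going through the loader, `huggingface_hub` can resolve it directly (a sketch assuming `huggingface_hub` is installed):

```python
# Fetch data.zip directly from the Hub (the LFS blob, not the pointer file).
from huggingface_hub import hf_hub_download

zip_path = hf_hub_download(
    repo_id="radiata-ai/brain-structure",
    filename="data.zip",
    repo_type="dataset",  # dataset repo, not a model repo
)
print(zip_path)  # local cache path to the ~8.4 GB archive
```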