import json
import logging
import os

import datasets

logger = logging.getLogger(__name__)

_DESCRIPTION = """
This dataset contains T1-weighted .nii.gz structural MRI scans in a BIDS-like arrangement.
Each scan has an associated JSON sidecar with metadata, including a 'split' field
indicating whether it's train, validation, or test.
"""

_CITATION = """
@dataset{Radiata-Brain-Structure,
  author    = {Jesse Brown and Clayton Young},
  title     = {Brain-Structure: Processed Structural MRI Brain Scans Across the Lifespan},
  year      = {2025},
  url       = {https://huggingface.co/datasets/radiata-ai/brain-structure},
  note      = {Version 1.0},
  publisher = {Hugging Face}
}
"""

_HOMEPAGE = "https://huggingface.co/datasets/radiata-ai/brain-structure"

_LICENSE = "ODC-By v1.0"


class BrainStructureConfig(datasets.BuilderConfig):
    """
    Configuration class for the Brain-Structure dataset.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)


class BrainStructure(datasets.GeneratorBasedBuilder):
    """
    A dataset loader for T1 .nii.gz files plus JSON sidecars indicating splits
    (train, validation, test).

    Examples of how users typically load this dataset:
        >>> from datasets import load_dataset
        >>> ds_val = load_dataset("radiata-ai/brain-structure", split="validation", trust_remote_code=True)
        >>> ds_train = load_dataset("./brain-structure", split="train")  # local clone
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        BrainStructureConfig(
            name="all",
            version=VERSION,
            description="All structural MRI data in a BIDS-like arrangement, labeled with train/val/test splits."
        ),
    ]
    DEFAULT_CONFIG_NAME = "all"

    def _info(self):
        """
        Returns DatasetInfo, including the feature schema and other metadata.
        """
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "nii_filepath": datasets.Value("string"),
                    "metadata": {
                        "split": datasets.Value("string"),
                        "participant_id": datasets.Value("string"),
                        "session_id": datasets.Value("string"),
                        "study": datasets.Value("string"),
                        "age": datasets.Value("int32"),
                        "sex": datasets.Value("string"),
                        "clinical_diagnosis": datasets.Value("string"),
                        "scanner_manufacturer": datasets.Value("string"),
                        "scanner_model": datasets.Value("string"),
                        "field_strength": datasets.Value("string"),
                        "image_quality_rating": datasets.Value("float"),
                        "total_intracranial_volume": datasets.Value("float"),
                        "license": datasets.Value("string"),
                        "website": datasets.Value("string"),
                        "citation": datasets.Value("string"),
                        "t1_file_name": datasets.Value("string"),
                        "radiata_id": datasets.Value("int32"),
                    },
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """
        Creates SplitGenerators for 'train', 'validation', and 'test'.

        *No actual download* is done here, so data_dir is simply '.'.
        Why '.'? Because whether the dataset is used locally or from the Hub,
        Hugging Face will place (or reference) files in this same folder context.
        """
        data_dir = "."  # The local folder containing subdirectories like IXI/, DLBS/, etc.

        # NOTE: the leftover debug print() was removed; lazy %-style args keep
        # the abspath computation out of the hot path when logging is disabled.
        logger.info("BrainStructure: scanning data in %s", os.path.abspath(data_dir))

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": data_dir, "desired_split": "train"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_dir": data_dir, "desired_split": "validation"}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_dir": data_dir, "desired_split": "test"}
            ),
        ]

    def _generate_examples(self, data_dir, desired_split):
        """
        Recursively walks the directory structure, looking for JSON sidecar files
        ending in '_scandata.json'. For each matching file, yields an example if
        sidecar["split"] == desired_split. The corresponding .nii.gz is located
        by prefix matching.

        Traversal is sorted so example ordering (and thus example ids) is
        deterministic across filesystems; os.walk alone gives no order guarantee.
        """
        id_ = 0
        for root, dirs, files in os.walk(data_dir):
            dirs.sort()  # in-place sort makes the walk order deterministic
            for fname in sorted(files):
                # If you only want "msub" files, you can add: if fname.startswith("msub") and ...
                if not fname.endswith("_scandata.json"):
                    continue
                sidecar_path = os.path.join(root, fname)
                try:
                    with open(sidecar_path, "r", encoding="utf-8") as f:
                        sidecar = json.load(f)
                except (json.JSONDecodeError, OSError) as err:
                    # A single unreadable sidecar should not abort the whole build.
                    logger.warning(f"Skipping unreadable sidecar {sidecar_path}: {err}")
                    continue

                if sidecar.get("split") != desired_split:
                    continue

                # Find the .nii.gz prefix
                nii_prefix = fname.replace("_scandata.json", "_T1w")
                nii_filepath = None
                for potential_file in files:
                    if potential_file.startswith(nii_prefix) and potential_file.endswith(".nii.gz"):
                        nii_filepath = os.path.join(root, potential_file)
                        break

                if not nii_filepath:
                    logger.warning(f"No corresponding .nii.gz found for {sidecar_path}")
                    continue

                yield id_, {
                    "id": str(id_),
                    "nii_filepath": nii_filepath,
                    "metadata": {
                        "split": sidecar.get("split", ""),
                        "participant_id": sidecar.get("participant_id", ""),
                        "session_id": sidecar.get("session_id", ""),
                        "study": sidecar.get("study", ""),
                        "age": sidecar.get("age", 0),
                        "sex": sidecar.get("sex", ""),
                        "clinical_diagnosis": sidecar.get("clinical_diagnosis", ""),
                        "scanner_manufacturer": sidecar.get("scanner_manufacturer", ""),
                        "scanner_model": sidecar.get("scanner_model", ""),
                        "field_strength": sidecar.get("field_strength", ""),
                        # `or` guards against an explicit null in the sidecar,
                        # which would make float(None) raise TypeError.
                        "image_quality_rating": float(sidecar.get("image_quality_rating") or 0.0),
                        "total_intracranial_volume": float(sidecar.get("total_intracranial_volume") or 0.0),
                        "license": sidecar.get("license", ""),
                        "website": sidecar.get("website", ""),
                        "citation": sidecar.get("citation", ""),
                        "t1_file_name": sidecar.get("t1_file_name", ""),
                        "radiata_id": sidecar.get("radiata_id", 0),
                    },
                }
                id_ += 1