from typing import Any, Dict, Optional

from lightning import LightningDataModule
from torch.utils.data import DataLoader, Dataset

from src.data.components.hrcwhu import HRCWHU
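
# Note: the `*_pipeline` arguments are dicts of keyword arguments that are
# unpacked into the `HRCWHU` dataset constructor below (e.g. transform or
# augmentation settings). Their exact keys are defined by
# `src.data.components.hrcwhu.HRCWHU`; the `Dict[str, Any]` annotations used
# here are inferred from that `**` unpacking, not from an upstream signature.
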
class HRCWHUDataModule(LightningDataModule):
    """`LightningDataModule` for the HRC_WHU dataset.

    Wraps the `HRCWHU` dataset component and exposes train/val/test dataloaders.
    """

    def __init__(
        self,
        root: str,
        train_pipeline: Dict[str, Any],
        val_pipeline: Dict[str, Any],
        test_pipeline: Dict[str, Any],
        seed: int = 42,
        batch_size: int = 1,
        num_workers: int = 0,
        pin_memory: bool = False,
        persistent_workers: bool = False,
    ) -> None:
        super().__init__()

        # This line allows accessing init params with the `self.hparams` attribute
        # and also ensures init params will be stored in the ckpt.
        self.save_hyperparameters(logger=False)

        self.train_dataset: Optional[Dataset] = None
        self.val_dataset: Optional[Dataset] = None
        self.test_dataset: Optional[Dataset] = None

        self.batch_size_per_device = batch_size

    @property
    def num_classes(self) -> int:
        return len(HRCWHU.METAINFO["classes"])

    def prepare_data(self) -> None:
        """Download data if needed. Lightning ensures that `self.prepare_data()` is called
        only within a single process on CPU, so you can safely add your downloading logic
        within. In case of multi-node training, the execution of this hook depends upon
        `self.prepare_data_per_node`.

        Do not use it to assign state (self.x = y).
        """
        # Instantiate the train split once so any download/extraction happens here.
        HRCWHU(
            root=self.hparams.root,
            phase="train",
            **self.hparams.train_pipeline,
            seed=self.hparams.seed,
        )
        # The test split doubles as the validation split (see `setup`).
        HRCWHU(
            root=self.hparams.root,
            phase="test",
            **self.hparams.test_pipeline,
            seed=self.hparams.seed,
        )

    def setup(self, stage: Optional[str] = None) -> None:
        """Load data. Set variables: `self.train_dataset`, `self.val_dataset`,
        `self.test_dataset`.

        This method is called by Lightning before `trainer.fit()`, `trainer.validate()`,
        `trainer.test()`, and `trainer.predict()`, so be careful not to execute things like
        random split twice! Also, it is called after `self.prepare_data()` and there is a
        barrier in between which ensures that all the processes proceed to `self.setup()`
        once the data is prepared and available for use.

        :param stage: The stage to set up. Either `"fit"`, `"validate"`, `"test"`, or
            `"predict"`. Defaults to ``None``.
        """
        # Divide the batch size by the number of devices, e.g. batch_size=8 on
        # 4 devices yields batch_size_per_device=2.
        if self.trainer is not None:
            if self.hparams.batch_size % self.trainer.world_size != 0:
                raise RuntimeError(
                    f"Batch size ({self.hparams.batch_size}) is not divisible by "
                    f"the number of devices ({self.trainer.world_size})."
                )
            self.batch_size_per_device = self.hparams.batch_size // self.trainer.world_size

        # Load datasets only if they have not been loaded already.
        if not self.train_dataset and not self.val_dataset and not self.test_dataset:
            self.train_dataset = HRCWHU(
                root=self.hparams.root,
                phase="train",
                **self.hparams.train_pipeline,
                seed=self.hparams.seed,
            )
            # There is no separate validation split: the test split, built with
            # `test_pipeline` (`val_pipeline` is not used here), serves as both.
            self.val_dataset = self.test_dataset = HRCWHU(
                root=self.hparams.root,
                phase="test",
                **self.hparams.test_pipeline,
                seed=self.hparams.seed,
            )

    def train_dataloader(self) -> DataLoader[Any]:
        """Create and return the train dataloader.

        :return: The train dataloader.
        """
        return DataLoader(
            dataset=self.train_dataset,
            batch_size=self.batch_size_per_device,
            num_workers=self.hparams.num_workers,
            pin_memory=self.hparams.pin_memory,
            persistent_workers=self.hparams.persistent_workers,
            shuffle=True,
        )

    def val_dataloader(self) -> DataLoader[Any]:
        """Create and return the validation dataloader.

        :return: The validation dataloader.
        """
        return DataLoader(
            dataset=self.val_dataset,
            batch_size=self.batch_size_per_device,
            num_workers=self.hparams.num_workers,
            pin_memory=self.hparams.pin_memory,
            persistent_workers=self.hparams.persistent_workers,
            shuffle=False,
        )

    def test_dataloader(self) -> DataLoader[Any]:
        """Create and return the test dataloader.

        :return: The test dataloader.
        """
        return DataLoader(
            dataset=self.test_dataset,
            batch_size=self.batch_size_per_device,
            num_workers=self.hparams.num_workers,
            pin_memory=self.hparams.pin_memory,
            persistent_workers=self.hparams.persistent_workers,
            shuffle=False,
        )

    def teardown(self, stage: Optional[str] = None) -> None:
        """Lightning hook for cleaning up after `trainer.fit()`, `trainer.validate()`,
        `trainer.test()`, and `trainer.predict()`.

        :param stage: The stage being torn down. Either `"fit"`, `"validate"`, `"test"`,
            or `"predict"`. Defaults to ``None``.
        """
        pass

    def state_dict(self) -> Dict[Any, Any]:
        """Called when saving a checkpoint. Implement to generate and save the datamodule
        state.

        :return: A dictionary containing the datamodule state that you want to save.
        """
        return {}

    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
        """Called when loading a checkpoint. Implement to reload datamodule state given
        datamodule `state_dict()`.

        :param state_dict: The datamodule state returned by `self.state_dict()`.
        """
        pass

if __name__ == "__main__":
    # Minimal smoke test. `root` and the pipeline arguments have no defaults,
    # so placeholder values are passed here (the path and the empty pipeline
    # dicts are illustrative only); nothing touches the filesystem until
    # `prepare_data()`/`setup()` are called.
    _ = HRCWHUDataModule(
        root="data/hrcwhu",
        train_pipeline={},
        val_pipeline={},
        test_pipeline={},
    )
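    # A fuller round-trip, left commented out because it assumes the HRC_WHU
    # data actually exists under `root` and that `HRCWHU` accepts these empty
    # pipeline dicts (both are assumptions, not guarantees of this repo):
    #
    # dm = HRCWHUDataModule(
    #     root="data/hrcwhu",
    #     train_pipeline={},
    #     val_pipeline={},
    #     test_pipeline={},
    #     batch_size=4,
    # )
    # dm.prepare_data()
    # dm.setup("fit")
    # batch = next(iter(dm.train_dataloader()))
    # print(dm.num_classes)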