# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

from __future__ import annotations

from collections import deque
from typing import Any, Dict, Generic, Optional, Protocol, Sequence, TypeVar, Union

import numpy as np
import pytorch_lightning as pl
import torch
from hydra.errors import InstantiationException
from hydra.utils import instantiate
from omegaconf import DictConfig
from pytorch_lightning.utilities.types import STEP_OUTPUT
from torch.optim import AdamW, Optimizer
from tqdm import tqdm

from mattergen.diffusion.config import Config
from mattergen.diffusion.data.batched_data import BatchedData
from mattergen.diffusion.diffusion_module import DiffusionModule

T = TypeVar("T", bound=BatchedData)


class OptimizerPartial(Protocol):
"""Callable to instantiate an optimizer."""
def __call__(self, params: Any) -> Optimizer:
raise NotImplementedError
class SchedulerPartial(Protocol):
"""Callable to instantiate a learning rate scheduler."""
def __call__(self, optimizer: Optimizer) -> Any:
raise NotImplementedError
def get_default_optimizer(params):
return AdamW(params=params, lr=1e-4, weight_decay=0, amsgrad=True)
class DiffusionLightningModule(pl.LightningModule, Generic[T]):
"""LightningModule for instantiating and training a DiffusionModule."""
def __init__(
self,
diffusion_module: DiffusionModule[T],
optimizer_partial: Optional[OptimizerPartial] = None,
scheduler_partials: Optional[Sequence[Dict[str, Union[Any, SchedulerPartial]]]] = None,
):
"""_summary_
Args:
diffusion_module: The diffusion module to use.
optimizer_partial: Used to instantiate optimizer.
scheduler_partials: used to instantiate learning rate schedulers
"""
super().__init__()
scheduler_partials = scheduler_partials or []
optimizer_partial = optimizer_partial or get_default_optimizer
self.save_hyperparameters(
ignore=("optimizer_partial", "scheduler_partials", "diffusion_module")
)
self.diffusion_module = diffusion_module
self._optimizer_partial = optimizer_partial
        self._scheduler_partials = scheduler_partials

    @classmethod
def load_from_checkpoint(
cls,
checkpoint_path: str,
map_location: Optional[str] = None,
**kwargs,
) -> DiffusionLightningModule:
"""Load model from checkpoint. kwargs are passed to hydra's instantiate and can override
arguments from the checkpoint config."""
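        # Illustrative usage sketch (the checkpoint path is hypothetical):
        #   module = DiffusionLightningModule.load_from_checkpoint(
        #       "outputs/checkpoints/last.ckpt", map_location="cpu"
        #   )
        # Any **kwargs are forwarded to hydra.utils.instantiate and override fields of the
        # lightning_module config stored in the checkpoint.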
checkpoint = torch.load(checkpoint_path, map_location=map_location)
# The config should have been saved in the checkpoint by AddConfigCallback in run.py
config = Config(**checkpoint["config"])
try:
lightning_module = instantiate(config.lightning_module, **kwargs)
except InstantiationException as e:
print("Could not instantiate model from the checkpoint.")
print(
"If the error is due to an unexpected argument because the checkpoint and the code have diverged, try using load_from_checkpoint_and_config instead."
)
raise e
assert isinstance(lightning_module, cls)
# Restore state of the DiffusionLightningModule.
lightning_module.load_state_dict(checkpoint["state_dict"])
        return lightning_module

    @classmethod
def load_from_checkpoint_and_config(
cls,
checkpoint_path: str,
config: DictConfig,
map_location: Optional[str] = None,
strict: bool = True,
) -> tuple[DiffusionLightningModule, torch.nn.modules.module._IncompatibleKeys]:
"""Load model from checkpoint, but instead of using the config stored in the checkpoint,
use the config passed in as an argument. This is useful when, e.g., an unused argument was
removed in the code but is still present in the checkpoint config."""
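        # Illustrative usage sketch (config construction is hypothetical):
        #   cfg = OmegaConf.load("edited_lightning_module.yaml")
        #   module, keys = DiffusionLightningModule.load_from_checkpoint_and_config(
        #       "outputs/checkpoints/last.ckpt", cfg, strict=False
        #   )
        # With strict=False, `keys` reports which state_dict entries were missing or unexpected.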
checkpoint = torch.load(checkpoint_path, map_location=map_location)
lightning_module = instantiate(config)
assert isinstance(lightning_module, cls)
# Restore state of the DiffusionLightningModule.
result = lightning_module.load_state_dict(checkpoint["state_dict"], strict=strict)
        return lightning_module, result

    def configure_optimizers(self) -> Any:
optimizer = self._optimizer_partial(params=self.diffusion_module.parameters())
if self._scheduler_partials:
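            # Bind the freshly created optimizer into each scheduler factory; any extra keys
            # (e.g. "interval", "frequency") are kept for Lightning's lr scheduler config.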
lr_schedulers = [
{
**scheduler_dict,
"scheduler": scheduler_dict["scheduler"](
optimizer=optimizer,
),
}
for scheduler_dict in self._scheduler_partials
]
return [
optimizer,
], lr_schedulers
else:
            return optimizer

    def training_step(self, train_batch: T, batch_idx: int) -> STEP_OUTPUT:
        return self._calc_loss(train_batch, True)

    def validation_step(self, val_batch: T, batch_idx: int) -> Optional[STEP_OUTPUT]:
        return self._calc_loss(val_batch, False)

    def test_step(self, test_batch: T, batch_idx: int) -> Optional[STEP_OUTPUT]:
        return self._calc_loss(test_batch, False)

    def _calc_loss(self, batch: T, train: bool) -> Optional[STEP_OUTPUT]:
"""Calculate loss and metrics given a batch of clean data."""
loss, metrics = self.diffusion_module.calc_loss(batch)
# Log the results
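        # Note: test_step also calls this with train=False, so test metrics are logged
        # under the "val" suffix as well.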
step_type = "train" if train else "val"
batch_size = batch.get_batch_size()
self.log(
f"loss_{step_type}",
loss,
on_step=train,
on_epoch=True,
prog_bar=train,
batch_size=batch_size,
sync_dist=True,
)
for k, v in metrics.items():
self.log(
f"{k}_{step_type}",
v,
on_step=train,
on_epoch=True,
prog_bar=train,
batch_size=batch_size,
sync_dist=True,
)
return loss