# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from typing import Callable

import torch

from mattergen.diffusion.sampling.pc_sampler import Diffusable, PredictorCorrector

BatchTransform = Callable[[Diffusable], Diffusable]
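# A BatchTransform takes a batch and returns a (possibly modified) batch, e.g. stripping
# or retaining conditioning fields before the score model is evaluated.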


def identity(x: Diffusable) -> Diffusable:
    """
    Default function that transforms data to its conditional state
    """
    return x


class GuidedPredictorCorrector(PredictorCorrector):
    """
    Sampler for classifier-free guidance.
    """

    def __init__(
        self,
        *,
        guidance_scale: float,
        remove_conditioning_fn: BatchTransform,
        keep_conditioning_fn: BatchTransform | None = None,
        **kwargs,
    ):
        """
        guidance_scale: gamma in p_gamma(x|y)=p(x)p(y|x)**gamma for classifier-free guidance
        remove_conditioning_fn: function that removes conditioning from the data
        keep_conditioning_fn: function that will be applied to the data before evaluating the
            conditional score. For example, this function might drop some fields that you never
            want to condition on or add fields that indicate which conditions should be respected.
        **kwargs: passed on to parent class constructor.
        """
        super().__init__(**kwargs)
        self._remove_conditioning_fn = remove_conditioning_fn
        self._keep_conditioning_fn = keep_conditioning_fn or identity
        self._guidance_scale = guidance_scale

    def _score_fn(
        self,
        x: Diffusable,
        t: torch.Tensor,
    ) -> Diffusable:
        """For each field, regardless of whether the corruption process is SDE or D3PM, we guide
        the score in the same way here, by taking a linear combination of the conditional and
        unconditional score model outputs. For discrete fields, the score model outputs are
        interpreted as logits, so the linear combination here means we compute logits for
        p_\gamma(x|y) = p(x)^(1-\gamma) p(x|y)^\gamma
        """

        def get_unconditional_score():
            return super(GuidedPredictorCorrector, self)._score_fn(
                x=self._remove_conditioning_fn(x), t=t
            )

        def get_conditional_score():
            return super(GuidedPredictorCorrector, self)._score_fn(
                x=self._keep_conditioning_fn(x), t=t
            )

        if abs(self._guidance_scale - 1) < 1e-15:
            # guidance_scale == 1: plain conditional score.
            return get_conditional_score()
        elif abs(self._guidance_scale) < 1e-15:
            # guidance_scale == 0: plain unconditional score.
            return get_unconditional_score()
        else:
            # guided_score = (1 - guidance_scale) * unconditional_score + guidance_scale * conditional_score
            conditional_score = get_conditional_score()
            unconditional_score = get_unconditional_score()
            return unconditional_score.replace(
                **{
                    k: torch.lerp(
                        unconditional_score[k], conditional_score[k], self._guidance_scale
                    )
                    for k in self._multi_corruption.corrupted_fields
                }
            )
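

# Usage sketch (illustrative only, not part of the module's API). The keyword arguments
# forwarded to the parent class are assumptions; consult pc_sampler.PredictorCorrector for
# the actual constructor signature.
#
#     def remove_conditioning(batch: Diffusable) -> Diffusable:
#         # Hypothetical helper: return the batch with its conditioning fields cleared so the
#         # unconditional branch of the score model is evaluated.
#         ...
#
#     sampler = GuidedPredictorCorrector(
#         guidance_scale=2.0,  # gamma > 1 strengthens the conditioning signal
#         remove_conditioning_fn=remove_conditioning,
#         keep_conditioning_fn=None,  # defaults to `identity`
#         **predictor_corrector_kwargs,  # whatever PredictorCorrector itself requires
#     )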