from typing import Callable

import torch

from mattergen.diffusion.sampling.pc_sampler import Diffusable, PredictorCorrector

BatchTransform = Callable[[Diffusable], Diffusable]
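
# Illustrative sketch (not part of the original module): a BatchTransform maps a
# batch to a batch, e.g. by blanking out conditioning fields. The field name
# "some_condition" is a hypothetical placeholder, and `replace` is assumed to
# behave as it does on score batches in GuidedPredictorCorrector._score_fn below.
#
#   def remove_some_condition(x: Diffusable) -> Diffusable:
#       return x.replace(some_condition=None)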


def identity(x: Diffusable) -> Diffusable:
    """
    Default keep_conditioning_fn: returns the data unchanged, i.e. in its conditional state.
    """
    return x


class GuidedPredictorCorrector(PredictorCorrector):
    """
    Sampler for classifier-free guidance.
    """

    def __init__(
        self,
        *,
        guidance_scale: float,
        remove_conditioning_fn: BatchTransform,
        keep_conditioning_fn: BatchTransform | None = None,
        **kwargs,
    ):
        """
        guidance_scale: gamma in p_gamma(x|y) = p(x) * p(y|x)**gamma for classifier-free guidance.
        remove_conditioning_fn: function that removes conditioning from the data.
        keep_conditioning_fn: function applied to the data before evaluating the conditional score.
            For example, it might drop fields that should never be conditioned on, or add fields
            indicating which conditions should be respected. Defaults to the identity transform.
        **kwargs: passed on to the parent class constructor.
        """
        super().__init__(**kwargs)
        self._remove_conditioning_fn = remove_conditioning_fn
        self._keep_conditioning_fn = keep_conditioning_fn or identity
        self._guidance_scale = guidance_scale

    def _score_fn(
        self,
        x: Diffusable,
        t: torch.Tensor,
    ) -> Diffusable:
        """
        For each field, regardless of whether the corruption process is an SDE or D3PM, the score
        is guided in the same way: by taking a linear combination of the conditional and
        unconditional score model outputs.

        For discrete fields, the score model outputs are interpreted as logits, so the linear
        combination here means we compute logits for

            p_gamma(x|y) = p(x)^(1 - gamma) * p(x|y)^gamma
        """

        def get_unconditional_score():
            return super(GuidedPredictorCorrector, self)._score_fn(
                x=self._remove_conditioning_fn(x), t=t
            )

        def get_conditional_score():
            return super(GuidedPredictorCorrector, self)._score_fn(
                x=self._keep_conditioning_fn(x), t=t
            )

        if abs(self._guidance_scale - 1) < 1e-15:
            return get_conditional_score()
        elif abs(self._guidance_scale) < 1e-15:
            return get_unconditional_score()
        else:
            conditional_score = get_conditional_score()
            unconditional_score = get_unconditional_score()
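            # torch.lerp(u, c, gamma) = u + gamma * (c - u) = (1 - gamma) * u + gamma * c,
            # i.e. the classifier-free guidance blend, applied per corrupted field.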
            return unconditional_score.replace(
                **{
                    k: torch.lerp(
                        unconditional_score[k], conditional_score[k], self._guidance_scale
                    )
                    for k in self._multi_corruption.corrupted_fields
                }
            )