import torch


class FlowMatchScheduler():
    """Flow-matching (rectified flow) scheduler using Euler steps over a shifted sigma schedule."""

    def __init__(self, num_inference_steps=100, num_train_timesteps=1000, shift=3.0, sigma_max=1.0, sigma_min=0.003/1.002):
        self.num_train_timesteps = num_train_timesteps
        self.shift = shift
        self.sigma_max = sigma_max
        self.sigma_min = sigma_min
        self.set_timesteps(num_inference_steps)

    def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0):
        # With denoising_strength < 1, start part-way along the schedule (partial denoising).
        sigma_start = self.sigma_min + (self.sigma_max - self.sigma_min) * denoising_strength
        self.sigmas = torch.linspace(sigma_start, self.sigma_min, num_inference_steps)
        # Timestep shift: sigma -> shift * sigma / (1 + (shift - 1) * sigma).
        self.sigmas = self.shift * self.sigmas / (1 + (self.shift - 1) * self.sigmas)
        self.timesteps = self.sigmas * self.num_train_timesteps

    def step(self, model_output, timestep, sample, to_final=False):
        if isinstance(timestep, torch.Tensor):
            timestep = timestep.cpu()
        # Locate the closest scheduled timestep, then take one Euler step along the predicted velocity.
        timestep_id = torch.argmin((self.timesteps - timestep).abs())
        sigma = self.sigmas[timestep_id]
        if to_final or timestep_id + 1 >= len(self.timesteps):
            sigma_ = 0
        else:
            sigma_ = self.sigmas[timestep_id + 1]
        prev_sample = sample + model_output * (sigma_ - sigma)
        return prev_sample

    def return_to_timestep(self, timestep, sample, sample_stablized):
        # This scheduler doesn't support this function.
        pass

    def add_noise(self, original_samples, noise, timestep):
        if isinstance(timestep, torch.Tensor):
            timestep = timestep.cpu()
        timestep_id = torch.argmin((self.timesteps - timestep).abs())
        sigma = self.sigmas[timestep_id]
        # Linear interpolation between data and noise: x_t = (1 - sigma) * x_0 + sigma * noise.
        sample = (1 - sigma) * original_samples + sigma * noise
        return sample

    def training_target(self, sample, noise, timestep):
        # Velocity target for flow matching: v = noise - sample.
        target = noise - sample
        return target
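
# Minimal usage sketch (not part of the scheduler itself). Assumptions are marked
# explicitly: the random tensors below stand in for a real flow-matching model and
# a real latent batch.
if __name__ == "__main__":
    scheduler = FlowMatchScheduler(num_inference_steps=50)

    # Training side: noise a clean sample and build the regression target.
    clean = torch.randn(1, 4, 64, 64)            # placeholder for a real latent batch
    noise = torch.randn_like(clean)
    timestep = scheduler.timesteps[10]           # an arbitrary scheduled timestep
    noisy = scheduler.add_noise(clean, noise, timestep)
    target = scheduler.training_target(clean, noise, timestep)  # the model should predict this

    # Inference side: Euler integration from pure noise (sigma_max = 1) toward data.
    sample = torch.randn(1, 4, 64, 64)
    for t in scheduler.timesteps:
        model_output = torch.randn_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(model_output, t, sample)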