
vilarin committed
Commit c075c0f · verified · 1 Parent(s): 893f9cf

Update app.py

Files changed (1):
  1. app.py (+3 −3)
app.py CHANGED
@@ -101,12 +101,12 @@ class ModelWrapper:
     #current_timesteps = current_timesteps.to(torch.float16)
     print(f'current_timestpes: {current_timesteps.dtype}')
     eval_images = self.model(noise, current_timesteps, prompt_embed, added_cond_kwargs=unet_added_conditions).sample
-
+    print(eval_images.dtype)
     eval_images = get_x0_from_noise(noise, eval_images, alphas_cumprod, current_timesteps).to(self.DTYPE)
-
+    print(eval_images.dtype)
     next_timestep = current_timesteps - step_interval
     noise = self.scheduler.add_noise(eval_images, torch.randn_like(eval_images), next_timestep).to(torch.float16)
-
+    print(noise.dtype)
     if fast_vae_decode:
         eval_images = self.tiny_vae.decode(eval_images.to(self.tiny_vae_dtype) / self.tiny_vae.config.scaling_factor, return_dict=False)[0]
     else:
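
The three added print calls trace tensor dtypes through the sampling loop, where fp16 model outputs meet fp32 scheduler buffers. Below is a minimal, self-contained sketch of the kind of dtype promotion such tracing exposes; the shapes, the x0 formula, and the log_dtype helper are illustrative assumptions, not code from app.py.

# Sketch only: shapes, values, and the x0 formula below are illustrative,
# not taken from app.py.
import torch

def log_dtype(name, t):
    # Print a tensor's dtype at a pipeline stage and pass it through unchanged.
    print(f"{name}: {t.dtype}")
    return t

x_t = torch.randn(1, 4, 64, 64, dtype=torch.float16)   # current latent (fp16)
eps = torch.randn(1, 4, 64, 64, dtype=torch.float16)   # UNet noise prediction (fp16)
alphas_cumprod = torch.linspace(0.9999, 1e-4, 1000)    # schedulers typically keep this in fp32
t = torch.tensor([999])

alpha_bar = alphas_cumprod[t].view(-1, 1, 1, 1)
x0 = (x_t - (1 - alpha_bar).sqrt() * eps) / alpha_bar.sqrt()   # standard x0-from-noise estimate

log_dtype("x0 before cast", x0)                         # float32: promoted by the fp32 alphas
log_dtype("x0 after cast", x0.to(torch.float16))        # explicit cast, like .to(self.DTYPE) in the diff

The .to(self.DTYPE) and .to(torch.float16) casts already visible in the context lines serve exactly this purpose; the added prints confirm at which step promotion would otherwise slip in.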