fix vae nan bug
app.py
CHANGED
@@ -218,8 +218,14 @@ if NEW_MODEL:
     print(missing_keys, extra_keys)
     assert len(missing_keys) == 0
     vae_state_dict = torch.load(vae_path, map_location='cpu')['state_dict']
+    print(f"vae_state_dict encoder dtype: {vae_state_dict['encoder.conv_in.weight'].dtype}")
     autoencoder = vqvae.create_model(3, 3, opts.latent_dim).eval().requires_grad_(False)
+    print(f"autoencoder encoder dtype: {next(autoencoder.encoder.parameters()).dtype}")
+    print(f"encoder before load_state_dict parameters min: {min([p.min() for p in autoencoder.encoder.parameters()])}")
+    print(f"encoder before load_state_dict parameters max: {max([p.max() for p in autoencoder.encoder.parameters()])}")
     missing_keys, extra_keys = autoencoder.load_state_dict(vae_state_dict, strict=False)
+    print(f"encoder after load_state_dict parameters min: {min([p.min() for p in autoencoder.encoder.parameters()])}")
+    print(f"encoder after load_state_dict parameters max: {max([p.max() for p in autoencoder.encoder.parameters()])}")
     autoencoder = autoencoder.to(device)
     autoencoder.eval()
     assert len(missing_keys) == 0
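The added prints bracket load_state_dict with dtype and min/max checks to narrow down where the NaNs enter: in the checkpoint tensors themselves, in the freshly initialized encoder, or only after loading. As a minimal sketch (not part of this commit, assuming the same vae_state_dict produced by torch.load above), the checkpoint could also be scanned directly for NaN/Inf values before loading:

import torch

def scan_state_dict(state_dict):
    # Report every floating-point tensor in the checkpoint that contains
    # NaN or Inf values, along with its dtype and value range.
    for name, tensor in state_dict.items():
        if torch.is_tensor(tensor) and tensor.is_floating_point():
            if torch.isnan(tensor).any() or torch.isinf(tensor).any():
                print(f"{name}: dtype={tensor.dtype}, "
                      f"min={tensor.min().item()}, max={tensor.max().item()}")

# Usage with the checkpoint loaded in app.py:
# vae_state_dict = torch.load(vae_path, map_location='cpu')['state_dict']
# scan_state_dict(vae_state_dict)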