QinOwen committed · Commit 12fc597 · 1 parent: cb70887

debug-seed
VADER-VideoCrafter/lvdm/models/samplers/ddim.py CHANGED

```diff
@@ -152,8 +152,10 @@ class DDIMSampler(object):
             img = torch.randn(shape, device=device)
         else:
             img = x_T
-
-        print(
+
+        print("x_T: ", x_T)
+        print("shape: ", shape)
+        print('random seed debug: ', torch.randn(100, device=device).sum())
         print("Debug initial noise: ", img.sum().item())
         print("noise device: ", img.device)
 
```
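The added `torch.randn(100, device=device).sum()` line works as an RNG fingerprint: it collapses the current generator state into one scalar that can be compared across runs or ranks. Below is a minimal, self-contained sketch of that probe idea; the helper name `rng_fingerprint` is ours, not from the repo:

```python
import torch

def rng_fingerprint(device: str = "cpu", n: int = 100) -> float:
    """Reduce n draws from the current global RNG to a single scalar.

    If two runs (or two distributed ranks) print the same value here, their
    torch RNG state for `device` is in sync at this point; any upstream
    divergence (a different seed, an extra randn call) changes the sum.
    Note that the probe itself advances the RNG state.
    """
    return torch.randn(n, device=device).sum().item()

torch.manual_seed(42)
print("probe:", rng_fingerprint())  # same seed + same call order -> same value
```

Because the probe consumes 100 draws, it also shifts every subsequent `torch.randn` call, which is presumably acceptable in a debug commit.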
VADER-VideoCrafter/scripts/main/train_t2v_lora.py CHANGED

```diff
@@ -589,7 +589,7 @@ def run_training(args, model, **kwargs):
     # load the pretrained LoRA model
     peft.set_peft_model_state_dict(peft_model, torch.load(args.lora_ckpt_path))
 
-    print('random seed debug: ', torch.randn(100).sum())
+    print('random seed debug: ', torch.randn(100, device=accelerator.device).sum())
     print("precision: ", peft_model.dtype)
     # precision of first_stage_model
     print("precision of first_stage_model: ", peft_model.first_stage_model.dtype)
@@ -639,7 +639,6 @@ def run_training(args, model, **kwargs):
         if isinstance(prompts, str):
             prompts = [prompts]
 
-        seed_everything_self(args.seed)
         with accelerator.autocast(): # mixed precision
             if isinstance(peft_model, torch.nn.parallel.DistributedDataParallel):
                 text_emb = peft_model.module.get_learned_conditioning(prompts).to(accelerator.device)
@@ -652,7 +651,8 @@ def run_training(args, model, **kwargs):
                 raise NotImplementedError
 
             # Inference Step 3.2: inference, batch_samples shape: batch, <samples>, c, t, h, w
-            # no backprop_mode=args.backprop_mode because it is inference process
+            # no backprop_mode=args.backprop_mode because it is inference process
+            seed_everything_self(args.seed)
             if isinstance(peft_model, torch.nn.parallel.DistributedDataParallel):
                 batch_samples = batch_ddim_sampling(peft_model.module, cond, noise_shape, args.n_samples, \
                     args.ddim_steps, args.ddim_eta, args.unconditional_guidance_scale, None, decode_frame=args.decode_frame, **kwargs)
```
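`seed_everything_self` is not defined in this diff. Assuming it follows the usual `seed_everything` pattern, a sketch of what it would reset (the repo's actual implementation may differ):

```python
import os
import random

import numpy as np
import torch

def seed_everything_self(seed: int) -> None:
    """Sketch of a seed_everything-style helper; assumed, not copied
    from the repo."""
    random.seed(seed)                     # Python's random module
    np.random.seed(seed)                  # NumPy's global RNG
    torch.manual_seed(seed)               # torch RNG (CPU, and CUDA via torch)
    torch.cuda.manual_seed_all(seed)      # all CUDA device RNGs, explicitly
    os.environ["PYTHONHASHSEED"] = str(seed)
```

Moving the call from before the autocast block to immediately before `batch_ddim_sampling` means the conditioning step no longer consumes RNG draws between the re-seed and the initial-noise `torch.randn` in `ddim.py`, so for a fixed `args.seed` the starting noise should come out identical on every run and rank.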