fffiloni committed on
Commit 1b6ca43 · 1 parent: dfe3b1e

Update app.py

Files changed (1): app.py (+3, -3)
app.py CHANGED
@@ -60,12 +60,12 @@ def infer(image_path, prompt, orbit_type, progress=gr.Progress(track_tqdm=True)):
     elif orbit_type == "Up":
         weight_name = "orbit_up_lora_weights.safetensors"
         #adapter_name = "orbit_up_lora_weights"
-    lora_rank = 256
+    lora_rank = 128
 
     adapter_timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
 
     # Load LoRA weights on CPU, move to GPU afterward
-    pipe.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=adapter_timestamp)
+    pipe.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=f"adapter_{adapter_timestamp}")
     pipe.fuse_lora(lora_scale=1 / lora_rank)
 
     # Move the pipeline to GPU for inference
@@ -80,7 +80,7 @@ def infer(image_path, prompt, orbit_type, progress=gr.Progress(track_tqdm=True)):
     video = pipe(
         image,
         prompt,
-        num_inference_steps=50,
+        num_inference_steps=25,
         guidance_scale=7.0,
         use_dynamic_cfg=True,
         generator=torch.Generator(device="cpu").manual_seed(seed)
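
The commit touches three things: the LoRA rank used to scale the fused weights (256 → 128), a timestamped adapter name passed to load_lora_weights, and the number of inference steps (50 → 25). Below is a minimal sketch of how these pieces fit together with diffusers' CogVideoX image-to-video pipeline; the checkpoint id, LoRA repo path, input image, and prompt are placeholders and not necessarily what this Space uses.

```python
# Sketch only: assumes the public THUDM/CogVideoX-5b-I2V checkpoint and a
# hypothetical LoRA repo; adjust paths/ids to your own setup.
from datetime import datetime

import torch
from diffusers import CogVideoXImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

pipe = CogVideoXImageToVideoPipeline.from_pretrained(
    "THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16
)

lora_path = "some-user/orbit-loras"                     # hypothetical repo id
weight_name = "orbit_up_lora_weights.safetensors"
lora_rank = 128                                         # rank the LoRA was trained with

# A unique, timestamped adapter name keeps repeated loads from colliding with
# an adapter already registered under the same name.
adapter_name = f"adapter_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
pipe.load_lora_weights(lora_path, weight_name=weight_name, adapter_name=adapter_name)
pipe.fuse_lora(lora_scale=1 / lora_rank)

# Move the pipeline to GPU only after the LoRA weights are loaded and fused
pipe.to("cuda")

image = load_image("input.jpg")                         # placeholder input
video = pipe(
    image,
    "an orbit-up camera move around the subject",       # placeholder prompt
    num_inference_steps=25,
    guidance_scale=7.0,
    use_dynamic_cfg=True,
    generator=torch.Generator(device="cpu").manual_seed(42),
).frames[0]
export_to_video(video, "output.mp4", fps=8)
```

The lora_scale passed to fuse_lora is 1 / lora_rank, so the rank constant has to match the weights actually being loaded, which is presumably why 256 was corrected to 128 alongside the other changes; halving num_inference_steps trades some quality for roughly half the inference time.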