hyejin.shin committed
Commit b4e0c02 · 1 Parent(s): 63b4b97

update lora weight

README.md CHANGED
@@ -1,14 +1,14 @@
  # How to make
- - pretrained model: [epiCRealism](https://civitai.com/models/25694?modelVersionId=134065) + [hyper CFG lora 12steps](https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SD15-12steps-CFG-lora.safetensors)
+ 1. pretrained model: [epiCRealism](https://civitai.com/models/25694?modelVersionId=134065) + [hyper CFG lora 12steps](https://huggingface.co/ByteDance/Hyper-SD/blob/main/Hyper-SD15-12steps-CFG-lora.safetensors)
  -> merge with lora weight 0.3
- - lora model: [AnimateLCM_sd15_t2v_lora.safetensors](https://huggingface.co/wangfuyun/AnimateLCM/blob/main/AnimateLCM_sd15_t2v_lora.safetensors)-> merge with lora weight 0.3
+ 2. model merged at step1 + lora model: [AnimateLCM_sd15_t2v_lora.safetensors](https://huggingface.co/wangfuyun/AnimateLCM/blob/main/AnimateLCM_sd15_t2v_lora.safetensors)-> merge with lora weight 0.8

  ```python
  # Load the motion adapter
  adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-3", torch_dtype=torch.float16)

  # load SD 1.5 based finetuned model
- model_id = "/home/hyejin2/test/models/epiCRealism-hyper-LCM.safetensors"
+ model_id = "/home/hyejin2/test/models/epiCRealism-hyper-LCM-8.safetensors"

  pipe = AnimateDiffVideoToVideoPipeline.from_single_file(model_id, motion_adapter=adapter, torch_dtype=torch.float16)
  pipe.save_pretrained("models/hello")
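
For reference, a minimal sketch of how the two merge steps described in the README could be reproduced with diffusers' LoRA fusing API. Only the LoRA repositories, file names, and fuse weights come from the README; the local base checkpoint path, the adapter handling, and the output name are assumptions, and the commit does not say which merge tool the author actually used.

```python
# Sketch only: assumes diffusers with the PEFT backend available ("pip install peft").
import torch
from diffusers import StableDiffusionPipeline

# Assumed local path to the epiCRealism checkpoint downloaded from civitai
pipe = StableDiffusionPipeline.from_single_file(
    "epiCRealism.safetensors", torch_dtype=torch.float16
)

# Step 1: bake the Hyper-SD 12-step CFG LoRA into the base weights at scale 0.3
pipe.load_lora_weights(
    "ByteDance/Hyper-SD", weight_name="Hyper-SD15-12steps-CFG-lora.safetensors"
)
pipe.fuse_lora(lora_scale=0.3)
pipe.unload_lora_weights()  # drop the adapter modules, keeping the fused weights

# Step 2: bake the AnimateLCM t2v LoRA into the step-1 result at scale 0.8
pipe.load_lora_weights(
    "wangfuyun/AnimateLCM", weight_name="AnimateLCM_sd15_t2v_lora.safetensors"
)
pipe.fuse_lora(lora_scale=0.8)
pipe.unload_lora_weights()

# Save the merged model (diffusers folder layout; output name is an assumption)
pipe.save_pretrained("models/epiCRealism-hyper-LCM-8")
```

Note that the README's `model_id` points at a single .safetensors file, so the merged checkpoint was presumably exported in single-file format rather than the diffusers folder layout shown in this sketch.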
model_index.json CHANGED
@@ -15,7 +15,7 @@
  ],
  "scheduler": [
  "diffusers",
- "DPMSolverSinglestepScheduler"
+ "PNDMScheduler"
  ],
  "text_encoder": [
  "transformers",
scheduler/scheduler_config.json CHANGED
@@ -1,21 +1,15 @@
  {
- "_class_name": "DPMSolverSinglestepScheduler",
+ "_class_name": "PNDMScheduler",
  "_diffusers_version": "0.29.2",
- "algorithm_type": "dpmsolver++",
- "beta_end": 0.0145,
- "beta_schedule": "linear",
- "beta_start": 0.00065,
- "dynamic_thresholding_ratio": 0.995,
- "final_sigmas_type": "zero",
- "lambda_min_clipped": -Infinity,
- "lower_order_final": true,
+ "beta_end": 0.012,
+ "beta_schedule": "scaled_linear",
+ "beta_start": 0.00085,
+ "clip_sample": false,
  "num_train_timesteps": 1000,
  "prediction_type": "epsilon",
- "sample_max_value": 1.0,
- "solver_order": 2,
- "solver_type": "midpoint",
- "thresholding": false,
- "trained_betas": null,
- "use_karras_sigmas": true,
- "variance_type": null
+ "set_alpha_to_one": false,
+ "skip_prk_steps": true,
+ "steps_offset": 1,
+ "timestep_spacing": "leading",
+ "trained_betas": null
  }
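
The replacement scheduler_config.json matches the stock SD 1.5 PNDM settings. A minimal sketch of the same change in code; the config values are copied from the diff, while the in-place swap at the end assumes a hypothetical already-loaded `pipe`:

```python
from diffusers import PNDMScheduler

# Values taken from the new scheduler_config.json above
scheduler_config = {
    "beta_end": 0.012,
    "beta_schedule": "scaled_linear",
    "beta_start": 0.00085,
    "clip_sample": False,  # carried over in SD 1.5 configs; from_config ignores keys the class does not define
    "num_train_timesteps": 1000,
    "prediction_type": "epsilon",
    "set_alpha_to_one": False,
    "skip_prk_steps": True,
    "steps_offset": 1,
    "timestep_spacing": "leading",
    "trained_betas": None,
}
scheduler = PNDMScheduler.from_config(scheduler_config)

# Equivalent swap on an already-loaded pipeline (hypothetical `pipe`):
# pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config)
```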
unet/diffusion_pytorch_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b18686171a652fd1c511d0bcaacfe1f124808189fafaff0bcb800d615e27c1b7
+ oid sha256:9171838047148f15652dd4654bf3efa8c6a299669de6b5b782fa05623960782b
  size 2554599720