---
pipeline_tag: text-to-video
---
AnimateDiff is a method that allows you to create videos using pre-existing Stable Diffusion text-to-image models.

The checkpoint https://huggingface.co/guoyww/animatediff/blob/main/mm_sdxl_v10_beta.ckpt was converted to the Hugging Face Diffusers format using the following script, which is based on Diffusers' conversion script (available at https://github.com/huggingface/diffusers/blob/main/scripts/convert_animatediff_motion_module_to_diffusers.py):

```python
import argparse

import torch

from diffusers import MotionAdapter


def convert_motion_module(original_state_dict):
    """Rename the original AnimateDiff keys to the Diffusers naming scheme."""
    converted_state_dict = {}
    for k, v in original_state_dict.items():
        # Position embeddings are recreated by MotionAdapter, so skip them.
        if "pos_encoder" in k:
            continue
        converted_state_dict[
            k.replace(".norms.0", ".norm1")
            .replace(".norms.1", ".norm2")
            .replace(".ff_norm", ".norm3")
            .replace(".attention_blocks.0", ".attn1")
            .replace(".attention_blocks.1", ".attn2")
            .replace(".temporal_transformer", "")
        ] = v

    return converted_state_dict


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--ckpt_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    parser.add_argument("--use_motion_mid_block", action="store_true")
    parser.add_argument("--motion_max_seq_length", type=int, default=32)
    parser.add_argument("--save_fp16", action="store_true")
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()

    state_dict = torch.load(args.ckpt_path, map_location="cpu")
    if "state_dict" in state_dict:
        state_dict = state_dict["state_dict"]

    conv_state_dict = convert_motion_module(state_dict)
    adapter = MotionAdapter(
        use_motion_mid_block=args.use_motion_mid_block,
        motion_max_seq_length=args.motion_max_seq_length,
        block_out_channels=(320, 640, 1280),  # SDXL UNet block widths
    )
    # strict=False because the position-embedding keys were skipped above
    adapter.load_state_dict(conv_state_dict, strict=False)
    adapter.save_pretrained(args.output_path)

    if args.save_fp16:
        adapter.to(torch.float16).save_pretrained(args.output_path, variant="fp16")
```
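
For example, the checkpoint above can be converted with an invocation along these lines (the script filename and output directory are placeholders):

```bash
python convert_animatediff_motion_module_to_diffusers.py \
    --ckpt_path mm_sdxl_v10_beta.ckpt \
    --output_path ./animatediff-sdxl-beta \
    --save_fp16
```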

The following example demonstrates how you can use the motion modules with an existing Stable Diffusion text-to-image model.
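
This is a minimal sketch, assuming Diffusers' `AnimateDiffSDXLPipeline` and an adapter saved by the conversion script above; the local adapter path, base model, scheduler settings, prompts, and generation parameters are illustrative assumptions rather than part of this model card:

```python
import torch
from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler, MotionAdapter
from diffusers.utils import export_to_gif

# Load the converted motion adapter (placeholder path; point this at the
# --output_path used when running the conversion script).
adapter = MotionAdapter.from_pretrained("./animatediff-sdxl-beta", torch_dtype=torch.float16)

# mm_sdxl_v10_beta is an SDXL motion module, so pair it with an SDXL base model.
model_id = "stabilityai/stable-diffusion-xl-base-1.0"
scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",
    clip_sample=False,
    timestep_spacing="linspace",
    beta_schedule="linear",
    steps_offset=1,
)
pipe = AnimateDiffSDXLPipeline.from_pretrained(
    model_id,
    motion_adapter=adapter,
    scheduler=scheduler,
    torch_dtype=torch.float16,
).to("cuda")

output = pipe(
    prompt="a panda surfing on a wave, highly detailed",
    negative_prompt="low quality, worst quality",
    num_inference_steps=25,
    guidance_scale=8.0,
    num_frames=16,
)
export_to_gif(output.frames[0], "animation.gif")
```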