jaxmetaverse committed
Commit 5ca481c · verified · 1 Parent(s): 65eafc3

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50)
  1. .gitattributes +11 -0
  2. models/BiRefNet/RMBG-2.0/.gitattributes +40 -0
  3. models/BiRefNet/RMBG-2.0/BiRefNet_config.py +11 -0
  4. models/BiRefNet/RMBG-2.0/birefnet.py +2244 -0
  5. models/BiRefNet/RMBG-2.0/collage5.png +3 -0
  6. models/BiRefNet/RMBG-2.0/config.json +20 -0
  7. models/BiRefNet/RMBG-2.0/diagram1.png +0 -0
  8. models/BiRefNet/RMBG-2.0/model.safetensors +3 -0
  9. models/BiRefNet/RMBG-2.0/onnx/model.onnx +3 -0
  10. models/BiRefNet/RMBG-2.0/onnx/model_bnb4.onnx +3 -0
  11. models/BiRefNet/RMBG-2.0/onnx/model_fp16.onnx +3 -0
  12. models/BiRefNet/RMBG-2.0/onnx/model_int8.onnx +3 -0
  13. models/BiRefNet/RMBG-2.0/onnx/model_q4.onnx +3 -0
  14. models/BiRefNet/RMBG-2.0/onnx/model_q4f16.onnx +3 -0
  15. models/BiRefNet/RMBG-2.0/onnx/model_quantized.onnx +3 -0
  16. models/BiRefNet/RMBG-2.0/onnx/model_uint8.onnx +3 -0
  17. models/BiRefNet/RMBG-2.0/preprocessor_config.json +23 -0
  18. models/BiRefNet/RMBG-2.0/pytorch_model.bin +3 -0
  19. models/BiRefNet/RMBG-2.0/t4.png +3 -0
  20. models/BiRefNet/pth/BiRefNet-general-epoch_244.pth +3 -0
  21. models/CogVideo/CogVideoX-5b-1.5/scheduler/scheduler_config.json +18 -0
  22. models/CogVideo/CogVideoX-5b-1.5/transformer_I2V/config.json +32 -0
  23. models/CogVideo/CogVideoX-5b-1.5/transformer_I2V/diffusion_pytorch_model-00001-of-00003.safetensors +3 -0
  24. models/CogVideo/CogVideoX-5b-1.5/transformer_I2V/diffusion_pytorch_model-00002-of-00003.safetensors +3 -0
  25. models/CogVideo/CogVideoX-5b-1.5/transformer_I2V/diffusion_pytorch_model-00003-of-00003.safetensors +3 -0
  26. models/CogVideo/CogVideoX-5b-1.5/transformer_I2V/diffusion_pytorch_model.safetensors.index.json +0 -0
  27. models/CogVideo/CogVideoX-5b-1.5/vae/config.json +39 -0
  28. models/CogVideo/CogVideoX-5b-1.5/vae/diffusion_pytorch_model.safetensors +3 -0
  29. models/CogVideo/CogVideoX-5b-I2V/.gitattributes +35 -0
  30. models/CogVideo/CogVideoX-5b-I2V/LICENSE +71 -0
  31. models/CogVideo/CogVideoX-5b-I2V/README.md +280 -0
  32. models/CogVideo/CogVideoX-5b-I2V/README_zh.md +252 -0
  33. models/CogVideo/CogVideoX-5b-I2V/configuration.json +1 -0
  34. models/CogVideo/CogVideoX-5b-I2V/model_index.json +24 -0
  35. models/CogVideo/CogVideoX-5b-I2V/scheduler/scheduler_config.json +18 -0
  36. models/CogVideo/CogVideoX-5b-I2V/transformer/config.json +29 -0
  37. models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00001-of-00003.safetensors +3 -0
  38. models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00002-of-00003.safetensors +3 -0
  39. models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00003-of-00003.safetensors +3 -0
  40. models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model.safetensors.index.json +0 -0
  41. models/CogVideo/CogVideoX-5b-I2V/vae/config.json +39 -0
  42. models/CogVideo/CogVideoX-5b-I2V/vae/diffusion_pytorch_model.safetensors +3 -0
  43. models/CogVideo/CogVideoX-5b/scheduler/scheduler_config.json +18 -0
  44. models/CogVideo/CogVideoX-5b/transformer/config.json +28 -0
  45. models/CogVideo/CogVideoX-5b/transformer/diffusion_pytorch_model-00001-of-00002.safetensors +3 -0
  46. models/CogVideo/CogVideoX-5b/transformer/diffusion_pytorch_model-00002-of-00002.safetensors +3 -0
  47. models/CogVideo/CogVideoX-5b/transformer/diffusion_pytorch_model.safetensors.index.json +0 -0
  48. models/CogVideo/CogVideoX-5b/vae/config.json +40 -0
  49. models/CogVideo/CogVideoX-5b/vae/diffusion_pytorch_model.safetensors +3 -0
  50. models/CogVideo/CogVideoX-Fun-V1.1-5b-Control/scheduler/scheduler_config.json +18 -0
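
The commit message above, "Upload folder using huggingface_hub", indicates the tree was pushed with the Hub client's folder-upload helper. A minimal sketch of that kind of call follows; the repository id and local path are illustrative, since neither appears in this view.

# Hypothetical reproduction of this kind of bulk upload (names are illustrative).
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./models",                # local tree containing BiRefNet, CogVideo, FILM, ...
    path_in_repo="models",                 # mirror the layout seen in the file list above
    repo_id="jaxmetaverse/<repo-name>",    # actual repository name is not shown in this view
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)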
.gitattributes CHANGED
@@ -37,3 +37,14 @@ models/checkpoints/memo/misc/face_analysis/misc/face_analysis/models/face_landma
  models/checkpoints/memo/misc/face_analysis/models/face_landmarker_v2_with_blendshapes.task filter=lfs diff=lfs merge=lfs -text
  models/diffusers/models--ZhengPeng7--BiRefNet/blobs/77277264c0e8c74149d3ff2fade4fd8176965b7108f3c5fc3b8c9c811edb4519 filter=lfs diff=lfs merge=lfs -text
  models/diffusers/models--huanngzh--mv-adapter/blobs/260e486d507247db30601d22de317e3f9c07f75a29912d43ed5c3a4aab4db4c9 filter=lfs diff=lfs merge=lfs -text
+ models/BiRefNet/RMBG-2.0/collage5.png filter=lfs diff=lfs merge=lfs -text
+ models/BiRefNet/RMBG-2.0/t4.png filter=lfs diff=lfs merge=lfs -text
+ models/FILM/L1/saved_model/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
+ models/FILM/Style/saved_model/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
+ models/FILM/VGG/saved_model/variables/variables.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
+ models/Janus-Pro/Janus-Pro-7B/janus_pro_teaser2.png filter=lfs diff=lfs merge=lfs -text
+ models/OmniGen/OmniGen-v1/demo_cases.png filter=lfs diff=lfs merge=lfs -text
+ models/blip/models--Salesforce--blip-image-captioning-base/blobs/d6638651a5526cc2ede56f2b5104d6851b0755816d220e5e046870430180c767 filter=lfs diff=lfs merge=lfs -text
+ models/blip/models--Salesforce--blip-vqa-base/blobs/33786eed34def0c95fa948128cb4386be9b9219aa2c2e25f1c9c744692121bb7 filter=lfs diff=lfs merge=lfs -text
+ models/clip_interrogator/models--timm--vit_large_patch14_clip_224.openai/blobs/9ce2e8a8ebfff3793d7d375ad6d3c35cb9aebf3de7ace0fc7308accab7cd207e filter=lfs diff=lfs merge=lfs -text
+ models/x-portrait/model_state-415001.th filter=lfs diff=lfs merge=lfs -text
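
Worth noting: even though the commit uploads dozens of large weight files, this hunk only adds explicit entries for files whose names are presumably not already covered by the wildcard patterns (*.safetensors, *.onnx, *.pth, and so on) declared earlier in .gitattributes; the .png previews, the .th checkpoint, extension-less blobs, and TensorFlow variables files need their own lines. A rough illustration, using Python's fnmatch as a stand-in for git's pattern matching:

# Illustrative only: fnmatch approximates git's wildmatch rules for simple "*.ext" patterns.
import fnmatch

wildcard_patterns = ["*.safetensors", "*.onnx", "*.pth", "*.bin"]  # subset of the usual Hub defaults
new_files = [
    "model.safetensors",              # matched by *.safetensors -> no extra entry needed
    "collage5.png",                   # not matched -> gets an explicit line in this hunk
    "variables.data-00000-of-00001",  # not matched -> gets an explicit line in this hunk
]
for name in new_files:
    covered = any(fnmatch.fnmatch(name, pattern) for pattern in wildcard_patterns)
    print(name, "->", "already LFS-tracked" if covered else "needs its own .gitattributes entry")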
models/BiRefNet/RMBG-2.0/.gitattributes ADDED
@@ -0,0 +1,40 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model_not_working.not_safetensors filter=lfs diff=lfs merge=lfs -text
+ t4.png filter=lfs diff=lfs merge=lfs -text
+ collage.png filter=lfs diff=lfs merge=lfs -text
+ collage3.png filter=lfs diff=lfs merge=lfs -text
+ collage5.png filter=lfs diff=lfs merge=lfs -text
models/BiRefNet/RMBG-2.0/BiRefNet_config.py ADDED
@@ -0,0 +1,11 @@
+ from transformers import PretrainedConfig
+
+ class BiRefNetConfig(PretrainedConfig):
+     model_type = "SegformerForSemanticSegmentation"
+     def __init__(
+         self,
+         bb_pretrained=False,
+         **kwargs
+     ):
+         self.bb_pretrained = bb_pretrained
+         super().__init__(**kwargs)
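
For context, BiRefNetConfig above is a standard transformers custom configuration class, so it behaves like any other PretrainedConfig. The sketch below is illustrative only and assumes the file is importable as a module; in the repository it is presumably wired up through the accompanying config.json (added in this commit but not shown in this view) so that the model can be loaded with trust_remote_code=True.

# Hypothetical usage sketch (not part of the commit); assumes BiRefNet_config.py is importable.
from BiRefNet_config import BiRefNetConfig

config = BiRefNetConfig(bb_pretrained=False)
print(config.model_type)       # "SegformerForSemanticSegmentation"
print(config.bb_pretrained)    # False
config.save_pretrained("./rmbg2_config")  # writes a config.json with these values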
models/BiRefNet/RMBG-2.0/birefnet.py ADDED
@@ -0,0 +1,2244 @@
+ ### config.py
+
+ import os
+ import math
+
+
+ class Config():
+     def __init__(self) -> None:
+         # PATH settings
+         self.sys_home_dir = os.path.expanduser('~')  # Make up your file system as: SYS_HOME_DIR/codes/dis/BiRefNet, SYS_HOME_DIR/datasets/dis/xx, SYS_HOME_DIR/weights/xx
+
+         # TASK settings
+         self.task = ['DIS5K', 'COD', 'HRSOD', 'DIS5K+HRSOD+HRS10K', 'P3M-10k'][0]
+         self.training_set = {
+             'DIS5K': ['DIS-TR', 'DIS-TR+DIS-TE1+DIS-TE2+DIS-TE3+DIS-TE4'][0],
+             'COD': 'TR-COD10K+TR-CAMO',
+             'HRSOD': ['TR-DUTS', 'TR-HRSOD', 'TR-UHRSD', 'TR-DUTS+TR-HRSOD', 'TR-DUTS+TR-UHRSD', 'TR-HRSOD+TR-UHRSD', 'TR-DUTS+TR-HRSOD+TR-UHRSD'][5],
+             'DIS5K+HRSOD+HRS10K': 'DIS-TE1+DIS-TE2+DIS-TE3+DIS-TE4+DIS-TR+TE-HRS10K+TE-HRSOD+TE-UHRSD+TR-HRS10K+TR-HRSOD+TR-UHRSD',  # leave DIS-VD for evaluation.
+             'P3M-10k': 'TR-P3M-10k',
+         }[self.task]
+         self.prompt4loc = ['dense', 'sparse'][0]
+
+         # Faster-Training settings
+         self.load_all = True
+         self.compile = True  # 1. Triggers a CPU memory leak to some extent, which is an inherent problem of PyTorch.
+         #                         Machines with > 70GB CPU memory can run the whole training on DIS5K with the default setting.
+         #                      2. A higher PyTorch version may fix it: https://github.com/pytorch/pytorch/issues/119607.
+         #                      3. But compile in PyTorch > 2.0.1 seems to bring no acceleration for training.
+         self.precisionHigh = True
+
+         # MODEL settings
+         self.ms_supervision = True
+         self.out_ref = self.ms_supervision and True
+         self.dec_ipt = True
+         self.dec_ipt_split = True
+         self.cxt_num = [0, 3][1]  # multi-scale skip connections from encoder
+         self.mul_scl_ipt = ['', 'add', 'cat'][2]
+         self.dec_att = ['', 'ASPP', 'ASPPDeformable'][2]
+         self.squeeze_block = ['', 'BasicDecBlk_x1', 'ResBlk_x4', 'ASPP_x3', 'ASPPDeformable_x3'][1]
+         self.dec_blk = ['BasicDecBlk', 'ResBlk', 'HierarAttDecBlk'][0]
+
+         # TRAINING settings
+         self.batch_size = 4
+         self.IoU_finetune_last_epochs = [
+             0,
+             {
+                 'DIS5K': -50,
+                 'COD': -20,
+                 'HRSOD': -20,
+                 'DIS5K+HRSOD+HRS10K': -20,
+                 'P3M-10k': -20,
+             }[self.task]
+         ][1]  # choose 0 to skip
+         self.lr = (1e-4 if 'DIS5K' in self.task else 1e-5) * math.sqrt(self.batch_size / 4)  # DIS needs a high lr to converge faster. Adapt the lr linearly
+         self.size = 1024
+         self.num_workers = max(4, self.batch_size)  # will be decreased to min(it, batch_size) at the initialization of the data_loader
+
+         # Backbone settings
+         self.bb = [
+             'vgg16', 'vgg16bn', 'resnet50',  # 0, 1, 2
+             'swin_v1_t', 'swin_v1_s',        # 3, 4
+             'swin_v1_b', 'swin_v1_l',        # 5-bs9, 6-bs4
+             'pvt_v2_b0', 'pvt_v2_b1',        # 7, 8
+             'pvt_v2_b2', 'pvt_v2_b5',        # 9-bs10, 10-bs5
+         ][6]
+         self.lateral_channels_in_collection = {
+             'vgg16': [512, 256, 128, 64], 'vgg16bn': [512, 256, 128, 64], 'resnet50': [1024, 512, 256, 64],
+             'pvt_v2_b2': [512, 320, 128, 64], 'pvt_v2_b5': [512, 320, 128, 64],
+             'swin_v1_b': [1024, 512, 256, 128], 'swin_v1_l': [1536, 768, 384, 192],
+             'swin_v1_t': [768, 384, 192, 96], 'swin_v1_s': [768, 384, 192, 96],
+             'pvt_v2_b0': [256, 160, 64, 32], 'pvt_v2_b1': [512, 320, 128, 64],
+         }[self.bb]
+         if self.mul_scl_ipt == 'cat':
+             self.lateral_channels_in_collection = [channel * 2 for channel in self.lateral_channels_in_collection]
+         self.cxt = self.lateral_channels_in_collection[1:][::-1][-self.cxt_num:] if self.cxt_num else []
+
+         # MODEL settings - inactive
+         self.lat_blk = ['BasicLatBlk'][0]
+         self.dec_channels_inter = ['fixed', 'adap'][0]
+         self.refine = ['', 'itself', 'RefUNet', 'Refiner', 'RefinerPVTInChannels4'][0]
+         self.progressive_ref = self.refine and True
+         self.ender = self.progressive_ref and False
+         self.scale = self.progressive_ref and 2
+         self.auxiliary_classification = False  # Only for DIS5K, where class labels are saved in `dataset.py`.
+         self.refine_iteration = 1
+         self.freeze_bb = False
+         self.model = [
+             'BiRefNet',
+         ][0]
+         if self.dec_blk == 'HierarAttDecBlk':
+             self.batch_size = 2 ** [0, 1, 2, 3, 4][2]
+
+         # TRAINING settings - inactive
+         self.preproc_methods = ['flip', 'enhance', 'rotate', 'pepper', 'crop'][:4]
+         self.optimizer = ['Adam', 'AdamW'][1]
+         self.lr_decay_epochs = [1e5]  # Set to negative N to decay the lr in the last N-th epoch.
+         self.lr_decay_rate = 0.5
+         # Loss
+         self.lambdas_pix_last = {
+             # not 0 means opening this loss
+             # original rate -- 1 : 30 : 1.5 : 0.2, bce x 30
+             'bce': 30 * 1,         # high performance
+             'iou': 0.5 * 1,        # 0 / 255
+             'iou_patch': 0.5 * 0,  # 0 / 255, win_size = (64, 64)
+             'mse': 150 * 0,        # can smooth the saliency map
+             'triplet': 3 * 0,
+             'reg': 100 * 0,
+             'ssim': 10 * 1,        # help contours,
+             'cnt': 5 * 0,          # help contours
+             'structure': 5 * 0,    # structure loss from codes of MVANet. A little improvement on DIS-TE[1,2,3], a bit more decrease on DIS-TE4.
+         }
+         self.lambdas_cls = {
+             'ce': 5.0
+         }
+         # Adv
+         self.lambda_adv_g = 10. * 0  # turn to 0 to avoid adv training
+         self.lambda_adv_d = 3. * (self.lambda_adv_g > 0)
+
+         # PATH settings - inactive
+         self.data_root_dir = os.path.join(self.sys_home_dir, 'datasets/dis')
+         self.weights_root_dir = os.path.join(self.sys_home_dir, 'weights')
+         self.weights = {
+             'pvt_v2_b2': os.path.join(self.weights_root_dir, 'pvt_v2_b2.pth'),
+             'pvt_v2_b5': os.path.join(self.weights_root_dir, ['pvt_v2_b5.pth', 'pvt_v2_b5_22k.pth'][0]),
+             'swin_v1_b': os.path.join(self.weights_root_dir, ['swin_base_patch4_window12_384_22kto1k.pth', 'swin_base_patch4_window12_384_22k.pth'][0]),
+             'swin_v1_l': os.path.join(self.weights_root_dir, ['swin_large_patch4_window12_384_22kto1k.pth', 'swin_large_patch4_window12_384_22k.pth'][0]),
+             'swin_v1_t': os.path.join(self.weights_root_dir, ['swin_tiny_patch4_window7_224_22kto1k_finetune.pth'][0]),
+             'swin_v1_s': os.path.join(self.weights_root_dir, ['swin_small_patch4_window7_224_22kto1k_finetune.pth'][0]),
+             'pvt_v2_b0': os.path.join(self.weights_root_dir, ['pvt_v2_b0.pth'][0]),
+             'pvt_v2_b1': os.path.join(self.weights_root_dir, ['pvt_v2_b1.pth'][0]),
+         }
+
+         # Callbacks - inactive
+         self.verbose_eval = True
+         self.only_S_MAE = False
+         self.use_fp16 = False  # Bugs. It may cause nan in training.
+         self.SDPA_enabled = False  # Bugs. Slower and errors occur in multi-GPUs
+
+         # others
+         self.device = [0, 'cpu'][0]  # .to(0) == .to('cuda:0')
+
+         self.batch_size_valid = 1
+         self.rand_seed = 7
+         # run_sh_file = [f for f in os.listdir('.') if 'train.sh' == f] + [os.path.join('..', f) for f in os.listdir('..') if 'train.sh' == f]
+         # with open(run_sh_file[0], 'r') as f:
+         #     lines = f.readlines()
+         # self.save_last = int([l.strip() for l in lines if '"{}")'.format(self.task) in l and 'val_last=' in l][0].split('val_last=')[-1].split()[0])
+         # self.save_step = int([l.strip() for l in lines if '"{}")'.format(self.task) in l and 'step=' in l][0].split('step=')[-1].split()[0])
+         # self.val_step = [0, self.save_step][0]
+
+     def print_task(self) -> None:
+         # Return task for choosing settings in shell scripts.
+         print(self.task)
+
+
+
+ ### models/backbones/pvt_v2.py
158
+
159
+ import torch
160
+ import torch.nn as nn
161
+ from functools import partial
162
+
163
+ from timm.models.layers import DropPath, to_2tuple, trunc_normal_
164
+ from timm.models.registry import register_model
165
+
166
+ import math
167
+
168
+ # from config import Config
169
+
170
+ # config = Config()
171
+
172
+ class Mlp(nn.Module):
173
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
174
+ super().__init__()
175
+ out_features = out_features or in_features
176
+ hidden_features = hidden_features or in_features
177
+ self.fc1 = nn.Linear(in_features, hidden_features)
178
+ self.dwconv = DWConv(hidden_features)
179
+ self.act = act_layer()
180
+ self.fc2 = nn.Linear(hidden_features, out_features)
181
+ self.drop = nn.Dropout(drop)
182
+
183
+ self.apply(self._init_weights)
184
+
185
+ def _init_weights(self, m):
186
+ if isinstance(m, nn.Linear):
187
+ trunc_normal_(m.weight, std=.02)
188
+ if isinstance(m, nn.Linear) and m.bias is not None:
189
+ nn.init.constant_(m.bias, 0)
190
+ elif isinstance(m, nn.LayerNorm):
191
+ nn.init.constant_(m.bias, 0)
192
+ nn.init.constant_(m.weight, 1.0)
193
+ elif isinstance(m, nn.Conv2d):
194
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
195
+ fan_out //= m.groups
196
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
197
+ if m.bias is not None:
198
+ m.bias.data.zero_()
199
+
200
+ def forward(self, x, H, W):
201
+ x = self.fc1(x)
202
+ x = self.dwconv(x, H, W)
203
+ x = self.act(x)
204
+ x = self.drop(x)
205
+ x = self.fc2(x)
206
+ x = self.drop(x)
207
+ return x
208
+
209
+
210
+ class Attention(nn.Module):
211
+ def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
212
+ super().__init__()
213
+ assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
214
+
215
+ self.dim = dim
216
+ self.num_heads = num_heads
217
+ head_dim = dim // num_heads
218
+ self.scale = qk_scale or head_dim ** -0.5
219
+
220
+ self.q = nn.Linear(dim, dim, bias=qkv_bias)
221
+ self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
222
+ self.attn_drop_prob = attn_drop
223
+ self.attn_drop = nn.Dropout(attn_drop)
224
+ self.proj = nn.Linear(dim, dim)
225
+ self.proj_drop = nn.Dropout(proj_drop)
226
+
227
+ self.sr_ratio = sr_ratio
228
+ if sr_ratio > 1:
229
+ self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
230
+ self.norm = nn.LayerNorm(dim)
231
+
232
+ self.apply(self._init_weights)
233
+
234
+ def _init_weights(self, m):
235
+ if isinstance(m, nn.Linear):
236
+ trunc_normal_(m.weight, std=.02)
237
+ if isinstance(m, nn.Linear) and m.bias is not None:
238
+ nn.init.constant_(m.bias, 0)
239
+ elif isinstance(m, nn.LayerNorm):
240
+ nn.init.constant_(m.bias, 0)
241
+ nn.init.constant_(m.weight, 1.0)
242
+ elif isinstance(m, nn.Conv2d):
243
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
244
+ fan_out //= m.groups
245
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
246
+ if m.bias is not None:
247
+ m.bias.data.zero_()
248
+
249
+ def forward(self, x, H, W):
250
+ B, N, C = x.shape
251
+ q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
252
+
253
+ if self.sr_ratio > 1:
254
+ x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
255
+ x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
256
+ x_ = self.norm(x_)
257
+ kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
258
+ else:
259
+ kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
260
+ k, v = kv[0], kv[1]
261
+
262
+ if config.SDPA_enabled:
263
+ x = torch.nn.functional.scaled_dot_product_attention(
264
+ q, k, v,
265
+ attn_mask=None, dropout_p=self.attn_drop_prob, is_causal=False
266
+ ).transpose(1, 2).reshape(B, N, C)
267
+ else:
268
+ attn = (q @ k.transpose(-2, -1)) * self.scale
269
+ attn = attn.softmax(dim=-1)
270
+ attn = self.attn_drop(attn)
271
+
272
+ x = (attn @ v).transpose(1, 2).reshape(B, N, C)
273
+ x = self.proj(x)
274
+ x = self.proj_drop(x)
275
+
276
+ return x
277
+
278
+
279
+ class Block(nn.Module):
280
+
281
+ def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
282
+ drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1):
283
+ super().__init__()
284
+ self.norm1 = norm_layer(dim)
285
+ self.attn = Attention(
286
+ dim,
287
+ num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
288
+ attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
289
+ # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
290
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
291
+ self.norm2 = norm_layer(dim)
292
+ mlp_hidden_dim = int(dim * mlp_ratio)
293
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
294
+
295
+ self.apply(self._init_weights)
296
+
297
+ def _init_weights(self, m):
298
+ if isinstance(m, nn.Linear):
299
+ trunc_normal_(m.weight, std=.02)
300
+ if isinstance(m, nn.Linear) and m.bias is not None:
301
+ nn.init.constant_(m.bias, 0)
302
+ elif isinstance(m, nn.LayerNorm):
303
+ nn.init.constant_(m.bias, 0)
304
+ nn.init.constant_(m.weight, 1.0)
305
+ elif isinstance(m, nn.Conv2d):
306
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
307
+ fan_out //= m.groups
308
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
309
+ if m.bias is not None:
310
+ m.bias.data.zero_()
311
+
312
+ def forward(self, x, H, W):
313
+ x = x + self.drop_path(self.attn(self.norm1(x), H, W))
314
+ x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
315
+
316
+ return x
317
+
318
+
319
+ class OverlapPatchEmbed(nn.Module):
320
+ """ Image to Patch Embedding
321
+ """
322
+
323
+ def __init__(self, img_size=224, patch_size=7, stride=4, in_channels=3, embed_dim=768):
324
+ super().__init__()
325
+ img_size = to_2tuple(img_size)
326
+ patch_size = to_2tuple(patch_size)
327
+
328
+ self.img_size = img_size
329
+ self.patch_size = patch_size
330
+ self.H, self.W = img_size[0] // patch_size[0], img_size[1] // patch_size[1]
331
+ self.num_patches = self.H * self.W
332
+ self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=stride,
333
+ padding=(patch_size[0] // 2, patch_size[1] // 2))
334
+ self.norm = nn.LayerNorm(embed_dim)
335
+
336
+ self.apply(self._init_weights)
337
+
338
+ def _init_weights(self, m):
339
+ if isinstance(m, nn.Linear):
340
+ trunc_normal_(m.weight, std=.02)
341
+ if isinstance(m, nn.Linear) and m.bias is not None:
342
+ nn.init.constant_(m.bias, 0)
343
+ elif isinstance(m, nn.LayerNorm):
344
+ nn.init.constant_(m.bias, 0)
345
+ nn.init.constant_(m.weight, 1.0)
346
+ elif isinstance(m, nn.Conv2d):
347
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
348
+ fan_out //= m.groups
349
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
350
+ if m.bias is not None:
351
+ m.bias.data.zero_()
352
+
353
+ def forward(self, x):
354
+ x = self.proj(x)
355
+ _, _, H, W = x.shape
356
+ x = x.flatten(2).transpose(1, 2)
357
+ x = self.norm(x)
358
+
359
+ return x, H, W
360
+
361
+
362
+ class PyramidVisionTransformerImpr(nn.Module):
363
+ def __init__(self, img_size=224, patch_size=16, in_channels=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
364
+ num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
365
+ attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
366
+ depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1]):
367
+ super().__init__()
368
+ self.num_classes = num_classes
369
+ self.depths = depths
370
+
371
+ # patch_embed
372
+ self.patch_embed1 = OverlapPatchEmbed(img_size=img_size, patch_size=7, stride=4, in_channels=in_channels,
373
+ embed_dim=embed_dims[0])
374
+ self.patch_embed2 = OverlapPatchEmbed(img_size=img_size // 4, patch_size=3, stride=2, in_channels=embed_dims[0],
375
+ embed_dim=embed_dims[1])
376
+ self.patch_embed3 = OverlapPatchEmbed(img_size=img_size // 8, patch_size=3, stride=2, in_channels=embed_dims[1],
377
+ embed_dim=embed_dims[2])
378
+ self.patch_embed4 = OverlapPatchEmbed(img_size=img_size // 16, patch_size=3, stride=2, in_channels=embed_dims[2],
379
+ embed_dim=embed_dims[3])
380
+
381
+ # transformer encoder
382
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
383
+ cur = 0
384
+ self.block1 = nn.ModuleList([Block(
385
+ dim=embed_dims[0], num_heads=num_heads[0], mlp_ratio=mlp_ratios[0], qkv_bias=qkv_bias, qk_scale=qk_scale,
386
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
387
+ sr_ratio=sr_ratios[0])
388
+ for i in range(depths[0])])
389
+ self.norm1 = norm_layer(embed_dims[0])
390
+
391
+ cur += depths[0]
392
+ self.block2 = nn.ModuleList([Block(
393
+ dim=embed_dims[1], num_heads=num_heads[1], mlp_ratio=mlp_ratios[1], qkv_bias=qkv_bias, qk_scale=qk_scale,
394
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
395
+ sr_ratio=sr_ratios[1])
396
+ for i in range(depths[1])])
397
+ self.norm2 = norm_layer(embed_dims[1])
398
+
399
+ cur += depths[1]
400
+ self.block3 = nn.ModuleList([Block(
401
+ dim=embed_dims[2], num_heads=num_heads[2], mlp_ratio=mlp_ratios[2], qkv_bias=qkv_bias, qk_scale=qk_scale,
402
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
403
+ sr_ratio=sr_ratios[2])
404
+ for i in range(depths[2])])
405
+ self.norm3 = norm_layer(embed_dims[2])
406
+
407
+ cur += depths[2]
408
+ self.block4 = nn.ModuleList([Block(
409
+ dim=embed_dims[3], num_heads=num_heads[3], mlp_ratio=mlp_ratios[3], qkv_bias=qkv_bias, qk_scale=qk_scale,
410
+ drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + i], norm_layer=norm_layer,
411
+ sr_ratio=sr_ratios[3])
412
+ for i in range(depths[3])])
413
+ self.norm4 = norm_layer(embed_dims[3])
414
+
415
+ # classification head
416
+ # self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity()
417
+
418
+ self.apply(self._init_weights)
419
+
420
+ def _init_weights(self, m):
421
+ if isinstance(m, nn.Linear):
422
+ trunc_normal_(m.weight, std=.02)
423
+ if isinstance(m, nn.Linear) and m.bias is not None:
424
+ nn.init.constant_(m.bias, 0)
425
+ elif isinstance(m, nn.LayerNorm):
426
+ nn.init.constant_(m.bias, 0)
427
+ nn.init.constant_(m.weight, 1.0)
428
+ elif isinstance(m, nn.Conv2d):
429
+ fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
430
+ fan_out //= m.groups
431
+ m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
432
+ if m.bias is not None:
433
+ m.bias.data.zero_()
434
+
435
+ def init_weights(self, pretrained=None):
436
+ if isinstance(pretrained, str):
437
+ logger = 1
438
+ #load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
439
+
440
+ def reset_drop_path(self, drop_path_rate):
441
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(self.depths))]
442
+ cur = 0
443
+ for i in range(self.depths[0]):
444
+ self.block1[i].drop_path.drop_prob = dpr[cur + i]
445
+
446
+ cur += self.depths[0]
447
+ for i in range(self.depths[1]):
448
+ self.block2[i].drop_path.drop_prob = dpr[cur + i]
449
+
450
+ cur += self.depths[1]
451
+ for i in range(self.depths[2]):
452
+ self.block3[i].drop_path.drop_prob = dpr[cur + i]
453
+
454
+ cur += self.depths[2]
455
+ for i in range(self.depths[3]):
456
+ self.block4[i].drop_path.drop_prob = dpr[cur + i]
457
+
458
+ def freeze_patch_emb(self):
459
+ self.patch_embed1.requires_grad = False
460
+
461
+ @torch.jit.ignore
462
+ def no_weight_decay(self):
463
+ return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'} # has pos_embed may be better
464
+
465
+ def get_classifier(self):
466
+ return self.head
467
+
468
+ def reset_classifier(self, num_classes, global_pool=''):
469
+ self.num_classes = num_classes
470
+ self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
471
+
472
+ def forward_features(self, x):
473
+ B = x.shape[0]
474
+ outs = []
475
+
476
+ # stage 1
477
+ x, H, W = self.patch_embed1(x)
478
+ for i, blk in enumerate(self.block1):
479
+ x = blk(x, H, W)
480
+ x = self.norm1(x)
481
+ x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
482
+ outs.append(x)
483
+
484
+ # stage 2
485
+ x, H, W = self.patch_embed2(x)
486
+ for i, blk in enumerate(self.block2):
487
+ x = blk(x, H, W)
488
+ x = self.norm2(x)
489
+ x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
490
+ outs.append(x)
491
+
492
+ # stage 3
493
+ x, H, W = self.patch_embed3(x)
494
+ for i, blk in enumerate(self.block3):
495
+ x = blk(x, H, W)
496
+ x = self.norm3(x)
497
+ x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
498
+ outs.append(x)
499
+
500
+ # stage 4
501
+ x, H, W = self.patch_embed4(x)
502
+ for i, blk in enumerate(self.block4):
503
+ x = blk(x, H, W)
504
+ x = self.norm4(x)
505
+ x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
506
+ outs.append(x)
507
+
508
+ return outs
509
+
510
+ # return x.mean(dim=1)
511
+
512
+ def forward(self, x):
513
+ x = self.forward_features(x)
514
+ # x = self.head(x)
515
+
516
+ return x
517
+
518
+
519
+ class DWConv(nn.Module):
520
+ def __init__(self, dim=768):
521
+ super(DWConv, self).__init__()
522
+ self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
523
+
524
+ def forward(self, x, H, W):
525
+ B, N, C = x.shape
526
+ x = x.transpose(1, 2).view(B, C, H, W).contiguous()
527
+ x = self.dwconv(x)
528
+ x = x.flatten(2).transpose(1, 2)
529
+
530
+ return x
531
+
532
+
533
+ def _conv_filter(state_dict, patch_size=16):
534
+ """ convert patch embedding weight from manual patchify + linear proj to conv"""
535
+ out_dict = {}
536
+ for k, v in state_dict.items():
537
+ if 'patch_embed.proj.weight' in k:
538
+ v = v.reshape((v.shape[0], 3, patch_size, patch_size))
539
+ out_dict[k] = v
540
+
541
+ return out_dict
542
+
543
+
544
+ ## @register_model
545
+ class pvt_v2_b0(PyramidVisionTransformerImpr):
546
+ def __init__(self, **kwargs):
547
+ super(pvt_v2_b0, self).__init__(
548
+ patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
549
+ qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
550
+ drop_rate=0.0, drop_path_rate=0.1)
551
+
552
+
553
+
554
+ ## @register_model
555
+ class pvt_v2_b1(PyramidVisionTransformerImpr):
556
+ def __init__(self, **kwargs):
557
+ super(pvt_v2_b1, self).__init__(
558
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
559
+ qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],
560
+ drop_rate=0.0, drop_path_rate=0.1)
561
+
562
+ ## @register_model
563
+ class pvt_v2_b2(PyramidVisionTransformerImpr):
564
+ def __init__(self, in_channels=3, **kwargs):
565
+ super(pvt_v2_b2, self).__init__(
566
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
567
+ qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],
568
+ drop_rate=0.0, drop_path_rate=0.1, in_channels=in_channels)
569
+
570
+ ## @register_model
571
+ class pvt_v2_b3(PyramidVisionTransformerImpr):
572
+ def __init__(self, **kwargs):
573
+ super(pvt_v2_b3, self).__init__(
574
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
575
+ qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 18, 3], sr_ratios=[8, 4, 2, 1],
576
+ drop_rate=0.0, drop_path_rate=0.1)
577
+
578
+ ## @register_model
579
+ class pvt_v2_b4(PyramidVisionTransformerImpr):
580
+ def __init__(self, **kwargs):
581
+ super(pvt_v2_b4, self).__init__(
582
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],
583
+ qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 8, 27, 3], sr_ratios=[8, 4, 2, 1],
584
+ drop_rate=0.0, drop_path_rate=0.1)
585
+
586
+
587
+ ## @register_model
588
+ class pvt_v2_b5(PyramidVisionTransformerImpr):
589
+ def __init__(self, **kwargs):
590
+ super(pvt_v2_b5, self).__init__(
591
+ patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],
592
+ qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],
593
+ drop_rate=0.0, drop_path_rate=0.1)
594
+
595
+
596
+
597
+ ### models/backbones/swin_v1.py
598
+
599
+ # --------------------------------------------------------
600
+ # Swin Transformer
601
+ # Copyright (c) 2021 Microsoft
602
+ # Licensed under The MIT License [see LICENSE for details]
603
+ # Written by Ze Liu, Yutong Lin, Yixuan Wei
604
+ # --------------------------------------------------------
605
+
606
+ import torch
607
+ import torch.nn as nn
608
+ import torch.nn.functional as F
609
+ import torch.utils.checkpoint as checkpoint
610
+ import numpy as np
611
+ from timm.models.layers import DropPath, to_2tuple, trunc_normal_
612
+
613
+ # from config import Config
614
+
615
+
616
+ # config = Config()
617
+
618
+ class Mlp(nn.Module):
619
+ """ Multilayer perceptron."""
620
+
621
+ def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
622
+ super().__init__()
623
+ out_features = out_features or in_features
624
+ hidden_features = hidden_features or in_features
625
+ self.fc1 = nn.Linear(in_features, hidden_features)
626
+ self.act = act_layer()
627
+ self.fc2 = nn.Linear(hidden_features, out_features)
628
+ self.drop = nn.Dropout(drop)
629
+
630
+ def forward(self, x):
631
+ x = self.fc1(x)
632
+ x = self.act(x)
633
+ x = self.drop(x)
634
+ x = self.fc2(x)
635
+ x = self.drop(x)
636
+ return x
637
+
638
+
639
+ def window_partition(x, window_size):
640
+ """
641
+ Args:
642
+ x: (B, H, W, C)
643
+ window_size (int): window size
644
+
645
+ Returns:
646
+ windows: (num_windows*B, window_size, window_size, C)
647
+ """
648
+ B, H, W, C = x.shape
649
+ x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
650
+ windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
651
+ return windows
652
+
653
+
654
+ def window_reverse(windows, window_size, H, W):
655
+ """
656
+ Args:
657
+ windows: (num_windows*B, window_size, window_size, C)
658
+ window_size (int): Window size
659
+ H (int): Height of image
660
+ W (int): Width of image
661
+
662
+ Returns:
663
+ x: (B, H, W, C)
664
+ """
665
+ B = int(windows.shape[0] / (H * W / window_size / window_size))
666
+ x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
667
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
668
+ return x
669
+
670
+
671
+ class WindowAttention(nn.Module):
672
+ """ Window based multi-head self attention (W-MSA) module with relative position bias.
673
+ It supports both of shifted and non-shifted window.
674
+
675
+ Args:
676
+ dim (int): Number of input channels.
677
+ window_size (tuple[int]): The height and width of the window.
678
+ num_heads (int): Number of attention heads.
679
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
680
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
681
+ attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
682
+ proj_drop (float, optional): Dropout ratio of output. Default: 0.0
683
+ """
684
+
685
+ def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
686
+
687
+ super().__init__()
688
+ self.dim = dim
689
+ self.window_size = window_size # Wh, Ww
690
+ self.num_heads = num_heads
691
+ head_dim = dim // num_heads
692
+ self.scale = qk_scale or head_dim ** -0.5
693
+
694
+ # define a parameter table of relative position bias
695
+ self.relative_position_bias_table = nn.Parameter(
696
+ torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
697
+
698
+ # get pair-wise relative position index for each token inside the window
699
+ coords_h = torch.arange(self.window_size[0])
700
+ coords_w = torch.arange(self.window_size[1])
701
+ coords = torch.stack(torch.meshgrid([coords_h, coords_w], indexing='ij')) # 2, Wh, Ww
702
+ coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
703
+ relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
704
+ relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
705
+ relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
706
+ relative_coords[:, :, 1] += self.window_size[1] - 1
707
+ relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
708
+ relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
709
+ self.register_buffer("relative_position_index", relative_position_index)
710
+
711
+ self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
712
+ self.attn_drop_prob = attn_drop
713
+ self.attn_drop = nn.Dropout(attn_drop)
714
+ self.proj = nn.Linear(dim, dim)
715
+ self.proj_drop = nn.Dropout(proj_drop)
716
+
717
+ trunc_normal_(self.relative_position_bias_table, std=.02)
718
+ self.softmax = nn.Softmax(dim=-1)
719
+
720
+ def forward(self, x, mask=None):
721
+ """ Forward function.
722
+
723
+ Args:
724
+ x: input features with shape of (num_windows*B, N, C)
725
+ mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
726
+ """
727
+ B_, N, C = x.shape
728
+ qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
729
+ q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
730
+
731
+ q = q * self.scale
732
+
733
+ if config.SDPA_enabled:
734
+ x = torch.nn.functional.scaled_dot_product_attention(
735
+ q, k, v,
736
+ attn_mask=None, dropout_p=self.attn_drop_prob, is_causal=False
737
+ ).transpose(1, 2).reshape(B_, N, C)
738
+ else:
739
+ attn = (q @ k.transpose(-2, -1))
740
+
741
+ relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
742
+ self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
743
+ relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
744
+ attn = attn + relative_position_bias.unsqueeze(0)
745
+
746
+ if mask is not None:
747
+ nW = mask.shape[0]
748
+ attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
749
+ attn = attn.view(-1, self.num_heads, N, N)
750
+ attn = self.softmax(attn)
751
+ else:
752
+ attn = self.softmax(attn)
753
+
754
+ attn = self.attn_drop(attn)
755
+
756
+ x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
757
+ x = self.proj(x)
758
+ x = self.proj_drop(x)
759
+ return x
760
+
761
+
762
+ class SwinTransformerBlock(nn.Module):
763
+ """ Swin Transformer Block.
764
+
765
+ Args:
766
+ dim (int): Number of input channels.
767
+ num_heads (int): Number of attention heads.
768
+ window_size (int): Window size.
769
+ shift_size (int): Shift size for SW-MSA.
770
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
771
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
772
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
773
+ drop (float, optional): Dropout rate. Default: 0.0
774
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
775
+ drop_path (float, optional): Stochastic depth rate. Default: 0.0
776
+ act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
777
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
778
+ """
779
+
780
+ def __init__(self, dim, num_heads, window_size=7, shift_size=0,
781
+ mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
782
+ act_layer=nn.GELU, norm_layer=nn.LayerNorm):
783
+ super().__init__()
784
+ self.dim = dim
785
+ self.num_heads = num_heads
786
+ self.window_size = window_size
787
+ self.shift_size = shift_size
788
+ self.mlp_ratio = mlp_ratio
789
+ assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
790
+
791
+ self.norm1 = norm_layer(dim)
792
+ self.attn = WindowAttention(
793
+ dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
794
+ qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
795
+
796
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
797
+ self.norm2 = norm_layer(dim)
798
+ mlp_hidden_dim = int(dim * mlp_ratio)
799
+ self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
800
+
801
+ self.H = None
802
+ self.W = None
803
+
804
+ def forward(self, x, mask_matrix):
805
+ """ Forward function.
806
+
807
+ Args:
808
+ x: Input feature, tensor size (B, H*W, C).
809
+ H, W: Spatial resolution of the input feature.
810
+ mask_matrix: Attention mask for cyclic shift.
811
+ """
812
+ B, L, C = x.shape
813
+ H, W = self.H, self.W
814
+ assert L == H * W, "input feature has wrong size"
815
+
816
+ shortcut = x
817
+ x = self.norm1(x)
818
+ x = x.view(B, H, W, C)
819
+
820
+ # pad feature maps to multiples of window size
821
+ pad_l = pad_t = 0
822
+ pad_r = (self.window_size - W % self.window_size) % self.window_size
823
+ pad_b = (self.window_size - H % self.window_size) % self.window_size
824
+ x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
825
+ _, Hp, Wp, _ = x.shape
826
+
827
+ # cyclic shift
828
+ if self.shift_size > 0:
829
+ shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
830
+ attn_mask = mask_matrix
831
+ else:
832
+ shifted_x = x
833
+ attn_mask = None
834
+
835
+ # partition windows
836
+ x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
837
+ x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
838
+
839
+ # W-MSA/SW-MSA
840
+ attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
841
+
842
+ # merge windows
843
+ attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
844
+ shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
845
+
846
+ # reverse cyclic shift
847
+ if self.shift_size > 0:
848
+ x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
849
+ else:
850
+ x = shifted_x
851
+
852
+ if pad_r > 0 or pad_b > 0:
853
+ x = x[:, :H, :W, :].contiguous()
854
+
855
+ x = x.view(B, H * W, C)
856
+
857
+ # FFN
858
+ x = shortcut + self.drop_path(x)
859
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
860
+
861
+ return x
862
+
863
+
864
+ class PatchMerging(nn.Module):
865
+ """ Patch Merging Layer
866
+
867
+ Args:
868
+ dim (int): Number of input channels.
869
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
870
+ """
871
+ def __init__(self, dim, norm_layer=nn.LayerNorm):
872
+ super().__init__()
873
+ self.dim = dim
874
+ self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
875
+ self.norm = norm_layer(4 * dim)
876
+
877
+ def forward(self, x, H, W):
878
+ """ Forward function.
879
+
880
+ Args:
881
+ x: Input feature, tensor size (B, H*W, C).
882
+ H, W: Spatial resolution of the input feature.
883
+ """
884
+ B, L, C = x.shape
885
+ assert L == H * W, "input feature has wrong size"
886
+
887
+ x = x.view(B, H, W, C)
888
+
889
+ # padding
890
+ pad_input = (H % 2 == 1) or (W % 2 == 1)
891
+ if pad_input:
892
+ x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
893
+
894
+ x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
895
+ x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
896
+ x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
897
+ x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
898
+ x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
899
+ x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
900
+
901
+ x = self.norm(x)
902
+ x = self.reduction(x)
903
+
904
+ return x
905
+
906
+
907
+ class BasicLayer(nn.Module):
908
+ """ A basic Swin Transformer layer for one stage.
909
+
910
+ Args:
911
+ dim (int): Number of feature channels
912
+ depth (int): Depths of this stage.
913
+ num_heads (int): Number of attention head.
914
+ window_size (int): Local window size. Default: 7.
915
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
916
+ qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
917
+ qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
918
+ drop (float, optional): Dropout rate. Default: 0.0
919
+ attn_drop (float, optional): Attention dropout rate. Default: 0.0
920
+ drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
921
+ norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
922
+ downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
923
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
924
+ """
925
+
926
+ def __init__(self,
927
+ dim,
928
+ depth,
929
+ num_heads,
930
+ window_size=7,
931
+ mlp_ratio=4.,
932
+ qkv_bias=True,
933
+ qk_scale=None,
934
+ drop=0.,
935
+ attn_drop=0.,
936
+ drop_path=0.,
937
+ norm_layer=nn.LayerNorm,
938
+ downsample=None,
939
+ use_checkpoint=False):
940
+ super().__init__()
941
+ self.window_size = window_size
942
+ self.shift_size = window_size // 2
943
+ self.depth = depth
944
+ self.use_checkpoint = use_checkpoint
945
+
946
+ # build blocks
947
+ self.blocks = nn.ModuleList([
948
+ SwinTransformerBlock(
949
+ dim=dim,
950
+ num_heads=num_heads,
951
+ window_size=window_size,
952
+ shift_size=0 if (i % 2 == 0) else window_size // 2,
953
+ mlp_ratio=mlp_ratio,
954
+ qkv_bias=qkv_bias,
955
+ qk_scale=qk_scale,
956
+ drop=drop,
957
+ attn_drop=attn_drop,
958
+ drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
959
+ norm_layer=norm_layer)
960
+ for i in range(depth)])
961
+
962
+ # patch merging layer
963
+ if downsample is not None:
964
+ self.downsample = downsample(dim=dim, norm_layer=norm_layer)
965
+ else:
966
+ self.downsample = None
967
+
968
+ def forward(self, x, H, W):
969
+ """ Forward function.
970
+
971
+ Args:
972
+ x: Input feature, tensor size (B, H*W, C).
973
+ H, W: Spatial resolution of the input feature.
974
+ """
975
+
976
+ # calculate attention mask for SW-MSA
977
+ Hp = int(np.ceil(H / self.window_size)) * self.window_size
978
+ Wp = int(np.ceil(W / self.window_size)) * self.window_size
979
+ img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
980
+ h_slices = (slice(0, -self.window_size),
981
+ slice(-self.window_size, -self.shift_size),
982
+ slice(-self.shift_size, None))
983
+ w_slices = (slice(0, -self.window_size),
984
+ slice(-self.window_size, -self.shift_size),
985
+ slice(-self.shift_size, None))
986
+ cnt = 0
987
+ for h in h_slices:
988
+ for w in w_slices:
989
+ img_mask[:, h, w, :] = cnt
990
+ cnt += 1
991
+
992
+ mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
993
+ mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
994
+ attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
995
+ attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
996
+
997
+ for blk in self.blocks:
998
+ blk.H, blk.W = H, W
999
+ if self.use_checkpoint:
1000
+ x = checkpoint.checkpoint(blk, x, attn_mask)
1001
+ else:
1002
+ x = blk(x, attn_mask)
1003
+ if self.downsample is not None:
1004
+ x_down = self.downsample(x, H, W)
1005
+ Wh, Ww = (H + 1) // 2, (W + 1) // 2
1006
+ return x, H, W, x_down, Wh, Ww
1007
+ else:
1008
+ return x, H, W, x, H, W
1009
+
1010
+
1011
+ class PatchEmbed(nn.Module):
1012
+ """ Image to Patch Embedding
1013
+
1014
+ Args:
1015
+ patch_size (int): Patch token size. Default: 4.
1016
+ in_channels (int): Number of input image channels. Default: 3.
1017
+ embed_dim (int): Number of linear projection output channels. Default: 96.
1018
+ norm_layer (nn.Module, optional): Normalization layer. Default: None
1019
+ """
1020
+
1021
+ def __init__(self, patch_size=4, in_channels=3, embed_dim=96, norm_layer=None):
1022
+ super().__init__()
1023
+ patch_size = to_2tuple(patch_size)
1024
+ self.patch_size = patch_size
1025
+
1026
+ self.in_channels = in_channels
1027
+ self.embed_dim = embed_dim
1028
+
1029
+ self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
1030
+ if norm_layer is not None:
1031
+ self.norm = norm_layer(embed_dim)
1032
+ else:
1033
+ self.norm = None
1034
+
1035
+ def forward(self, x):
1036
+ """Forward function."""
1037
+ # padding
1038
+ _, _, H, W = x.size()
1039
+ if W % self.patch_size[1] != 0:
1040
+ x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
1041
+ if H % self.patch_size[0] != 0:
1042
+ x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
1043
+
1044
+ x = self.proj(x) # B C Wh Ww
1045
+ if self.norm is not None:
1046
+ Wh, Ww = x.size(2), x.size(3)
1047
+ x = x.flatten(2).transpose(1, 2)
1048
+ x = self.norm(x)
1049
+ x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
1050
+
1051
+ return x
1052
+
1053
+
1054
+ class SwinTransformer(nn.Module):
1055
+ """ Swin Transformer backbone.
1056
+ A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
1057
+ https://arxiv.org/pdf/2103.14030
1058
+
1059
+ Args:
1060
+ pretrain_img_size (int): Input image size for training the pretrained model,
1061
+ used in absolute postion embedding. Default 224.
1062
+ patch_size (int | tuple(int)): Patch size. Default: 4.
1063
+ in_channels (int): Number of input image channels. Default: 3.
1064
+ embed_dim (int): Number of linear projection output channels. Default: 96.
1065
+ depths (tuple[int]): Depths of each Swin Transformer stage.
1066
+ num_heads (tuple[int]): Number of attention head of each stage.
1067
+ window_size (int): Window size. Default: 7.
1068
+ mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
1069
+ qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
1070
+ qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
1071
+ drop_rate (float): Dropout rate.
1072
+ attn_drop_rate (float): Attention dropout rate. Default: 0.
1073
+ drop_path_rate (float): Stochastic depth rate. Default: 0.2.
1074
+ norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
1075
+ ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
1076
+ patch_norm (bool): If True, add normalization after patch embedding. Default: True.
1077
+ out_indices (Sequence[int]): Output from which stages.
1078
+ frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
1079
+ -1 means not freezing any parameters.
1080
+ use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
1081
+ """
1082
+
1083
+ def __init__(self,
1084
+ pretrain_img_size=224,
1085
+ patch_size=4,
1086
+ in_channels=3,
1087
+ embed_dim=96,
1088
+ depths=[2, 2, 6, 2],
1089
+ num_heads=[3, 6, 12, 24],
1090
+ window_size=7,
1091
+ mlp_ratio=4.,
1092
+ qkv_bias=True,
1093
+ qk_scale=None,
1094
+ drop_rate=0.,
1095
+ attn_drop_rate=0.,
1096
+ drop_path_rate=0.2,
1097
+ norm_layer=nn.LayerNorm,
1098
+ ape=False,
1099
+ patch_norm=True,
1100
+ out_indices=(0, 1, 2, 3),
1101
+ frozen_stages=-1,
1102
+ use_checkpoint=False):
1103
+ super().__init__()
1104
+
1105
+ self.pretrain_img_size = pretrain_img_size
1106
+ self.num_layers = len(depths)
1107
+ self.embed_dim = embed_dim
1108
+ self.ape = ape
1109
+ self.patch_norm = patch_norm
1110
+ self.out_indices = out_indices
1111
+ self.frozen_stages = frozen_stages
1112
+
1113
+ # split image into non-overlapping patches
1114
+ self.patch_embed = PatchEmbed(
1115
+ patch_size=patch_size, in_channels=in_channels, embed_dim=embed_dim,
1116
+ norm_layer=norm_layer if self.patch_norm else None)
1117
+
1118
+ # absolute position embedding
1119
+ if self.ape:
1120
+ pretrain_img_size = to_2tuple(pretrain_img_size)
1121
+ patch_size = to_2tuple(patch_size)
1122
+ patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]
1123
+
1124
+ self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))
1125
+ trunc_normal_(self.absolute_pos_embed, std=.02)
1126
+
1127
+ self.pos_drop = nn.Dropout(p=drop_rate)
1128
+
1129
+ # stochastic depth
1130
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
1131
+
1132
+ # build layers
1133
+ self.layers = nn.ModuleList()
1134
+ for i_layer in range(self.num_layers):
1135
+ layer = BasicLayer(
1136
+ dim=int(embed_dim * 2 ** i_layer),
1137
+ depth=depths[i_layer],
1138
+ num_heads=num_heads[i_layer],
1139
+ window_size=window_size,
1140
+ mlp_ratio=mlp_ratio,
1141
+ qkv_bias=qkv_bias,
1142
+ qk_scale=qk_scale,
1143
+ drop=drop_rate,
1144
+ attn_drop=attn_drop_rate,
1145
+ drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
1146
+ norm_layer=norm_layer,
1147
+ downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
1148
+ use_checkpoint=use_checkpoint)
1149
+ self.layers.append(layer)
1150
+
1151
+ num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
1152
+ self.num_features = num_features
1153
+
1154
+ # add a norm layer for each output
1155
+ for i_layer in out_indices:
1156
+ layer = norm_layer(num_features[i_layer])
1157
+ layer_name = f'norm{i_layer}'
1158
+ self.add_module(layer_name, layer)
1159
+
1160
+ self._freeze_stages()
1161
+
1162
+ def _freeze_stages(self):
1163
+ if self.frozen_stages >= 0:
1164
+ self.patch_embed.eval()
1165
+ for param in self.patch_embed.parameters():
1166
+ param.requires_grad = False
1167
+
1168
+ if self.frozen_stages >= 1 and self.ape:
1169
+ self.absolute_pos_embed.requires_grad = False
1170
+
1171
+ if self.frozen_stages >= 2:
1172
+ self.pos_drop.eval()
1173
+ for i in range(0, self.frozen_stages - 1):
1174
+ m = self.layers[i]
1175
+ m.eval()
1176
+ for param in m.parameters():
1177
+ param.requires_grad = False
1178
+
1179
+
1180
+ def forward(self, x):
1181
+ """Forward function."""
1182
+ x = self.patch_embed(x)
1183
+
1184
+ Wh, Ww = x.size(2), x.size(3)
1185
+ if self.ape:
1186
+ # interpolate the position embedding to the corresponding size
1187
+ absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')
1188
+ x = (x + absolute_pos_embed) # B C Wh Ww
1189
+
1190
+ outs = []#x.contiguous()]
1191
+ x = x.flatten(2).transpose(1, 2)
1192
+ x = self.pos_drop(x)
1193
+ for i in range(self.num_layers):
1194
+ layer = self.layers[i]
1195
+ x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
1196
+
1197
+ if i in self.out_indices:
1198
+ norm_layer = getattr(self, f'norm{i}')
1199
+ x_out = norm_layer(x_out)
1200
+
1201
+ out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
1202
+ outs.append(out)
1203
+
1204
+ return tuple(outs)
1205
+
1206
+ def train(self, mode=True):
1207
+ """Convert the model into training mode while keeping the frozen stages frozen."""
1208
+ super(SwinTransformer, self).train(mode)
1209
+ self._freeze_stages()
1210
+
1211
+ def swin_v1_t():
1212
+ model = SwinTransformer(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7)
1213
+ return model
1214
+
1215
+ def swin_v1_s():
1216
+ model = SwinTransformer(embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7)
1217
+ return model
1218
+
1219
+ def swin_v1_b():
1220
+ model = SwinTransformer(embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12)
1221
+ return model
1222
+
1223
+ def swin_v1_l():
1224
+ model = SwinTransformer(embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12)
1225
+ return model
1226
+
1227
+
1228
+
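For reference, a minimal sketch (not part of the original file) of how these constructors are typically exercised, assuming the Swin blocks defined earlier in this file (BasicLayer, PatchMerging, window attention) are available; the backbone returns one feature map per stage, with channels doubling at each stage:

import torch

backbone = swin_v1_t()                        # embed_dim=96 -> stage channels 96/192/384/768
feats = backbone(torch.randn(1, 3, 224, 224))
for f in feats:
    print(f.shape)                            # (1, 96, 56, 56), (1, 192, 28, 28), (1, 384, 14, 14), (1, 768, 7, 7)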
1229
+ ### models/modules/deform_conv.py
1230
+
1231
+ import torch
1232
+ import torch.nn as nn
1233
+ from torchvision.ops import deform_conv2d
1234
+
1235
+
1236
+ class DeformableConv2d(nn.Module):
1237
+ def __init__(self,
1238
+ in_channels,
1239
+ out_channels,
1240
+ kernel_size=3,
1241
+ stride=1,
1242
+ padding=1,
1243
+ bias=False):
1244
+
1245
+ super(DeformableConv2d, self).__init__()
1246
+
1247
+ assert type(kernel_size) == tuple or type(kernel_size) == int
1248
+
1249
+ kernel_size = kernel_size if type(kernel_size) == tuple else (kernel_size, kernel_size)
1250
+ self.stride = stride if type(stride) == tuple else (stride, stride)
1251
+ self.padding = padding
1252
+
1253
+ self.offset_conv = nn.Conv2d(in_channels,
1254
+ 2 * kernel_size[0] * kernel_size[1],
1255
+ kernel_size=kernel_size,
1256
+ stride=stride,
1257
+ padding=self.padding,
1258
+ bias=True)
1259
+
1260
+ nn.init.constant_(self.offset_conv.weight, 0.)
1261
+ nn.init.constant_(self.offset_conv.bias, 0.)
1262
+
1263
+ self.modulator_conv = nn.Conv2d(in_channels,
1264
+ 1 * kernel_size[0] * kernel_size[1],
1265
+ kernel_size=kernel_size,
1266
+ stride=stride,
1267
+ padding=self.padding,
1268
+ bias=True)
1269
+
1270
+ nn.init.constant_(self.modulator_conv.weight, 0.)
1271
+ nn.init.constant_(self.modulator_conv.bias, 0.)
1272
+
1273
+ self.regular_conv = nn.Conv2d(in_channels,
1274
+ out_channels=out_channels,
1275
+ kernel_size=kernel_size,
1276
+ stride=stride,
1277
+ padding=self.padding,
1278
+ bias=bias)
1279
+
1280
+ def forward(self, x):
1281
+ #h, w = x.shape[2:]
1282
+ #max_offset = max(h, w)/4.
1283
+
1284
+ offset = self.offset_conv(x)#.clamp(-max_offset, max_offset)
1285
+ modulator = 2. * torch.sigmoid(self.modulator_conv(x))
1286
+
1287
+ x = deform_conv2d(
1288
+ input=x,
1289
+ offset=offset,
1290
+ weight=self.regular_conv.weight,
1291
+ bias=self.regular_conv.bias,
1292
+ padding=self.padding,
1293
+ mask=modulator,
1294
+ stride=self.stride,
1295
+ )
1296
+ return x
1297
+
1298
+
1299
+
1300
+
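A quick shape check of this module (a sketch, not part of the original file): because the offset and modulator convolutions are zero-initialized, the layer starts out behaving like a plain 3x3 convolution (zero offsets and a modulation mask of 2*sigmoid(0) = 1):

import torch

deform = DeformableConv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)
y = deform(torch.randn(2, 64, 32, 32))
print(y.shape)   # torch.Size([2, 64, 32, 32]) -- spatial size preserved with stride 1, padding 1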
1301
+ ### utils.py
1302
+
1303
+ import torch.nn as nn
1304
+
1305
+
1306
+ def build_act_layer(act_layer):
1307
+ if act_layer == 'ReLU':
1308
+ return nn.ReLU(inplace=True)
1309
+ elif act_layer == 'SiLU':
1310
+ return nn.SiLU(inplace=True)
1311
+ elif act_layer == 'GELU':
1312
+ return nn.GELU()
1313
+
1314
+ raise NotImplementedError(f'build_act_layer does not support {act_layer}')
1315
+
1316
+
1317
+ def build_norm_layer(dim,
1318
+ norm_layer,
1319
+ in_format='channels_last',
1320
+ out_format='channels_last',
1321
+ eps=1e-6):
1322
+ layers = []
1323
+ if norm_layer == 'BN':
1324
+ if in_format == 'channels_last':
1325
+ layers.append(to_channels_first())
1326
+ layers.append(nn.BatchNorm2d(dim))
1327
+ if out_format == 'channels_last':
1328
+ layers.append(to_channels_last())
1329
+ elif norm_layer == 'LN':
1330
+ if in_format == 'channels_first':
1331
+ layers.append(to_channels_last())
1332
+ layers.append(nn.LayerNorm(dim, eps=eps))
1333
+ if out_format == 'channels_first':
1334
+ layers.append(to_channels_first())
1335
+ else:
1336
+ raise NotImplementedError(
1337
+ f'build_norm_layer does not support {norm_layer}')
1338
+ return nn.Sequential(*layers)
1339
+
1340
+
1341
+ class to_channels_first(nn.Module):
1342
+
1343
+ def __init__(self):
1344
+ super().__init__()
1345
+
1346
+ def forward(self, x):
1347
+ return x.permute(0, 3, 1, 2)
1348
+
1349
+
1350
+ class to_channels_last(nn.Module):
1351
+
1352
+ def __init__(self):
1353
+ super().__init__()
1354
+
1355
+ def forward(self, x):
1356
+ return x.permute(0, 2, 3, 1)
1357
+
1358
+
1359
+
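A small usage sketch of build_norm_layer on a channels-first feature map (illustrative only): the to_channels_last/to_channels_first wrappers let nn.LayerNorm normalize over the channel dimension without manual reshaping:

import torch

norm = build_norm_layer(64, 'LN', in_format='channels_first', out_format='channels_first')
y = norm(torch.randn(2, 64, 16, 16))   # LayerNorm over the 64 channels; shape is preserved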
1360
+ ### dataset.py
1361
+
1362
+ _class_labels_TR_sorted = (
1363
+ 'Airplane, Ant, Antenna, Archery, Axe, BabyCarriage, Bag, BalanceBeam, Balcony, Balloon, Basket, BasketballHoop, Beatle, Bed, Bee, Bench, Bicycle, '
1364
+ 'BicycleFrame, BicycleStand, Boat, Bonsai, BoomLift, Bridge, BunkBed, Butterfly, Button, Cable, CableLift, Cage, Camcorder, Cannon, Canoe, Car, '
1365
+ 'CarParkDropArm, Carriage, Cart, Caterpillar, CeilingLamp, Centipede, Chair, Clip, Clock, Clothes, CoatHanger, Comb, ConcretePumpTruck, Crack, Crane, '
1366
+ 'Cup, DentalChair, Desk, DeskChair, Diagram, DishRack, DoorHandle, Dragonfish, Dragonfly, Drum, Earphone, Easel, ElectricIron, Excavator, Eyeglasses, '
1367
+ 'Fan, Fence, Fencing, FerrisWheel, FireExtinguisher, Fishing, Flag, FloorLamp, Forklift, GasStation, Gate, Gear, Goal, Golf, GymEquipment, Hammock, '
1368
+ 'Handcart, Handcraft, Handrail, HangGlider, Harp, Harvester, Headset, Helicopter, Helmet, Hook, HorizontalBar, Hydrovalve, IroningTable, Jewelry, Key, '
1369
+ 'KidsPlayground, Kitchenware, Kite, Knife, Ladder, LaundryRack, Lightning, Lobster, Locust, Machine, MachineGun, MagazineRack, Mantis, Medal, MemorialArchway, '
1370
+ 'Microphone, Missile, MobileHolder, Monitor, Mosquito, Motorcycle, MovingTrolley, Mower, MusicPlayer, MusicStand, ObservationTower, Octopus, OilWell, '
1371
+ 'OlympicLogo, OperatingTable, OutdoorFitnessEquipment, Parachute, Pavilion, Piano, Pipe, PlowHarrow, PoleVault, Punchbag, Rack, Racket, Rifle, Ring, Robot, '
1372
+ 'RockClimbing, Rope, Sailboat, Satellite, Scaffold, Scale, Scissor, Scooter, Sculpture, Seadragon, Seahorse, Seal, SewingMachine, Ship, Shoe, ShoppingCart, '
1373
+ 'ShoppingTrolley, Shower, Shrimp, Signboard, Skateboarding, Skeleton, Skiing, Spade, SpeedBoat, Spider, Spoon, Stair, Stand, Stationary, SteeringWheel, '
1374
+ 'Stethoscope, Stool, Stove, StreetLamp, SweetStand, Swing, Sword, TV, Table, TableChair, TableLamp, TableTennis, Tank, Tapeline, Teapot, Telescope, Tent, '
1375
+ 'TobaccoPipe, Toy, Tractor, TrafficLight, TrafficSign, Trampoline, TransmissionTower, Tree, Tricycle, TrimmerCover, Tripod, Trombone, Truck, Trumpet, Tuba, '
1376
+ 'UAV, Umbrella, UnevenBars, UtilityPole, VacuumCleaner, Violin, Wakesurfing, Watch, WaterTower, WateringPot, Well, WellLid, Wheel, Wheelchair, WindTurbine, Windmill, WineGlass, WireWhisk, Yacht'
1377
+ )
1378
+ class_labels_TR_sorted = _class_labels_TR_sorted.split(', ')
1379
+
1380
+
1381
+ ### models/backbones/build_backbones.py
1382
+
1383
+ import torch
1384
+ import torch.nn as nn
1385
+ from collections import OrderedDict
1386
+ from torchvision.models import vgg16, vgg16_bn, VGG16_Weights, VGG16_BN_Weights, resnet50, ResNet50_Weights
1387
+ # from models.pvt_v2 import pvt_v2_b0, pvt_v2_b1, pvt_v2_b2, pvt_v2_b5
1388
+ # from models.swin_v1 import swin_v1_t, swin_v1_s, swin_v1_b, swin_v1_l
1389
+ # from config import Config
1390
+
1391
+
1392
+ config = Config()
1393
+
1394
+ def build_backbone(bb_name, pretrained=True, params_settings=''):
1395
+ if bb_name == 'vgg16':
1396
+ bb_net = list(vgg16(pretrained=VGG16_Weights.DEFAULT if pretrained else None).children())[0]
1397
+ bb = nn.Sequential(OrderedDict({'conv1': bb_net[:4], 'conv2': bb_net[4:9], 'conv3': bb_net[9:16], 'conv4': bb_net[16:23]}))
1398
+ elif bb_name == 'vgg16bn':
1399
+ bb_net = list(vgg16_bn(pretrained=VGG16_BN_Weights.DEFAULT if pretrained else None).children())[0]
1400
+ bb = nn.Sequential(OrderedDict({'conv1': bb_net[:6], 'conv2': bb_net[6:13], 'conv3': bb_net[13:23], 'conv4': bb_net[23:33]}))
1401
+ elif bb_name == 'resnet50':
1402
+ bb_net = list(resnet50(pretrained=ResNet50_Weights.DEFAULT if pretrained else None).children())
1403
+ bb = nn.Sequential(OrderedDict({'conv1': nn.Sequential(*bb_net[0:3]), 'conv2': bb_net[4], 'conv3': bb_net[5], 'conv4': bb_net[6]}))
1404
+ else:
1405
+ bb = eval('{}({})'.format(bb_name, params_settings))
1406
+ if pretrained:
1407
+ bb = load_weights(bb, bb_name)
1408
+ return bb
1409
+
1410
+ def load_weights(model, model_name):
1411
+ save_model = torch.load(config.weights[model_name], map_location='cpu')
1412
+ model_dict = model.state_dict()
1413
+ state_dict = {k: v if v.size() == model_dict[k].size() else model_dict[k] for k, v in save_model.items() if k in model_dict.keys()}
1414
+ # ignore weights with mismatched sizes (e.g., when the backbone itself has been modified).
1415
+ if not state_dict:
1416
+ save_model_keys = list(save_model.keys())
1417
+ sub_item = save_model_keys[0] if len(save_model_keys) == 1 else None
1418
+ state_dict = {k: v if v.size() == model_dict[k].size() else model_dict[k] for k, v in save_model[sub_item].items() if k in model_dict.keys()}
1419
+ if not state_dict or not sub_item:
1420
+ print('Weights were not successfully loaded. Check the state dict of the weights file.')
1421
+ return None
1422
+ else:
1423
+ print('Found correct weights in the "{}" item of loaded state_dict.'.format(sub_item))
1424
+ model_dict.update(state_dict)
1425
+ model.load_state_dict(model_dict)
1426
+ return model
1427
+
1428
+
1429
+
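A hedged usage sketch, assuming the Config class defined earlier in this file supplies the weight paths; with pretrained=False the swin_v1_l branch simply builds the backbone and returns its four-stage features:

import torch

bb = build_backbone('swin_v1_l', pretrained=False)   # falls through to eval('swin_v1_l()')
x1, x2, x3, x4 = bb(torch.randn(1, 3, 1024, 1024))
print([t.shape[1] for t in (x1, x2, x3, x4)])        # [192, 384, 768, 1536]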
1430
+ ### models/modules/decoder_blocks.py
1431
+
1432
+ import torch
1433
+ import torch.nn as nn
1434
+ # from models.aspp import ASPP, ASPPDeformable
1435
+ # from config import Config
1436
+
1437
+
1438
+ # config = Config()
1439
+
1440
+
1441
+ class BasicDecBlk(nn.Module):
1442
+ def __init__(self, in_channels=64, out_channels=64, inter_channels=64):
1443
+ super(BasicDecBlk, self).__init__()
1444
+ inter_channels = in_channels // 4 if config.dec_channels_inter == 'adap' else 64
1445
+ self.conv_in = nn.Conv2d(in_channels, inter_channels, 3, 1, padding=1)
1446
+ self.relu_in = nn.ReLU(inplace=True)
1447
+ if config.dec_att == 'ASPP':
1448
+ self.dec_att = ASPP(in_channels=inter_channels)
1449
+ elif config.dec_att == 'ASPPDeformable':
1450
+ self.dec_att = ASPPDeformable(in_channels=inter_channels)
1451
+ self.conv_out = nn.Conv2d(inter_channels, out_channels, 3, 1, padding=1)
1452
+ self.bn_in = nn.BatchNorm2d(inter_channels) if config.batch_size > 1 else nn.Identity()
1453
+ self.bn_out = nn.BatchNorm2d(out_channels) if config.batch_size > 1 else nn.Identity()
1454
+
1455
+ def forward(self, x):
1456
+ x = self.conv_in(x)
1457
+ x = self.bn_in(x)
1458
+ x = self.relu_in(x)
1459
+ if hasattr(self, 'dec_att'):
1460
+ x = self.dec_att(x)
1461
+ x = self.conv_out(x)
1462
+ x = self.bn_out(x)
1463
+ return x
1464
+
1465
+
1466
+ class ResBlk(nn.Module):
1467
+ def __init__(self, in_channels=64, out_channels=None, inter_channels=64):
1468
+ super(ResBlk, self).__init__()
1469
+ if out_channels is None:
1470
+ out_channels = in_channels
1471
+ inter_channels = in_channels // 4 if config.dec_channels_inter == 'adap' else 64
1472
+
1473
+ self.conv_in = nn.Conv2d(in_channels, inter_channels, 3, 1, padding=1)
1474
+ self.bn_in = nn.BatchNorm2d(inter_channels) if config.batch_size > 1 else nn.Identity()
1475
+ self.relu_in = nn.ReLU(inplace=True)
1476
+
1477
+ if config.dec_att == 'ASPP':
1478
+ self.dec_att = ASPP(in_channels=inter_channels)
1479
+ elif config.dec_att == 'ASPPDeformable':
1480
+ self.dec_att = ASPPDeformable(in_channels=inter_channels)
1481
+
1482
+ self.conv_out = nn.Conv2d(inter_channels, out_channels, 3, 1, padding=1)
1483
+ self.bn_out = nn.BatchNorm2d(out_channels) if config.batch_size > 1 else nn.Identity()
1484
+
1485
+ self.conv_resi = nn.Conv2d(in_channels, out_channels, 1, 1, 0)
1486
+
1487
+ def forward(self, x):
1488
+ _x = self.conv_resi(x)
1489
+ x = self.conv_in(x)
1490
+ x = self.bn_in(x)
1491
+ x = self.relu_in(x)
1492
+ if hasattr(self, 'dec_att'):
1493
+ x = self.dec_att(x)
1494
+ x = self.conv_out(x)
1495
+ x = self.bn_out(x)
1496
+ return x + _x
1497
+
1498
+
1499
+
1500
+ ### models/modules/lateral_blocks.py
1501
+
1502
+ import numpy as np
1503
+ import torch
1504
+ import torch.nn as nn
1505
+ import torch.nn.functional as F
1506
+ from functools import partial
1507
+
1508
+ # from config import Config
1509
+
1510
+
1511
+ # config = Config()
1512
+
1513
+
1514
+ class BasicLatBlk(nn.Module):
1515
+ def __init__(self, in_channels=64, out_channels=64, inter_channels=64):
1516
+ super(BasicLatBlk, self).__init__()
1517
+ inter_channels = in_channels // 4 if config.dec_channels_inter == 'adap' else 64
1518
+ self.conv = nn.Conv2d(in_channels, out_channels, 1, 1, 0)
1519
+
1520
+ def forward(self, x):
1521
+ x = self.conv(x)
1522
+ return x
1523
+
1524
+
1525
+
1526
+ ### models/modules/aspp.py
1527
+
1528
+ import torch
1529
+ import torch.nn as nn
1530
+ import torch.nn.functional as F
1531
+ # from models.deform_conv import DeformableConv2d
1532
+ # from config import Config
1533
+
1534
+
1535
+ # config = Config()
1536
+
1537
+
1538
+ class _ASPPModule(nn.Module):
1539
+ def __init__(self, in_channels, planes, kernel_size, padding, dilation):
1540
+ super(_ASPPModule, self).__init__()
1541
+ self.atrous_conv = nn.Conv2d(in_channels, planes, kernel_size=kernel_size,
1542
+ stride=1, padding=padding, dilation=dilation, bias=False)
1543
+ self.bn = nn.BatchNorm2d(planes) if config.batch_size > 1 else nn.Identity()
1544
+ self.relu = nn.ReLU(inplace=True)
1545
+
1546
+ def forward(self, x):
1547
+ x = self.atrous_conv(x)
1548
+ x = self.bn(x)
1549
+
1550
+ return self.relu(x)
1551
+
1552
+
1553
+ class ASPP(nn.Module):
1554
+ def __init__(self, in_channels=64, out_channels=None, output_stride=16):
1555
+ super(ASPP, self).__init__()
1556
+ self.down_scale = 1
1557
+ if out_channels is None:
1558
+ out_channels = in_channels
1559
+ self.in_channelster = 256 // self.down_scale
1560
+ if output_stride == 16:
1561
+ dilations = [1, 6, 12, 18]
1562
+ elif output_stride == 8:
1563
+ dilations = [1, 12, 24, 36]
1564
+ else:
1565
+ raise NotImplementedError
1566
+
1567
+ self.aspp1 = _ASPPModule(in_channels, self.in_channelster, 1, padding=0, dilation=dilations[0])
1568
+ self.aspp2 = _ASPPModule(in_channels, self.in_channelster, 3, padding=dilations[1], dilation=dilations[1])
1569
+ self.aspp3 = _ASPPModule(in_channels, self.in_channelster, 3, padding=dilations[2], dilation=dilations[2])
1570
+ self.aspp4 = _ASPPModule(in_channels, self.in_channelster, 3, padding=dilations[3], dilation=dilations[3])
1571
+
1572
+ self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
1573
+ nn.Conv2d(in_channels, self.in_channelster, 1, stride=1, bias=False),
1574
+ nn.BatchNorm2d(self.in_channelster) if config.batch_size > 1 else nn.Identity(),
1575
+ nn.ReLU(inplace=True))
1576
+ self.conv1 = nn.Conv2d(self.in_channelster * 5, out_channels, 1, bias=False)
1577
+ self.bn1 = nn.BatchNorm2d(out_channels) if config.batch_size > 1 else nn.Identity()
1578
+ self.relu = nn.ReLU(inplace=True)
1579
+ self.dropout = nn.Dropout(0.5)
1580
+
1581
+ def forward(self, x):
1582
+ x1 = self.aspp1(x)
1583
+ x2 = self.aspp2(x)
1584
+ x3 = self.aspp3(x)
1585
+ x4 = self.aspp4(x)
1586
+ x5 = self.global_avg_pool(x)
1587
+ x5 = F.interpolate(x5, size=x1.size()[2:], mode='bilinear', align_corners=True)
1588
+ x = torch.cat((x1, x2, x3, x4, x5), dim=1)
1589
+
1590
+ x = self.conv1(x)
1591
+ x = self.bn1(x)
1592
+ x = self.relu(x)
1593
+
1594
+ return self.dropout(x)
1595
+
1596
+
1597
+ ##################### Deformable
1598
+ class _ASPPModuleDeformable(nn.Module):
1599
+ def __init__(self, in_channels, planes, kernel_size, padding):
1600
+ super(_ASPPModuleDeformable, self).__init__()
1601
+ self.atrous_conv = DeformableConv2d(in_channels, planes, kernel_size=kernel_size,
1602
+ stride=1, padding=padding, bias=False)
1603
+ self.bn = nn.BatchNorm2d(planes) if config.batch_size > 1 else nn.Identity()
1604
+ self.relu = nn.ReLU(inplace=True)
1605
+
1606
+ def forward(self, x):
1607
+ x = self.atrous_conv(x)
1608
+ x = self.bn(x)
1609
+
1610
+ return self.relu(x)
1611
+
1612
+
1613
+ class ASPPDeformable(nn.Module):
1614
+ def __init__(self, in_channels, out_channels=None, parallel_block_sizes=[1, 3, 7]):
1615
+ super(ASPPDeformable, self).__init__()
1616
+ self.down_scale = 1
1617
+ if out_channels is None:
1618
+ out_channels = in_channels
1619
+ self.in_channelster = 256 // self.down_scale
1620
+
1621
+ self.aspp1 = _ASPPModuleDeformable(in_channels, self.in_channelster, 1, padding=0)
1622
+ self.aspp_deforms = nn.ModuleList([
1623
+ _ASPPModuleDeformable(in_channels, self.in_channelster, conv_size, padding=int(conv_size//2)) for conv_size in parallel_block_sizes
1624
+ ])
1625
+
1626
+ self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
1627
+ nn.Conv2d(in_channels, self.in_channelster, 1, stride=1, bias=False),
1628
+ nn.BatchNorm2d(self.in_channelster) if config.batch_size > 1 else nn.Identity(),
1629
+ nn.ReLU(inplace=True))
1630
+ self.conv1 = nn.Conv2d(self.in_channelster * (2 + len(self.aspp_deforms)), out_channels, 1, bias=False)
1631
+ self.bn1 = nn.BatchNorm2d(out_channels) if config.batch_size > 1 else nn.Identity()
1632
+ self.relu = nn.ReLU(inplace=True)
1633
+ self.dropout = nn.Dropout(0.5)
1634
+
1635
+ def forward(self, x):
1636
+ x1 = self.aspp1(x)
1637
+ x_aspp_deforms = [aspp_deform(x) for aspp_deform in self.aspp_deforms]
1638
+ x5 = self.global_avg_pool(x)
1639
+ x5 = F.interpolate(x5, size=x1.size()[2:], mode='bilinear', align_corners=True)
1640
+ x = torch.cat((x1, *x_aspp_deforms, x5), dim=1)
1641
+
1642
+ x = self.conv1(x)
1643
+ x = self.bn1(x)
1644
+ x = self.relu(x)
1645
+
1646
+ return self.dropout(x)
1647
+
1648
+
1649
+
1650
+ ### models/refinement/refiner.py
1651
+
1652
+ import torch
1653
+ import torch.nn as nn
1654
+ from collections import OrderedDict
1655
+ import torch
1656
+ import torch.nn as nn
1657
+ import torch.nn.functional as F
1658
+ from torchvision.models import vgg16, vgg16_bn
1659
+ from torchvision.models import resnet50
1660
+
1661
+ # from config import Config
1662
+ # from dataset import class_labels_TR_sorted
1663
+ # from models.build_backbone import build_backbone
1664
+ # from models.decoder_blocks import BasicDecBlk
1665
+ # from models.lateral_blocks import BasicLatBlk
1666
+ # from models.ing import *
1667
+ # from models.stem_layer import StemLayer
1668
+
1669
+
1670
+ class RefinerPVTInChannels4(nn.Module):
1671
+ def __init__(self, in_channels=3+1):
1672
+ super(RefinerPVTInChannels4, self).__init__()
1673
+ self.config = Config()
1674
+ self.epoch = 1
1675
+ self.bb = build_backbone(self.config.bb, params_settings='in_channels=4')
1676
+
1677
+ lateral_channels_in_collection = {
1678
+ 'vgg16': [512, 256, 128, 64], 'vgg16bn': [512, 256, 128, 64], 'resnet50': [1024, 512, 256, 64],
1679
+ 'pvt_v2_b2': [512, 320, 128, 64], 'pvt_v2_b5': [512, 320, 128, 64],
1680
+ 'swin_v1_b': [1024, 512, 256, 128], 'swin_v1_l': [1536, 768, 384, 192],
1681
+ }
1682
+ channels = lateral_channels_in_collection[self.config.bb]
1683
+ self.squeeze_module = BasicDecBlk(channels[0], channels[0])
1684
+
1685
+ self.decoder = Decoder(channels)
1686
+
1687
+ if 0:
1688
+ for key, value in self.named_parameters():
1689
+ if 'bb.' in key:
1690
+ value.requires_grad = False
1691
+
1692
+ def forward(self, x):
1693
+ if isinstance(x, list):
1694
+ x = torch.cat(x, dim=1)
1695
+ ########## Encoder ##########
1696
+ if self.config.bb in ['vgg16', 'vgg16bn', 'resnet50']:
1697
+ x1 = self.bb.conv1(x)
1698
+ x2 = self.bb.conv2(x1)
1699
+ x3 = self.bb.conv3(x2)
1700
+ x4 = self.bb.conv4(x3)
1701
+ else:
1702
+ x1, x2, x3, x4 = self.bb(x)
1703
+
1704
+ x4 = self.squeeze_module(x4)
1705
+
1706
+ ########## Decoder ##########
1707
+
1708
+ features = [x, x1, x2, x3, x4]
1709
+ scaled_preds = self.decoder(features)
1710
+
1711
+ return scaled_preds
1712
+
1713
+
1714
+ class Refiner(nn.Module):
1715
+ def __init__(self, in_channels=3+1):
1716
+ super(Refiner, self).__init__()
1717
+ self.config = Config()
1718
+ self.epoch = 1
1719
+ self.stem_layer = StemLayer(in_channels=in_channels, inter_channels=48, out_channels=3, norm_layer='BN' if self.config.batch_size > 1 else 'LN')
1720
+ self.bb = build_backbone(self.config.bb)
1721
+
1722
+ lateral_channels_in_collection = {
1723
+ 'vgg16': [512, 256, 128, 64], 'vgg16bn': [512, 256, 128, 64], 'resnet50': [1024, 512, 256, 64],
1724
+ 'pvt_v2_b2': [512, 320, 128, 64], 'pvt_v2_b5': [512, 320, 128, 64],
1725
+ 'swin_v1_b': [1024, 512, 256, 128], 'swin_v1_l': [1536, 768, 384, 192],
1726
+ }
1727
+ channels = lateral_channels_in_collection[self.config.bb]
1728
+ self.squeeze_module = BasicDecBlk(channels[0], channels[0])
1729
+
1730
+ self.decoder = Decoder(channels)
1731
+
1732
+ if 0:
1733
+ for key, value in self.named_parameters():
1734
+ if 'bb.' in key:
1735
+ value.requires_grad = False
1736
+
1737
+ def forward(self, x):
1738
+ if isinstance(x, list):
1739
+ x = torch.cat(x, dim=1)
1740
+ x = self.stem_layer(x)
1741
+ ########## Encoder ##########
1742
+ if self.config.bb in ['vgg16', 'vgg16bn', 'resnet50']:
1743
+ x1 = self.bb.conv1(x)
1744
+ x2 = self.bb.conv2(x1)
1745
+ x3 = self.bb.conv3(x2)
1746
+ x4 = self.bb.conv4(x3)
1747
+ else:
1748
+ x1, x2, x3, x4 = self.bb(x)
1749
+
1750
+ x4 = self.squeeze_module(x4)
1751
+
1752
+ ########## Decoder ##########
1753
+
1754
+ features = [x, x1, x2, x3, x4]
1755
+ scaled_preds = self.decoder(features)
1756
+
1757
+ return scaled_preds
1758
+
1759
+
1760
+ class Decoder(nn.Module):
1761
+ def __init__(self, channels):
1762
+ super(Decoder, self).__init__()
1763
+ self.config = Config()
1764
+ DecoderBlock = eval('BasicDecBlk')
1765
+ LateralBlock = eval('BasicLatBlk')
1766
+
1767
+ self.decoder_block4 = DecoderBlock(channels[0], channels[1])
1768
+ self.decoder_block3 = DecoderBlock(channels[1], channels[2])
1769
+ self.decoder_block2 = DecoderBlock(channels[2], channels[3])
1770
+ self.decoder_block1 = DecoderBlock(channels[3], channels[3]//2)
1771
+
1772
+ self.lateral_block4 = LateralBlock(channels[1], channels[1])
1773
+ self.lateral_block3 = LateralBlock(channels[2], channels[2])
1774
+ self.lateral_block2 = LateralBlock(channels[3], channels[3])
1775
+
1776
+ if self.config.ms_supervision:
1777
+ self.conv_ms_spvn_4 = nn.Conv2d(channels[1], 1, 1, 1, 0)
1778
+ self.conv_ms_spvn_3 = nn.Conv2d(channels[2], 1, 1, 1, 0)
1779
+ self.conv_ms_spvn_2 = nn.Conv2d(channels[3], 1, 1, 1, 0)
1780
+ self.conv_out1 = nn.Sequential(nn.Conv2d(channels[3]//2, 1, 1, 1, 0))
1781
+
1782
+ def forward(self, features):
1783
+ x, x1, x2, x3, x4 = features
1784
+ outs = []
1785
+ p4 = self.decoder_block4(x4)
1786
+ _p4 = F.interpolate(p4, size=x3.shape[2:], mode='bilinear', align_corners=True)
1787
+ _p3 = _p4 + self.lateral_block4(x3)
1788
+
1789
+ p3 = self.decoder_block3(_p3)
1790
+ _p3 = F.interpolate(p3, size=x2.shape[2:], mode='bilinear', align_corners=True)
1791
+ _p2 = _p3 + self.lateral_block3(x2)
1792
+
1793
+ p2 = self.decoder_block2(_p2)
1794
+ _p2 = F.interpolate(p2, size=x1.shape[2:], mode='bilinear', align_corners=True)
1795
+ _p1 = _p2 + self.lateral_block2(x1)
1796
+
1797
+ _p1 = self.decoder_block1(_p1)
1798
+ _p1 = F.interpolate(_p1, size=x.shape[2:], mode='bilinear', align_corners=True)
1799
+ p1_out = self.conv_out1(_p1)
1800
+
1801
+ if self.config.ms_supervision:
1802
+ outs.append(self.conv_ms_spvn_4(p4))
1803
+ outs.append(self.conv_ms_spvn_3(p3))
1804
+ outs.append(self.conv_ms_spvn_2(p2))
1805
+ outs.append(p1_out)
1806
+ return outs
1807
+
1808
+
1809
+ class RefUNet(nn.Module):
1810
+ # Refinement
1811
+ def __init__(self, in_channels=3+1):
1812
+ super(RefUNet, self).__init__()
1813
+ self.encoder_1 = nn.Sequential(
1814
+ nn.Conv2d(in_channels, 64, 3, 1, 1),
1815
+ nn.Conv2d(64, 64, 3, 1, 1),
1816
+ nn.BatchNorm2d(64),
1817
+ nn.ReLU(inplace=True)
1818
+ )
1819
+
1820
+ self.encoder_2 = nn.Sequential(
1821
+ nn.MaxPool2d(2, 2, ceil_mode=True),
1822
+ nn.Conv2d(64, 64, 3, 1, 1),
1823
+ nn.BatchNorm2d(64),
1824
+ nn.ReLU(inplace=True)
1825
+ )
1826
+
1827
+ self.encoder_3 = nn.Sequential(
1828
+ nn.MaxPool2d(2, 2, ceil_mode=True),
1829
+ nn.Conv2d(64, 64, 3, 1, 1),
1830
+ nn.BatchNorm2d(64),
1831
+ nn.ReLU(inplace=True)
1832
+ )
1833
+
1834
+ self.encoder_4 = nn.Sequential(
1835
+ nn.MaxPool2d(2, 2, ceil_mode=True),
1836
+ nn.Conv2d(64, 64, 3, 1, 1),
1837
+ nn.BatchNorm2d(64),
1838
+ nn.ReLU(inplace=True)
1839
+ )
1840
+
1841
+ self.pool4 = nn.MaxPool2d(2, 2, ceil_mode=True)
1842
+ #####
1843
+ self.decoder_5 = nn.Sequential(
1844
+ nn.Conv2d(64, 64, 3, 1, 1),
1845
+ nn.BatchNorm2d(64),
1846
+ nn.ReLU(inplace=True)
1847
+ )
1848
+ #####
1849
+ self.decoder_4 = nn.Sequential(
1850
+ nn.Conv2d(128, 64, 3, 1, 1),
1851
+ nn.BatchNorm2d(64),
1852
+ nn.ReLU(inplace=True)
1853
+ )
1854
+
1855
+ self.decoder_3 = nn.Sequential(
1856
+ nn.Conv2d(128, 64, 3, 1, 1),
1857
+ nn.BatchNorm2d(64),
1858
+ nn.ReLU(inplace=True)
1859
+ )
1860
+
1861
+ self.decoder_2 = nn.Sequential(
1862
+ nn.Conv2d(128, 64, 3, 1, 1),
1863
+ nn.BatchNorm2d(64),
1864
+ nn.ReLU(inplace=True)
1865
+ )
1866
+
1867
+ self.decoder_1 = nn.Sequential(
1868
+ nn.Conv2d(128, 64, 3, 1, 1),
1869
+ nn.BatchNorm2d(64),
1870
+ nn.ReLU(inplace=True)
1871
+ )
1872
+
1873
+ self.conv_d0 = nn.Conv2d(64, 1, 3, 1, 1)
1874
+
1875
+ self.upscore2 = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
1876
+
1877
+ def forward(self, x):
1878
+ outs = []
1879
+ if isinstance(x, list):
1880
+ x = torch.cat(x, dim=1)
1881
+ hx = x
1882
+
1883
+ hx1 = self.encoder_1(hx)
1884
+ hx2 = self.encoder_2(hx1)
1885
+ hx3 = self.encoder_3(hx2)
1886
+ hx4 = self.encoder_4(hx3)
1887
+
1888
+ hx = self.decoder_5(self.pool4(hx4))
1889
+ hx = torch.cat((self.upscore2(hx), hx4), 1)
1890
+
1891
+ d4 = self.decoder_4(hx)
1892
+ hx = torch.cat((self.upscore2(d4), hx3), 1)
1893
+
1894
+ d3 = self.decoder_3(hx)
1895
+ hx = torch.cat((self.upscore2(d3), hx2), 1)
1896
+
1897
+ d2 = self.decoder_2(hx)
1898
+ hx = torch.cat((self.upscore2(d2), hx1), 1)
1899
+
1900
+ d1 = self.decoder_1(hx)
1901
+
1902
+ x = self.conv_d0(d1)
1903
+ outs.append(x)
1904
+ return outs
1905
+
1906
+
1907
+
1908
+ ### models/stem_layer.py
1909
+
1910
+ import torch.nn as nn
1911
+ # from utils import build_act_layer, build_norm_layer
1912
+
1913
+
1914
+ class StemLayer(nn.Module):
1915
+ r""" Stem layer of InternImage
1916
+ Args:
1917
+ in_channels (int): number of input channels
1918
+ out_channels (int): number of output channels
1919
+ act_layer (str): activation layer
1920
+ norm_layer (str): normalization layer
1921
+ """
1922
+
1923
+ def __init__(self,
1924
+ in_channels=3+1,
1925
+ inter_channels=48,
1926
+ out_channels=96,
1927
+ act_layer='GELU',
1928
+ norm_layer='BN'):
1929
+ super().__init__()
1930
+ self.conv1 = nn.Conv2d(in_channels,
1931
+ inter_channels,
1932
+ kernel_size=3,
1933
+ stride=1,
1934
+ padding=1)
1935
+ self.norm1 = build_norm_layer(
1936
+ inter_channels, norm_layer, 'channels_first', 'channels_first'
1937
+ )
1938
+ self.act = build_act_layer(act_layer)
1939
+ self.conv2 = nn.Conv2d(inter_channels,
1940
+ out_channels,
1941
+ kernel_size=3,
1942
+ stride=1,
1943
+ padding=1)
1944
+ self.norm2 = build_norm_layer(
1945
+ out_channels, norm_layer, 'channels_first', 'channels_first'
1946
+ )
1947
+
1948
+ def forward(self, x):
1949
+ x = self.conv1(x)
1950
+ x = self.norm1(x)
1951
+ x = self.act(x)
1952
+ x = self.conv2(x)
1953
+ x = self.norm2(x)
1954
+ return x
1955
+
1956
+
1957
+ ### models/birefnet.py
1958
+
1959
+ import torch
1960
+ import torch.nn as nn
1961
+ import torch.nn.functional as F
1962
+ from kornia.filters import laplacian
1963
+ from transformers import PreTrainedModel
1964
+
1965
+ # from config import Config
1966
+ # from dataset import class_labels_TR_sorted
1967
+ # from models.build_backbone import build_backbone
1968
+ # from models.decoder_blocks import BasicDecBlk, ResBlk, HierarAttDecBlk
1969
+ # from models.lateral_blocks import BasicLatBlk
1970
+ # from models.aspp import ASPP, ASPPDeformable
1971
+ # from models.ing import *
1972
+ # from models.refiner import Refiner, RefinerPVTInChannels4, RefUNet
1973
+ # from models.stem_layer import StemLayer
1974
+ from .BiRefNet_config import BiRefNetConfig
1975
+
1976
+
1977
+ class BiRefNet(
1978
+ PreTrainedModel
1979
+ ):
1980
+ config_class = BiRefNetConfig
1981
+ def __init__(self, bb_pretrained=True, config=BiRefNetConfig()):
1982
+ super(BiRefNet, self).__init__(config)
1983
+ bb_pretrained = config.bb_pretrained
1984
+ self.config = Config()
1985
+ self.epoch = 1
1986
+ self.bb = build_backbone(self.config.bb, pretrained=bb_pretrained)
1987
+
1988
+ channels = self.config.lateral_channels_in_collection
1989
+
1990
+ if self.config.auxiliary_classification:
1991
+ self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
1992
+ self.cls_head = nn.Sequential(
1993
+ nn.Linear(channels[0], len(class_labels_TR_sorted))
1994
+ )
1995
+
1996
+ if self.config.squeeze_block:
1997
+ self.squeeze_module = nn.Sequential(*[
1998
+ eval(self.config.squeeze_block.split('_x')[0])(channels[0]+sum(self.config.cxt), channels[0])
1999
+ for _ in range(eval(self.config.squeeze_block.split('_x')[1]))
2000
+ ])
2001
+
2002
+ self.decoder = Decoder(channels)
2003
+
2004
+ if self.config.ender:
2005
+ self.dec_end = nn.Sequential(
2006
+ nn.Conv2d(1, 16, 3, 1, 1),
2007
+ nn.Conv2d(16, 1, 3, 1, 1),
2008
+ nn.ReLU(inplace=True),
2009
+ )
2010
+
2011
+ # refine patch-level segmentation
2012
+ if self.config.refine:
2013
+ if self.config.refine == 'itself':
2014
+ self.stem_layer = StemLayer(in_channels=3+1, inter_channels=48, out_channels=3, norm_layer='BN' if self.config.batch_size > 1 else 'LN')
2015
+ else:
2016
+ self.refiner = eval('{}({})'.format(self.config.refine, 'in_channels=3+1'))
2017
+
2018
+ if self.config.freeze_bb:
2019
+ # Freeze the backbone...
2020
+ print(self.named_parameters())
2021
+ for key, value in self.named_parameters():
2022
+ if 'bb.' in key and 'refiner.' not in key:
2023
+ value.requires_grad = False
2024
+
2025
+ def forward_enc(self, x):
2026
+ if self.config.bb in ['vgg16', 'vgg16bn', 'resnet50']:
2027
+ x1 = self.bb.conv1(x); x2 = self.bb.conv2(x1); x3 = self.bb.conv3(x2); x4 = self.bb.conv4(x3)
2028
+ else:
2029
+ x1, x2, x3, x4 = self.bb(x)
2030
+ if self.config.mul_scl_ipt == 'cat':
2031
+ B, C, H, W = x.shape
2032
+ x1_, x2_, x3_, x4_ = self.bb(F.interpolate(x, size=(H//2, W//2), mode='bilinear', align_corners=True))
2033
+ x1 = torch.cat([x1, F.interpolate(x1_, size=x1.shape[2:], mode='bilinear', align_corners=True)], dim=1)
2034
+ x2 = torch.cat([x2, F.interpolate(x2_, size=x2.shape[2:], mode='bilinear', align_corners=True)], dim=1)
2035
+ x3 = torch.cat([x3, F.interpolate(x3_, size=x3.shape[2:], mode='bilinear', align_corners=True)], dim=1)
2036
+ x4 = torch.cat([x4, F.interpolate(x4_, size=x4.shape[2:], mode='bilinear', align_corners=True)], dim=1)
2037
+ elif self.config.mul_scl_ipt == 'add':
2038
+ B, C, H, W = x.shape
2039
+ x1_, x2_, x3_, x4_ = self.bb(F.interpolate(x, size=(H//2, W//2), mode='bilinear', align_corners=True))
2040
+ x1 = x1 + F.interpolate(x1_, size=x1.shape[2:], mode='bilinear', align_corners=True)
2041
+ x2 = x2 + F.interpolate(x2_, size=x2.shape[2:], mode='bilinear', align_corners=True)
2042
+ x3 = x3 + F.interpolate(x3_, size=x3.shape[2:], mode='bilinear', align_corners=True)
2043
+ x4 = x4 + F.interpolate(x4_, size=x4.shape[2:], mode='bilinear', align_corners=True)
2044
+ class_preds = self.cls_head(self.avgpool(x4).view(x4.shape[0], -1)) if self.training and self.config.auxiliary_classification else None
2045
+ if self.config.cxt:
2046
+ x4 = torch.cat(
2047
+ (
2048
+ *[
2049
+ F.interpolate(x1, size=x4.shape[2:], mode='bilinear', align_corners=True),
2050
+ F.interpolate(x2, size=x4.shape[2:], mode='bilinear', align_corners=True),
2051
+ F.interpolate(x3, size=x4.shape[2:], mode='bilinear', align_corners=True),
2052
+ ][-len(self.config.cxt):],
2053
+ x4
2054
+ ),
2055
+ dim=1
2056
+ )
2057
+ return (x1, x2, x3, x4), class_preds
2058
+
2059
+ def forward_ori(self, x):
2060
+ ########## Encoder ##########
2061
+ (x1, x2, x3, x4), class_preds = self.forward_enc(x)
2062
+ if self.config.squeeze_block:
2063
+ x4 = self.squeeze_module(x4)
2064
+ ########## Decoder ##########
2065
+ features = [x, x1, x2, x3, x4]
2066
+ if self.training and self.config.out_ref:
2067
+ features.append(laplacian(torch.mean(x, dim=1).unsqueeze(1), kernel_size=5))
2068
+ scaled_preds = self.decoder(features)
2069
+ return scaled_preds, class_preds
2070
+
2071
+ def forward(self, x):
2072
+ scaled_preds, class_preds = self.forward_ori(x)
2073
+ class_preds_lst = [class_preds]
2074
+ return [scaled_preds, class_preds_lst] if self.training else scaled_preds
2075
+
2076
+
2077
+ class Decoder(nn.Module):
2078
+ def __init__(self, channels):
2079
+ super(Decoder, self).__init__()
2080
+ self.config = Config()
2081
+ DecoderBlock = eval(self.config.dec_blk)
2082
+ LateralBlock = eval(self.config.lat_blk)
2083
+
2084
+ if self.config.dec_ipt:
2085
+ self.split = self.config.dec_ipt_split
2086
+ N_dec_ipt = 64
2087
+ DBlock = SimpleConvs
2088
+ ic = 64
2089
+ ipt_cha_opt = 1
2090
+ self.ipt_blk5 = DBlock(2**10*3 if self.split else 3, [N_dec_ipt, channels[0]//8][ipt_cha_opt], inter_channels=ic)
2091
+ self.ipt_blk4 = DBlock(2**8*3 if self.split else 3, [N_dec_ipt, channels[0]//8][ipt_cha_opt], inter_channels=ic)
2092
+ self.ipt_blk3 = DBlock(2**6*3 if self.split else 3, [N_dec_ipt, channels[1]//8][ipt_cha_opt], inter_channels=ic)
2093
+ self.ipt_blk2 = DBlock(2**4*3 if self.split else 3, [N_dec_ipt, channels[2]//8][ipt_cha_opt], inter_channels=ic)
2094
+ self.ipt_blk1 = DBlock(2**0*3 if self.split else 3, [N_dec_ipt, channels[3]//8][ipt_cha_opt], inter_channels=ic)
2095
+ else:
2096
+ self.split = None
2097
+
2098
+ self.decoder_block4 = DecoderBlock(channels[0]+([N_dec_ipt, channels[0]//8][ipt_cha_opt] if self.config.dec_ipt else 0), channels[1])
2099
+ self.decoder_block3 = DecoderBlock(channels[1]+([N_dec_ipt, channels[0]//8][ipt_cha_opt] if self.config.dec_ipt else 0), channels[2])
2100
+ self.decoder_block2 = DecoderBlock(channels[2]+([N_dec_ipt, channels[1]//8][ipt_cha_opt] if self.config.dec_ipt else 0), channels[3])
2101
+ self.decoder_block1 = DecoderBlock(channels[3]+([N_dec_ipt, channels[2]//8][ipt_cha_opt] if self.config.dec_ipt else 0), channels[3]//2)
2102
+ self.conv_out1 = nn.Sequential(nn.Conv2d(channels[3]//2+([N_dec_ipt, channels[3]//8][ipt_cha_opt] if self.config.dec_ipt else 0), 1, 1, 1, 0))
2103
+
2104
+ self.lateral_block4 = LateralBlock(channels[1], channels[1])
2105
+ self.lateral_block3 = LateralBlock(channels[2], channels[2])
2106
+ self.lateral_block2 = LateralBlock(channels[3], channels[3])
2107
+
2108
+ if self.config.ms_supervision:
2109
+ self.conv_ms_spvn_4 = nn.Conv2d(channels[1], 1, 1, 1, 0)
2110
+ self.conv_ms_spvn_3 = nn.Conv2d(channels[2], 1, 1, 1, 0)
2111
+ self.conv_ms_spvn_2 = nn.Conv2d(channels[3], 1, 1, 1, 0)
2112
+
2113
+ if self.config.out_ref:
2114
+ _N = 16
2115
+ self.gdt_convs_4 = nn.Sequential(nn.Conv2d(channels[1], _N, 3, 1, 1), nn.BatchNorm2d(_N) if self.config.batch_size > 1 else nn.Identity(), nn.ReLU(inplace=True))
2116
+ self.gdt_convs_3 = nn.Sequential(nn.Conv2d(channels[2], _N, 3, 1, 1), nn.BatchNorm2d(_N) if self.config.batch_size > 1 else nn.Identity(), nn.ReLU(inplace=True))
2117
+ self.gdt_convs_2 = nn.Sequential(nn.Conv2d(channels[3], _N, 3, 1, 1), nn.BatchNorm2d(_N) if self.config.batch_size > 1 else nn.Identity(), nn.ReLU(inplace=True))
2118
+
2119
+ self.gdt_convs_pred_4 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
2120
+ self.gdt_convs_pred_3 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
2121
+ self.gdt_convs_pred_2 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
2122
+
2123
+ self.gdt_convs_attn_4 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
2124
+ self.gdt_convs_attn_3 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
2125
+ self.gdt_convs_attn_2 = nn.Sequential(nn.Conv2d(_N, 1, 1, 1, 0))
2126
+
2127
+ def get_patches_batch(self, x, p):
2128
+ _size_h, _size_w = p.shape[2:]
2129
+ patches_batch = []
2130
+ for idx in range(x.shape[0]):
2131
+ columns_x = torch.split(x[idx], split_size_or_sections=_size_w, dim=-1)
2132
+ patches_x = []
2133
+ for column_x in columns_x:
2134
+ patches_x += [p.unsqueeze(0) for p in torch.split(column_x, split_size_or_sections=_size_h, dim=-2)]
2135
+ patch_sample = torch.cat(patches_x, dim=1)
2136
+ patches_batch.append(patch_sample)
2137
+ return torch.cat(patches_batch, dim=0)
2138
+
2139
+ def forward(self, features):
2140
+ if self.training and self.config.out_ref:
2141
+ outs_gdt_pred = []
2142
+ outs_gdt_label = []
2143
+ x, x1, x2, x3, x4, gdt_gt = features
2144
+ else:
2145
+ x, x1, x2, x3, x4 = features
2146
+ outs = []
2147
+
2148
+ if self.config.dec_ipt:
2149
+ patches_batch = self.get_patches_batch(x, x4) if self.split else x
2150
+ x4 = torch.cat((x4, self.ipt_blk5(F.interpolate(patches_batch, size=x4.shape[2:], mode='bilinear', align_corners=True))), 1)
2151
+ p4 = self.decoder_block4(x4)
2152
+ m4 = self.conv_ms_spvn_4(p4) if self.config.ms_supervision else None
2153
+ if self.config.out_ref:
2154
+ p4_gdt = self.gdt_convs_4(p4)
2155
+ if self.training:
2156
+ # >> GT:
2157
+ m4_dia = m4
2158
+ gdt_label_main_4 = gdt_gt * F.interpolate(m4_dia, size=gdt_gt.shape[2:], mode='bilinear', align_corners=True)
2159
+ outs_gdt_label.append(gdt_label_main_4)
2160
+ # >> Pred:
2161
+ gdt_pred_4 = self.gdt_convs_pred_4(p4_gdt)
2162
+ outs_gdt_pred.append(gdt_pred_4)
2163
+ gdt_attn_4 = self.gdt_convs_attn_4(p4_gdt).sigmoid()
2164
+ # >> Finally:
2165
+ p4 = p4 * gdt_attn_4
2166
+ _p4 = F.interpolate(p4, size=x3.shape[2:], mode='bilinear', align_corners=True)
2167
+ _p3 = _p4 + self.lateral_block4(x3)
2168
+
2169
+ if self.config.dec_ipt:
2170
+ patches_batch = self.get_patches_batch(x, _p3) if self.split else x
2171
+ _p3 = torch.cat((_p3, self.ipt_blk4(F.interpolate(patches_batch, size=x3.shape[2:], mode='bilinear', align_corners=True))), 1)
2172
+ p3 = self.decoder_block3(_p3)
2173
+ m3 = self.conv_ms_spvn_3(p3) if self.config.ms_supervision else None
2174
+ if self.config.out_ref:
2175
+ p3_gdt = self.gdt_convs_3(p3)
2176
+ if self.training:
2177
+ # >> GT:
2178
+ # m3 --dilation--> m3_dia
2179
+ # G_3^gt * m3_dia --> G_3^m, which is the label of gradient
2180
+ m3_dia = m3
2181
+ gdt_label_main_3 = gdt_gt * F.interpolate(m3_dia, size=gdt_gt.shape[2:], mode='bilinear', align_corners=True)
2182
+ outs_gdt_label.append(gdt_label_main_3)
2183
+ # >> Pred:
2184
+ # p3 --conv--BN--> F_3^G, where F_3^G predicts the \hat{G_3} with xx
2185
+ # F_3^G --sigmoid--> A_3^G
2186
+ gdt_pred_3 = self.gdt_convs_pred_3(p3_gdt)
2187
+ outs_gdt_pred.append(gdt_pred_3)
2188
+ gdt_attn_3 = self.gdt_convs_attn_3(p3_gdt).sigmoid()
2189
+ # >> Finally:
2190
+ # p3 = p3 * A_3^G
2191
+ p3 = p3 * gdt_attn_3
2192
+ _p3 = F.interpolate(p3, size=x2.shape[2:], mode='bilinear', align_corners=True)
2193
+ _p2 = _p3 + self.lateral_block3(x2)
2194
+
2195
+ if self.config.dec_ipt:
2196
+ patches_batch = self.get_patches_batch(x, _p2) if self.split else x
2197
+ _p2 = torch.cat((_p2, self.ipt_blk3(F.interpolate(patches_batch, size=x2.shape[2:], mode='bilinear', align_corners=True))), 1)
2198
+ p2 = self.decoder_block2(_p2)
2199
+ m2 = self.conv_ms_spvn_2(p2) if self.config.ms_supervision else None
2200
+ if self.config.out_ref:
2201
+ p2_gdt = self.gdt_convs_2(p2)
2202
+ if self.training:
2203
+ # >> GT:
2204
+ m2_dia = m2
2205
+ gdt_label_main_2 = gdt_gt * F.interpolate(m2_dia, size=gdt_gt.shape[2:], mode='bilinear', align_corners=True)
2206
+ outs_gdt_label.append(gdt_label_main_2)
2207
+ # >> Pred:
2208
+ gdt_pred_2 = self.gdt_convs_pred_2(p2_gdt)
2209
+ outs_gdt_pred.append(gdt_pred_2)
2210
+ gdt_attn_2 = self.gdt_convs_attn_2(p2_gdt).sigmoid()
2211
+ # >> Finally:
2212
+ p2 = p2 * gdt_attn_2
2213
+ _p2 = F.interpolate(p2, size=x1.shape[2:], mode='bilinear', align_corners=True)
2214
+ _p1 = _p2 + self.lateral_block2(x1)
2215
+
2216
+ if self.config.dec_ipt:
2217
+ patches_batch = self.get_patches_batch(x, _p1) if self.split else x
2218
+ _p1 = torch.cat((_p1, self.ipt_blk2(F.interpolate(patches_batch, size=x1.shape[2:], mode='bilinear', align_corners=True))), 1)
2219
+ _p1 = self.decoder_block1(_p1)
2220
+ _p1 = F.interpolate(_p1, size=x.shape[2:], mode='bilinear', align_corners=True)
2221
+
2222
+ if self.config.dec_ipt:
2223
+ patches_batch = self.get_patches_batch(x, _p1) if self.split else x
2224
+ _p1 = torch.cat((_p1, self.ipt_blk1(F.interpolate(patches_batch, size=x.shape[2:], mode='bilinear', align_corners=True))), 1)
2225
+ p1_out = self.conv_out1(_p1)
2226
+
2227
+ if self.config.ms_supervision:
2228
+ outs.append(m4)
2229
+ outs.append(m3)
2230
+ outs.append(m2)
2231
+ outs.append(p1_out)
2232
+ return outs if not (self.config.out_ref and self.training) else ([outs_gdt_pred, outs_gdt_label], outs)
2233
+
2234
+
2235
+ class SimpleConvs(nn.Module):
2236
+ def __init__(
2237
+ self, in_channels: int, out_channels: int, inter_channels=64
2238
+ ) -> None:
2239
+ super().__init__()
2240
+ self.conv1 = nn.Conv2d(in_channels, inter_channels, 3, 1, 1)
2241
+ self.conv_out = nn.Conv2d(inter_channels, out_channels, 3, 1, 1)
2242
+
2243
+ def forward(self, x):
2244
+ return self.conv_out(self.conv1(x))
models/BiRefNet/RMBG-2.0/collage5.png ADDED

Git LFS Details

  • SHA256: f9f802564aa1e3a7c90762c7e65b77007f081cb179cdd9b42607bad3b1fdaf16
  • Pointer size: 132 Bytes
  • Size of remote file: 4.52 MB
models/BiRefNet/RMBG-2.0/config.json ADDED
@@ -0,0 +1,20 @@
1
+ {
2
+ "_name_or_path": "ZhengPeng7/BiRefNet",
3
+ "architectures": [
4
+ "BiRefNet"
5
+ ],
6
+ "auto_map": {
7
+ "AutoConfig": "BiRefNet_config.BiRefNetConfig",
8
+ "AutoModelForImageSegmentation": "birefnet.BiRefNet"
9
+ },
10
+ "custom_pipelines": {
11
+ "image-segmentation": {
12
+ "pt": [
13
+ "AutoModelForImageSegmentation"
14
+ ],
15
+ "tf": [],
16
+ "type": "image"
17
+ }
18
+ },
19
+ "bb_pretrained": false
20
+ }
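Given the auto_map above, this folder is normally loaded through the transformers Auto classes with remote code enabled. A minimal loading sketch; the path is the repo-relative folder shown in this commit:

import torch
from transformers import AutoModelForImageSegmentation

model = AutoModelForImageSegmentation.from_pretrained(
    "models/BiRefNet/RMBG-2.0",      # local folder from this commit
    trust_remote_code=True,          # uses BiRefNet_config.py / birefnet.py shipped alongside config.json
)
model.eval()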
models/BiRefNet/RMBG-2.0/diagram1.png ADDED
models/BiRefNet/RMBG-2.0/model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:566ed80c3d95f87ada6864d4cbe2290a1c5eb1c7bb0b123e984f60f76b02c3a7
3
+ size 884878856
models/BiRefNet/RMBG-2.0/onnx/model.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b486f08200f513f460da46dd701db5fbb47d79b4be4b708a19444bcd4e79958
3
+ size 1024331469
models/BiRefNet/RMBG-2.0/onnx/model_bnb4.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dadc9222fbffa53a348efea52d97475350ecee463a4a46f452e6e6b7b8757d25
3
+ size 355288046
models/BiRefNet/RMBG-2.0/onnx/model_fp16.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9dc47db40d113090ba5d7a13d8fcfd9ee4eda510ce92613219b2fe19da4746f6
3
+ size 513576499
models/BiRefNet/RMBG-2.0/onnx/model_int8.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8ee7690d8c5e7fc45d7b4938ac2fe4eab63fdeddd537673cda2d4c6e74809af
3
+ size 366087445
models/BiRefNet/RMBG-2.0/onnx/model_q4.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a813e0eab56c982b71254214f41fa860cc7b565a6f2aab55d1f99f41c646ece1
3
+ size 367451512
models/BiRefNet/RMBG-2.0/onnx/model_q4f16.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8bfeb5f93220eb19f6747c217b62cf04342840c4e973f55bf64e9762919f446d
3
+ size 233815293
models/BiRefNet/RMBG-2.0/onnx/model_quantized.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fcea23951a378f92634834888896cc1eec54655366ae6e949282646ce17c5420
3
+ size 366087549
models/BiRefNet/RMBG-2.0/onnx/model_uint8.onnx ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fcea23951a378f92634834888896cc1eec54655366ae6e949282646ce17c5420
3
+ size 366087549
models/BiRefNet/RMBG-2.0/preprocessor_config.json ADDED
@@ -0,0 +1,23 @@
1
+ {
2
+ "do_normalize": true,
3
+ "do_rescale": true,
4
+ "do_resize": true,
5
+ "feature_extractor_type": "ViTFeatureExtractor",
6
+ "image_mean": [
7
+ 0.485,
8
+ 0.456,
9
+ 0.406
10
+ ],
11
+ "image_processor_type": "ViTFeatureExtractor",
12
+ "image_std": [
13
+ 0.229,
14
+ 0.224,
15
+ 0.225
16
+ ],
17
+ "resample": 2,
18
+ "rescale_factor": 0.00392156862745098,
19
+ "size": {
20
+ "height": 1024,
21
+ "width": 1024
22
+ }
23
+ }
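A preprocessing sketch matching this file: resize to 1024x1024 with bilinear resampling (resample=2), rescale by 1/255, then normalize with the ImageNet mean/std listed above ("example.jpg" is a placeholder path):

from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize((1024, 1024), interpolation=transforms.InterpolationMode.BILINEAR),
    transforms.ToTensor(),                                              # rescale_factor = 1/255
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
x = preprocess(Image.open("example.jpg").convert("RGB")).unsqueeze(0)   # (1, 3, 1024, 1024)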
models/BiRefNet/RMBG-2.0/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0986c2881028a2d0ef9b638ab06bc4cfe7c529760d451eaa7098ade2592015f2
3
+ size 885079136
models/BiRefNet/RMBG-2.0/t4.png ADDED

Git LFS Details

  • SHA256: 43a9453f567d9bff7fe4481205575bbf302499379047ee6073247315452ba8fb
  • Pointer size: 132 Bytes
  • Size of remote file: 2.16 MB
models/BiRefNet/pth/BiRefNet-general-epoch_244.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11341a6a1c12646627e8d28da025bfec8aad027929d377cbe8fd4759636cc77c
3
+ size 885082437
models/CogVideo/CogVideoX-5b-1.5/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "_class_name": "CogVideoXDDIMScheduler",
3
+ "_diffusers_version": "0.31.0",
4
+ "beta_end": 0.012,
5
+ "beta_schedule": "scaled_linear",
6
+ "beta_start": 0.00085,
7
+ "clip_sample": false,
8
+ "clip_sample_range": 1.0,
9
+ "num_train_timesteps": 1000,
10
+ "prediction_type": "v_prediction",
11
+ "rescale_betas_zero_snr": true,
12
+ "sample_max_value": 1.0,
13
+ "set_alpha_to_one": true,
14
+ "snr_shift_scale": 1.0,
15
+ "steps_offset": 0,
16
+ "timestep_spacing": "trailing",
17
+ "trained_betas": null
18
+ }
models/CogVideo/CogVideoX-5b-1.5/transformer_I2V/config.json ADDED
@@ -0,0 +1,32 @@
1
+ {
2
+ "_class_name": "CogVideoXTransformer3DModel",
3
+ "_diffusers_version": "0.31.0",
4
+ "activation_fn": "gelu-approximate",
5
+ "attention_bias": true,
6
+ "attention_head_dim": 64,
7
+ "dropout": 0.0,
8
+ "flip_sin_to_cos": true,
9
+ "freq_shift": 0,
10
+ "in_channels": 32,
11
+ "max_text_seq_length": 226,
12
+ "norm_elementwise_affine": true,
13
+ "norm_eps": 1e-05,
14
+ "num_attention_heads": 48,
15
+ "num_layers": 42,
16
+ "ofs_embed_dim": 512,
17
+ "out_channels": 16,
18
+ "patch_bias": false,
19
+ "patch_size": 2,
20
+ "patch_size_t": 2,
21
+ "sample_frames": 81,
22
+ "sample_height": 300,
23
+ "sample_width": 300,
24
+ "spatial_interpolation_scale": 1.875,
25
+ "temporal_compression_ratio": 4,
26
+ "temporal_interpolation_scale": 1.0,
27
+ "text_embed_dim": 4096,
28
+ "time_embed_dim": 512,
29
+ "timestep_activation_fn": "silu",
30
+ "use_learned_positional_embeddings": false,
31
+ "use_rotary_positional_embeddings": true
32
+ }
models/CogVideo/CogVideoX-5b-1.5/transformer_I2V/diffusion_pytorch_model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4d392921dd79e0e7adbef45f2c88a21dfe9aeb8688a9dd6e757275b2e4c1bca
3
+ size 4979532864
models/CogVideo/CogVideoX-5b-1.5/transformer_I2V/diffusion_pytorch_model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:052af05f6d91fcb5bc7cb85805c507fc3c476b2bc01a1b2475a384cff03a7854
3
+ size 4948039832
models/CogVideo/CogVideoX-5b-1.5/transformer_I2V/diffusion_pytorch_model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:03f3b8812a540aca85a15fb06909cfd36b7b1f3bc638042cf8f6899d38043787
3
+ size 1215733728
models/CogVideo/CogVideoX-5b-1.5/transformer_I2V/diffusion_pytorch_model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
models/CogVideo/CogVideoX-5b-1.5/vae/config.json ADDED
@@ -0,0 +1,39 @@
1
+ {
2
+ "_class_name": "AutoencoderKLCogVideoX",
3
+ "_diffusers_version": "0.31.0.dev0",
4
+ "act_fn": "silu",
5
+ "block_out_channels": [
6
+ 128,
7
+ 256,
8
+ 256,
9
+ 512
10
+ ],
11
+ "down_block_types": [
12
+ "CogVideoXDownBlock3D",
13
+ "CogVideoXDownBlock3D",
14
+ "CogVideoXDownBlock3D",
15
+ "CogVideoXDownBlock3D"
16
+ ],
17
+ "force_upcast": true,
18
+ "in_channels": 3,
19
+ "latent_channels": 16,
20
+ "latents_mean": null,
21
+ "latents_std": null,
22
+ "layers_per_block": 3,
23
+ "norm_eps": 1e-06,
24
+ "norm_num_groups": 32,
25
+ "out_channels": 3,
26
+ "sample_height": 480,
27
+ "sample_width": 720,
28
+ "scaling_factor": 0.7,
29
+ "shift_factor": null,
30
+ "temporal_compression_ratio": 4,
31
+ "up_block_types": [
32
+ "CogVideoXUpBlock3D",
33
+ "CogVideoXUpBlock3D",
34
+ "CogVideoXUpBlock3D",
35
+ "CogVideoXUpBlock3D"
36
+ ],
37
+ "use_post_quant_conv": false,
38
+ "use_quant_conv": false
39
+ }
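The component classes follow the _class_name fields in these configs. A loading sketch against the local folder layout, assuming a diffusers version recent enough for the CogVideoX 1.5 configs; note the non-standard transformer_I2V subfolder name, which has to be passed explicitly:

import torch
from diffusers import AutoencoderKLCogVideoX, CogVideoXDDIMScheduler, CogVideoXTransformer3DModel

root = "models/CogVideo/CogVideoX-5b-1.5"
vae = AutoencoderKLCogVideoX.from_pretrained(root, subfolder="vae", torch_dtype=torch.bfloat16)
transformer = CogVideoXTransformer3DModel.from_pretrained(root, subfolder="transformer_I2V", torch_dtype=torch.bfloat16)
scheduler = CogVideoXDDIMScheduler.from_pretrained(root, subfolder="scheduler")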
models/CogVideo/CogVideoX-5b-1.5/vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd47d57ad948ff80da0af0cb2e4dcdef65073aba59bccfd383ada9a7d1c02024
3
+ size 431221142
models/CogVideo/CogVideoX-5b-I2V/.gitattributes ADDED
@@ -0,0 +1,35 @@
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/CogVideo/CogVideoX-5b-I2V/LICENSE ADDED
@@ -0,0 +1,71 @@
1
+ The CogVideoX License
2
+
3
+ 1. Definitions
4
+
5
+ “Licensor” means the CogVideoX Model Team that distributes its Software.
6
+
7
+ “Software” means the CogVideoX model parameters made available under this license.
8
+
9
+ 2. License Grant
10
+
11
+ Under the terms and conditions of this license, the licensor hereby grants you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty-free copyright license. The intellectual property rights of the generated content belong to the user to the extent permitted by applicable local laws.
12
+ This license allows you to freely use all open-source models in this repository for academic research. Users who wish to use the models for commercial purposes must register and obtain a basic commercial license in https://open.bigmodel.cn/mla/form .
13
+ Users who have registered and obtained the basic commercial license can use the models for commercial activities for free, but must comply with all terms and conditions of this license. Additionally, the number of service users (visits) for your commercial activities must not exceed 1 million visits per month.
14
+ If the number of service users (visits) for your commercial activities exceeds 1 million visits per month, you need to contact our business team to obtain more commercial licenses.
15
+ The above copyright statement and this license statement should be included in all copies or significant portions of this software.
16
+
17
+ 3. Restriction
18
+
19
+ You will not use, copy, modify, merge, publish, distribute, reproduce, or create derivative works of the Software, in whole or in part, for any military, or illegal purposes.
20
+
21
+ You will not use the Software for any act that may undermine China's national security and national unity, harm the public interest of society, or infringe upon the rights and interests of human beings.
22
+
23
+ 4. Disclaimer
24
+
25
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
+
27
+ 5. Limitation of Liability
28
+
29
+ EXCEPT TO THE EXTENT PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER BASED IN TORT, NEGLIGENCE, CONTRACT, LIABILITY, OR OTHERWISE WILL ANY LICENSOR BE LIABLE TO YOU FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES, OR ANY OTHER COMMERCIAL LOSSES, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
30
+
31
+ 6. Dispute Resolution
32
+
33
+ This license shall be governed and construed in accordance with the laws of People’s Republic of China. Any dispute arising from or in connection with this License shall be submitted to Haidian District People's Court in Beijing.
34
+
35
+ Note that the license is subject to update to a more comprehensive version. For any questions related to the license and copyright, please contact us at [email protected].
36
+
37
+ 1. 定义
38
+
39
+ “许可方”是指分发其软件的 CogVideoX 模型团队。
40
+
41
+ “软件”是指根据本许可提供的 CogVideoX 模型参数。
42
+
43
+ 2. 许可授予
44
+
45
+ 根据本许可的条款和条件,许可方特此授予您非排他性、全球性、不可转让、不可再许可、可撤销、免版税的版权许可。生成内容的知识产权所属,可根据适用当地法律的规定,在法律允许的范围内由用户享有生成内容的知识产权或其他权利。
46
+ 本许可允许您免费使用本仓库中的所有开源模型进行学术研究。对于希望将模型用于商业目的的用户,需在 https://open.bigmodel.cn/mla/form 完成登记并获得基础商用授权。
47
+
48
+ 经过登记并获得基础商用授权的用户可以免费使用本模型进行商业活动,但必须遵守本许可的所有条款和条件。
49
+ 在本许可证下,您的商业活动的服务用户数量(访问量)不得超过100万人次访问 / 每月。如果超过,您需要与我们的商业团队联系以获得更多的商业许可。
50
+ 上述版权声明和本许可声明应包含在本软件的所有副本或重要部分中。
51
+
52
+ 3.限制
53
+
54
+ 您不得出于任何军事或非法目的使用、复制、修改、合并、发布、分发、复制或创建本软件的全部或部分衍生作品。
55
+
56
+ 您不得利用本软件从事任何危害国家安全和国家统一、危害社会公共利益、侵犯人身权益的行为。
57
+
58
+ 4.免责声明
59
+
60
+ 本软件“按原样”提供,不提供任何明示或暗示的保证,包括但不限于对适销性、特定用途的适用性和非侵权性的保证。
61
+ 在任何情况下,作者或版权持有人均不对任何索赔、损害或其他责任负责,无论是在合同诉讼、侵权行为还是其他方面,因本软件、本软件的使用或其他交易而引起或与之相关。
62
+
63
+ 5. 责任限制
64
+
65
+ 除适用法律禁止的范围外,在任何情况下且根据任何法律理论,无论是基于侵权行为、疏忽、合同、责任或其他原因,任何许可方均不对您承担任何直接、间接、特殊、偶然、示范性或间接损害,或任何其他商业损失,即使许可人已被告知此类损害的可能性。
66
+
67
+ 6.争议解决
68
+
69
+ 本许可受中华人民共和国法律管辖并按其解释。 因本许可引起的或与本许可有关的任何争议应提交北京市海淀区人民法院。
70
+
71
+ 请注意,许可证可能会更新到更全面的版本。 有关许可和版权的任何问题,请通过 [email protected] 与我们联系。
models/CogVideo/CogVideoX-5b-I2V/README.md ADDED
@@ -0,0 +1,280 @@
1
+ ---
2
+ license: other
3
+ license_link: https://huggingface.co/THUDM/CogVideoX-5b-I2V/blob/main/LICENSE
4
+ language:
5
+ - en
6
+ tags:
7
+ - cogvideox
8
+ - video-generation
9
+ - thudm
10
+ - image-to-video
11
+ inference: false
12
+ ---
13
+
14
+ # CogVideoX-5B-I2V
15
+
16
+ <p style="text-align: center;">
17
+ <div align="center">
18
+ <img src=https://github.com/THUDM/CogVideo/raw/main/resources/logo.svg width="50%"/>
19
+ </div>
20
+ <p align="center">
21
+ <a href="https://huggingface.co/THUDM//CogVideoX-5b-I2V/blob/main/README.md">📄 Read in English</a> |
22
+ <a href="https://huggingface.co/spaces/THUDM/CogVideoX-5B-Space">🤗 Huggingface Space</a> |
23
+ <a href="https://github.com/THUDM/CogVideo">🌐 Github </a> |
24
+ <a href="https://arxiv.org/pdf/2408.06072">📜 arxiv </a>
25
+ </p>
26
+ <p align="center">
27
+ 📍 Visit <a href="https://chatglm.cn/video?fr=osm_cogvideox">Qingying</a> and <a href="https://open.bigmodel.cn/?utm_campaign=open&_channel_track_key=OWTVNma9">API Platform</a> for the commercial version of the video generation model
28
+ </p>
29
+
30
+ ## Model Introduction
31
+
32
+ CogVideoX is an open-source video generation model originating
33
+ from [Qingying](https://chatglm.cn/video?fr=osm_cogvideo). The table below presents information related to the video
34
+ generation models we offer in this version.
35
+
36
+ <table style="border-collapse: collapse; width: 100%;">
37
+ <tr>
38
+ <th style="text-align: center;">Model Name</th>
39
+ <th style="text-align: center;">CogVideoX-2B</th>
40
+ <th style="text-align: center;">CogVideoX-5B</th>
41
+ <th style="text-align: center;">CogVideoX-5B-I2V (This Repository)</th>
42
+ </tr>
43
+ <tr>
44
+ <td style="text-align: center;">Model Description</td>
45
+ <td style="text-align: center;">Entry-level model, balancing compatibility. Low cost for running and secondary development.</td>
46
+ <td style="text-align: center;">Larger model with higher video generation quality and better visual effects.</td>
47
+ <td style="text-align: center;">CogVideoX-5B image-to-video version.</td>
48
+ </tr>
49
+ <tr>
50
+ <td style="text-align: center;">Inference Precision</td>
51
+ <td style="text-align: center;"><b>FP16*(recommended)</b>, BF16, FP32, FP8*, INT8, not supported: INT4</td>
52
+ <td colspan="2" style="text-align: center;"><b>BF16 (recommended)</b>, FP16, FP32, FP8*, INT8, not supported: INT4</td>
53
+ </tr>
54
+ <tr>
55
+ <td style="text-align: center;">Single GPU Memory Usage<br></td>
56
+ <td style="text-align: center;"><a href="https://github.com/THUDM/SwissArmyTransformer">SAT</a> FP16: 18GB <br><b>diffusers FP16: from 4GB* </b><br><b>diffusers INT8 (torchao): from 3.6GB*</b></td>
57
+ <td colspan="2" style="text-align: center;"><a href="https://github.com/THUDM/SwissArmyTransformer">SAT</a> BF16: 26GB <br><b>diffusers BF16: from 5GB* </b><br><b>diffusers INT8 (torchao): from 4.4GB*</b></td>
58
+ </tr>
59
+ <tr>
60
+ <td style="text-align: center;">Multi-GPU Inference Memory Usage</td>
61
+ <td style="text-align: center;"><b>FP16: 10GB* using diffusers</b><br></td>
62
+ <td colspan="2" style="text-align: center;"><b>BF16: 15GB* using diffusers</b><br></td>
63
+ </tr>
64
+ <tr>
65
+ <td style="text-align: center;">Inference Speed<br>(Step = 50, FP/BF16)</td>
66
+ <td style="text-align: center;">Single A100: ~90 seconds<br>Single H100: ~45 seconds</td>
67
+ <td colspan="2" style="text-align: center;">Single A100: ~180 seconds<br>Single H100: ~90 seconds</td>
68
+ </tr>
69
+ <tr>
70
+ <td style="text-align: center;">Fine-tuning Precision</td>
71
+ <td style="text-align: center;"><b>FP16</b></td>
72
+ <td colspan="2" style="text-align: center;"><b>BF16</b></td>
73
+ </tr>
74
+ <tr>
75
+ <td style="text-align: center;">Fine-tuning Memory Usage</td>
76
+ <td style="text-align: center;">47 GB (bs=1, LORA)<br> 61 GB (bs=2, LORA)<br> 62GB (bs=1, SFT)</td>
77
+ <td style="text-align: center;">63 GB (bs=1, LORA)<br> 80 GB (bs=2, LORA)<br> 75GB (bs=1, SFT)<br></td>
78
+ <td style="text-align: center;">78 GB (bs=1, LORA)<br> 75GB (bs=1, SFT, 16GPU)<br></td>
79
+ </tr>
80
+ <tr>
81
+ <td style="text-align: center;">Prompt Language</td>
82
+ <td colspan="3" style="text-align: center;">English*</td>
83
+ </tr>
84
+ <tr>
85
+ <td style="text-align: center;">Maximum Prompt Length</td>
86
+ <td colspan="3" style="text-align: center;">226 Tokens</td>
87
+ </tr>
88
+ <tr>
89
+ <td style="text-align: center;">Video Length</td>
90
+ <td colspan="3" style="text-align: center;">6 Seconds</td>
91
+ </tr>
92
+ <tr>
93
+ <td style="text-align: center;">Frame Rate</td>
94
+ <td colspan="3" style="text-align: center;">8 Frames / Second</td>
95
+ </tr>
96
+ <tr>
97
+ <td style="text-align: center;">Video Resolution</td>
98
+ <td colspan="3" style="text-align: center;">720 x 480, no support for other resolutions (including fine-tuning)</td>
99
+ </tr>
100
+ <tr>
101
+ <td style="text-align: center;">Position Embedding</td>
102
+ <td style="text-align: center;">3d_sincos_pos_embed</td>
103
+ <td style="text-align: center;">3d_rope_pos_embed</td>
104
+ <td style="text-align: center;">3d_rope_pos_embed + learnable_pos_embed</td>
105
+ </tr>
106
+ </table>
107
+
108
+ **Data Explanation**
109
+
110
+ + While testing with the diffusers library, all optimizations provided by the library were enabled. Actual memory usage
111
+ has not been verified on devices other than **NVIDIA A100 / H100**; in general, the scheme should work on any device
112
+ with **NVIDIA Ampere architecture** or newer. If these optimizations are disabled, memory consumption multiplies, with
113
+ peak usage about 3 times the values in the table, while speed improves by roughly 3-4x (a speed-oriented sketch follows
114
+ this list). You can selectively disable some of the optimizations, including:
115
+
116
+ ```python
117
+ pipe.enable_sequential_cpu_offload()
118
+ pipe.vae.enable_slicing()
119
+ pipe.vae.enable_tiling()
120
+ ```
121
+
122
+ + For multi-GPU inference, the `enable_sequential_cpu_offload()` optimization needs to be disabled.
123
+ + Using INT8 models slows down inference; this is done so that GPUs with less memory can still run the model while
124
+ keeping the loss in video quality minimal.
125
+ + The CogVideoX-2B model was trained in `FP16` precision, and all CogVideoX-5B models were trained in `BF16` precision.
126
+ We recommend using the precision in which the model was trained for inference.
127
+ + [PytorchAO](https://github.com/pytorch/ao) and [Optimum-quanto](https://github.com/huggingface/optimum-quanto/) can be
128
+ used to quantize the text encoder, transformer, and VAE modules to reduce the memory requirements of CogVideoX. This
129
+ allows the model to run on free T4 Colabs or GPUs with smaller memory! Also, note that TorchAO quantization is fully
130
+ compatible with `torch.compile`, which can significantly improve inference speed. FP8 precision must be used on
131
+ devices with NVIDIA H100 and above, requiring source installation of `torch`, `torchao`, `diffusers`, and `accelerate`
132
+ Python packages. CUDA 12.4 is recommended.
133
+ + The inference speed tests also used the above memory optimization scheme. Without memory optimization, inference speed
134
+ increases by about 10%. Only the `diffusers` version of the model supports quantization.
135
+ + The model only supports English input; prompts in other languages should be translated into English, for example
136
+ during prompt refinement with a large language model.
137
+ + The memory usage of model fine-tuning is tested in an `8 * H100` environment, and the program automatically
138
+ uses `Zero 2` optimization. If a specific number of GPUs is marked in the table, that number or more GPUs must be used
139
+ for fine-tuning.
140
+
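+ As referenced in the optimization notes above, here is a minimal sketch of the speed-oriented configuration on a single
+ GPU with enough memory (roughly 3x the table values): the CPU-offload and VAE optimizations are simply left off and the
+ whole pipeline stays on the GPU. The prompt, image path, and sampling settings are the illustrative values used in the
+ examples further below.
+
+ ```python
+ import torch
+ from diffusers import CogVideoXImageToVideoPipeline
+ from diffusers.utils import export_to_video, load_image
+
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
+     "THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16
+ )
+ # Trade memory for speed: keep every module resident on the GPU instead of
+ # offloading between forward passes, and skip VAE slicing/tiling.
+ pipe.to("cuda")
+
+ video = pipe(
+     prompt="A little girl is riding a bicycle at high speed. Focused, detailed, realistic.",
+     image=load_image("input.jpg"),
+     num_inference_steps=50,
+     num_frames=49,
+     guidance_scale=6,
+ ).frames[0]
+ export_to_video(video, "output.mp4", fps=8)
+ ```
+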
141
+ **Reminders**
142
+
143
+ + Use [SAT](https://github.com/THUDM/SwissArmyTransformer) to run inference and fine-tune the SAT versions of the
144
+ models. Feel free to visit our GitHub for more details.
145
+
146
+ ## Getting Started Quickly 🤗
147
+
148
+ This model supports deployment using the Hugging Face diffusers library. You can follow the steps below to get started.
149
+
150
+ **We recommend that you visit our [GitHub](https://github.com/THUDM/CogVideo) to check out prompt optimization and
151
+ conversion to get a better experience.**
152
+
153
+ 1. Install the required dependencies
154
+
155
+ ```shell
156
+ # diffusers>=0.30.3
157
+ # transformers>=4.44.2
158
+ # accelerate>=0.34.0
159
+ # imageio-ffmpeg>=0.5.1
160
+ pip install --upgrade transformers accelerate diffusers imageio-ffmpeg
161
+ ```
162
+
163
+ 2. Run the code
164
+
165
+ ```python
166
+ import torch
167
+ from diffusers import CogVideoXImageToVideoPipeline
168
+ from diffusers.utils import export_to_video, load_image
169
+
170
+ prompt = "A little girl is riding a bicycle at high speed. Focused, detailed, realistic."
171
+ image = load_image(image="input.jpg")
172
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
173
+ "THUDM/CogVideoX-5b-I2V",
174
+ torch_dtype=torch.bfloat16
175
+ )
176
+
177
+ pipe.enable_sequential_cpu_offload()
178
+ pipe.vae.enable_tiling()
179
+ pipe.vae.enable_slicing()
180
+
181
+ video = pipe(
182
+ prompt=prompt,
183
+ image=image,
184
+ num_videos_per_prompt=1,
185
+ num_inference_steps=50,
186
+ num_frames=49,
187
+ guidance_scale=6,
188
+ generator=torch.Generator(device="cuda").manual_seed(42),
189
+ ).frames[0]
190
+
191
+ export_to_video(video, "output.mp4", fps=8)
192
+ ```
193
+
194
+ ## Quantized Inference
195
+
196
+ [PytorchAO](https://github.com/pytorch/ao) and [Optimum-quanto](https://github.com/huggingface/optimum-quanto/) can be
197
+ used to quantize the text encoder, transformer, and VAE modules to reduce CogVideoX's memory requirements. This allows
198
+ the model to run on free T4 Colab or GPUs with lower VRAM! Also, note that TorchAO quantization is fully compatible
199
+ with `torch.compile`, which can significantly accelerate inference.
200
+
201
+ ```python
202
+ # To get started, PytorchAO needs to be installed from the GitHub source and PyTorch Nightly.
203
+ # Source and nightly installation is only required until the next release.
204
+
205
+ import torch
206
+ from diffusers import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel, CogVideoXImageToVideoPipeline
207
+ from diffusers.utils import export_to_video, load_image
208
+ from transformers import T5EncoderModel
209
+ from torchao.quantization import quantize_, int8_weight_only
210
+
211
+ quantization = int8_weight_only
212
+
213
+ text_encoder = T5EncoderModel.from_pretrained("THUDM/CogVideoX-5b-I2V", subfolder="text_encoder", torch_dtype=torch.bfloat16)
214
+ quantize_(text_encoder, quantization())
215
+
216
+ transformer = CogVideoXTransformer3DModel.from_pretrained("THUDM/CogVideoX-5b-I2V",subfolder="transformer", torch_dtype=torch.bfloat16)
217
+ quantize_(transformer, quantization())
218
+
219
+ vae = AutoencoderKLCogVideoX.from_pretrained("THUDM/CogVideoX-5b-I2V", subfolder="vae", torch_dtype=torch.bfloat16)
220
+ quantize_(vae, quantization())
221
+
222
+ # Create pipeline and run inference
223
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
224
+ "THUDM/CogVideoX-5b-I2V",
225
+ text_encoder=text_encoder,
226
+ transformer=transformer,
227
+ vae=vae,
228
+ torch_dtype=torch.bfloat16,
229
+ )
230
+
231
+ pipe.enable_model_cpu_offload()
232
+ pipe.vae.enable_tiling()
233
+ pipe.vae.enable_slicing()
234
+
235
+ prompt = "A little girl is riding a bicycle at high speed. Focused, detailed, realistic."
236
+ image = load_image(image="input.jpg")
237
+ video = pipe(
238
+ prompt=prompt,
239
+ image=image,
240
+ num_videos_per_prompt=1,
241
+ num_inference_steps=50,
242
+ num_frames=49,
243
+ guidance_scale=6,
244
+ generator=torch.Generator(device="cuda").manual_seed(42),
245
+ ).frames[0]
246
+
247
+ export_to_video(video, "output.mp4", fps=8)
248
+ ```
249
+
250
+ Additionally, these models can be serialized and stored using PytorchAO in quantized data types to save disk space. You
251
+ can find examples and benchmarks at the following links:
252
+
253
+ - [torchao](https://gist.github.com/a-r-r-o-w/4d9732d17412888c885480c6521a9897)
254
+ - [quanto](https://gist.github.com/a-r-r-o-w/31be62828b00a9292821b85c1017effa)
255
+
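+ As a rough sketch of what those examples cover (see the gists above for the complete, benchmarked versions), one possible
+ flow is to quantize a component once and write it back to disk with `save_pretrained`. This assumes the torchao tensor
+ subclasses are stored with `safe_serialization=False`, since they cannot be saved as safetensors; the local output folder
+ below is illustrative.
+
+ ```python
+ import torch
+ from diffusers import CogVideoXTransformer3DModel
+ from torchao.quantization import quantize_, int8_weight_only
+
+ # Quantize the transformer once, then persist it in its quantized form.
+ transformer = CogVideoXTransformer3DModel.from_pretrained(
+     "THUDM/CogVideoX-5b-I2V", subfolder="transformer", torch_dtype=torch.bfloat16
+ )
+ quantize_(transformer, int8_weight_only())
+ transformer.save_pretrained(
+     "cogvideox-5b-i2v-int8-transformer",  # illustrative output folder
+     safe_serialization=False,             # torchao tensor subclasses are not safetensors-compatible
+ )
+
+ # Later, reload the already-quantized weights instead of quantizing again.
+ transformer = CogVideoXTransformer3DModel.from_pretrained(
+     "cogvideox-5b-i2v-int8-transformer", torch_dtype=torch.bfloat16, use_safetensors=False
+ )
+ ```
+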
256
+ ## Further Exploration
257
+
258
+ Feel free to visit our [GitHub](https://github.com/THUDM/CogVideo), where you'll find:
259
+
260
+ 1. More detailed technical explanations and code.
261
+ 2. Optimized prompt examples and conversions.
262
+ 3. Detailed code for model inference and fine-tuning.
263
+ 4. Project update logs and more interactive opportunities.
264
+ 5. CogVideoX toolchain to help you better use the model.
265
+ 6. INT8 model inference code.
266
+
267
+ ## Model License
268
+
269
+ This model is released under the [CogVideoX LICENSE](LICENSE).
270
+
271
+ ## Citation
272
+
273
+ ```
274
+ @article{yang2024cogvideox,
275
+ title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
276
+ author={Yang, Zhuoyi and Teng, Jiayan and Zheng, Wendi and Ding, Ming and Huang, Shiyu and Xu, Jiazheng and Yang, Yuanming and Hong, Wenyi and Zhang, Xiaohan and Feng, Guanyu and others},
277
+ journal={arXiv preprint arXiv:2408.06072},
278
+ year={2024}
279
+ }
280
+ ```
models/CogVideo/CogVideoX-5b-I2V/README_zh.md ADDED
@@ -0,0 +1,252 @@
1
+ # CogVideoX-5B-I2V
2
+
3
+ <p style="text-align: center;">
4
+ <div align="center">
5
+ <img src=https://github.com/THUDM/CogVideo/raw/main/resources/logo.svg width="50%"/>
6
+ </div>
7
+ <p align="center">
8
+ <a href="https://huggingface.co/THUDM/CogVideoX-5b-I2V/blob/main/README.md">📄 Read in English</a> |
9
+ <a href="https://huggingface.co/spaces/THUDM/CogVideoX-5B-Space">🤗 Huggingface Space</a> |
10
+ <a href="https://github.com/THUDM/CogVideo">🌐 Github </a> |
11
+ <a href="https://arxiv.org/pdf/2408.06072">📜 arxiv </a>
12
+ </p>
13
+ <p align="center">
14
+ 📍 前往<a href="https://chatglm.cn/video?fr=osm_cogvideox"> 清影</a> 和 <a href="https://open.bigmodel.cn/?utm_campaign=open&_channel_track_key=OWTVNma9"> API平台</a> 体验商业版视频生成模型
15
+ </p>
16
+
17
+ ## 模型介绍
18
+
19
+ CogVideoX是 [清影](https://chatglm.cn/video?fr=osm_cogvideo) 同源的开源版本视频生成模型。下表展示我们在本代提供的视频生成模型列表相关信息:
20
+
21
+ <table style="border-collapse: collapse; width: 100%;">
22
+ <tr>
23
+ <th style="text-align: center;">模型名</th>
24
+ <th style="text-align: center;">CogVideoX-2B</th>
25
+ <th style="text-align: center;">CogVideoX-5B</th>
26
+ <th style="text-align: center;">CogVideoX-5B-I2V (本仓库)</th>
27
+ </tr>
28
+ <tr>
29
+ <td style="text-align: center;">模型介绍</td>
30
+ <td style="text-align: center;">入门级模型,兼顾兼容性。运行,二次开发成本低。</td>
31
+ <td style="text-align: center;">视频生成质量更高,视觉效果更好的更大尺寸模型。</td>
32
+ <td style="text-align: center;">CogVideoX-5B 图生视频版本。</td>
33
+ </tr>
34
+ <tr>
35
+ <td style="text-align: center;">推理精度</td>
36
+ <td style="text-align: center;"><b>FP16*(推荐)</b>, BF16, FP32,FP8*,INT8,不支持INT4</td>
37
+ <td colspan="2" style="text-align: center;"><b>BF16(推荐)</b>, FP16, FP32,FP8*,INT8,不支持INT4</td>
38
+ </tr>
39
+ <tr>
40
+ <td style="text-align: center;">单GPU显存消耗<br></td>
41
+ <td style="text-align: center;"><a href="https://github.com/THUDM/SwissArmyTransformer">SAT</a> FP16: 18GB <br><b>diffusers FP16: 4GB起* </b><br><b>diffusers INT8(torchao): 3.6G起*</b></td>
42
+ <td colspan="2" style="text-align: center;"><a href="https://github.com/THUDM/SwissArmyTransformer">SAT</a> BF16: 26GB <br><b>diffusers BF16 : 5GB起* </b><br><b>diffusers INT8(torchao): 4.4G起* </b></td>
43
+ </tr>
44
+ <tr>
45
+ <td style="text-align: center;">多GPU推理显存消耗</td>
46
+ <td style="text-align: center;"><b>FP16: 10GB* using diffusers</b><br></td>
47
+ <td colspan="2" style="text-align: center;"><b>BF16: 15GB* using diffusers</b><br></td>
48
+ </tr>
49
+ <tr>
50
+ <td style="text-align: center;">推理速度<br>(Step = 50, FP/BF16)</td>
51
+ <td style="text-align: center;">单卡A100: ~90秒<br>单卡H100: ~45秒</td>
52
+ <td colspan="2" style="text-align: center;">单卡A100: ~180秒<br>单卡H100: ~90秒</td>
53
+ </tr>
54
+ <tr>
55
+ <td style="text-align: center;">微调精度</td>
56
+ <td style="text-align: center;"><b>FP16</b></td>
57
+ <td colspan="2" style="text-align: center;"><b>BF16</b></td>
58
+ </tr>
59
+ <tr>
60
+ <td style="text-align: center;">微调显存消耗</td>
61
+ <td style="text-align: center;">47 GB (bs=1, LORA)<br> 61 GB (bs=2, LORA)<br> 62GB (bs=1, SFT)</td>
62
+ <td style="text-align: center;">63 GB (bs=1, LORA)<br> 80 GB (bs=2, LORA)<br> 75GB (bs=1, SFT)<br></td>
63
+ <td style="text-align: center;">78 GB (bs=1, LORA)<br> 75GB (bs=1, SFT, 16GPU)<br></td>
64
+ </tr>
65
+ <tr>
66
+ <td style="text-align: center;">提示词语言</td>
67
+ <td colspan="3" style="text-align: center;">English*</td>
68
+ </tr>
69
+ <tr>
70
+ <td style="text-align: center;">提示词长度上限</td>
71
+ <td colspan="3" style="text-align: center;">226 Tokens</td>
72
+ </tr>
73
+ <tr>
74
+ <td style="text-align: center;">视频长度</td>
75
+ <td colspan="3" style="text-align: center;">6 秒</td>
76
+ </tr>
77
+ <tr>
78
+ <td style="text-align: center;">帧率</td>
79
+ <td colspan="3" style="text-align: center;">8 帧 / 秒 </td>
80
+ </tr>
81
+ <tr>
82
+ <td style="text-align: center;">视频分辨率</td>
83
+ <td colspan="3" style="text-align: center;">720 * 480,不支持其他分辨率(含微调)</td>
84
+ </tr>
85
+ <tr>
86
+ <td style="text-align: center;">位置编码</td>
87
+ <td style="text-align: center;">3d_sincos_pos_embed</td>
88
+ <td style="text-align: center;">3d_rope_pos_embed</td>
89
+ <td style="text-align: center;">3d_rope_pos_embed + learnable_pos_embed</td>
90
+ </tr>
91
+ </table>
92
+
93
+ **数据解释**
94
+
95
+ + 使用 diffusers 库进行测试时,启用了全部`diffusers`库自带的优化,该方案未测试在非**NVIDIA A100 / H100**
96
+ 外的设备上的实际显存 / 内存占用。通常,该方案可以适配于所有 **NVIDIA 安培架构**
97
+ 以上的设备。若关闭优化,显存占用会成倍增加,峰值显存约为表格的3倍。但速度提升3-4倍左右。你可以选择性的关闭部分优化,这些优化包括:
98
+
99
+ ```python
100
+ pipe.enable_sequential_cpu_offload()
101
+ pipe.vae.enable_slicing()
102
+ pipe.vae.enable_tiling()
103
+ ```
104
+
105
+ + 多GPU推理时,需要关闭 `enable_sequential_cpu_offload()` 优化。
106
+ + 使用 INT8 模型会导致推理速度降低,此举是为了满足显存较低的显卡能正常推理并保持较少的视频质量损失,推理速度大幅降低。
107
+ + CogVideoX-2B 模型采用 `FP16` 精度训练,所有 CogVideoX-5B 模型采用 `BF16` 精度训练。我们推荐使用模型训练的精度进行推理。
108
+ + [PytorchAO](https://github.com/pytorch/ao) 和 [Optimum-quanto](https://github.com/huggingface/optimum-quanto/)
109
+ 可以用于量化文本编码器、Transformer 和 VAE 模块,以降低 CogVideoX 的内存需求。这使得在免费的 T4 Colab 或更小显存的 GPU
110
+ 上运行模型成为可能!同样值得注意的是,TorchAO 量化完全兼容 `torch.compile`,这可以显著提高推理速度。在 `NVIDIA H100`
111
+ 及以上设备上必须使用 `FP8` 精度,这需要源码安装 `torch`、`torchao`、`diffusers` 和 `accelerate` Python
112
+ 包。建议使用 `CUDA 12.4`。
113
+ + 推理速度测试同样采用了上述显存优化方案,不采用显存优化的情况下,推理速度提升约10%。 只有`diffusers`版本模型支持量化。
114
+ + 模型仅支持英语输入,其他语言可以通过大模型润色时翻译为英语。
115
+ + 模型微调所占用的显存是在 `8 * H100` 环境下进行测试,程序已经自动使用`Zero 2` 优化。表格中若有标注具体GPU数量则必须使用大于等于该数量的GPU进行微调。
116
+
117
+ **提醒**
118
+
119
+ + 使用 [SAT](https://github.com/THUDM/SwissArmyTransformer) 推理和微调SAT版本模型。欢迎前往我们的github查看。
120
+
121
+ ## 快速上手 🤗
122
+
123
+ 本模型已经支持使用 huggingface 的 diffusers 库进行部署,你可以按照以下步骤进行部署。
124
+
125
+ **我们推荐您进入我们的 [github](https://github.com/THUDM/CogVideo) 并查看相关的提示词优化和转换,以获得更好的体验。**
126
+
127
+ 1. 安装对应的依赖
128
+
129
+ ```shell
130
+ # diffusers>=0.30.3
131
+ # transformers>=4.44.2
132
+ # accelerate>=0.34.0
133
+ # imageio-ffmpeg>=0.5.1
134
+ pip install --upgrade transformers accelerate diffusers imageio-ffmpeg
135
+ ```
136
+
137
+ 2. 运行代码
138
+
139
+ ```python
140
+ import torch
141
+ from diffusers import CogVideoXImageToVideoPipeline
142
+ from diffusers.utils import export_to_video, load_image
143
+
144
+ prompt = "A little girl is riding a bicycle at high speed. Focused, detailed, realistic."
145
+ image = load_image(image="input.jpg")
146
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
147
+ "THUDM/CogVideoX-5b-I2V",
148
+ torch_dtype=torch.bfloat16
149
+ )
150
+
151
+ pipe.enable_sequential_cpu_offload()
152
+ pipe.vae.enable_tiling()
153
+ pipe.vae.enable_slicing()
154
+
155
+ video = pipe(
156
+ prompt=prompt,
157
+ image=image,
158
+ num_videos_per_prompt=1,
159
+ num_inference_steps=50,
160
+ num_frames=49,
161
+ guidance_scale=6,
162
+ generator=torch.Generator(device="cuda").manual_seed(42),
163
+ ).frames[0]
164
+
165
+ export_to_video(video, "output.mp4", fps=8)
166
+ ```
167
+
168
+ ## Quantized Inference
169
+
170
+ [PytorchAO](https://github.com/pytorch/ao) 和 [Optimum-quanto](https://github.com/huggingface/optimum-quanto/)
171
+ 可以用于对文本编码器、Transformer 和 VAE 模块进行量化,从而降低 CogVideoX 的内存需求。这使得在免费的 T4 Colab 或较小 VRAM 的
172
+ GPU 上运行该模型成为可能!值得注意的是,TorchAO 量化与 `torch.compile` 完全兼容,这可以显著加快推理速度。
173
+
174
+ ```python
175
+ # To get started, PytorchAO needs to be installed from the GitHub source and PyTorch Nightly.
176
+ # Source and nightly installation is only required until the next release.
177
+
178
+ import torch
179
+ from diffusers import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel, CogVideoXImageToVideoPipeline
180
+ from diffusers.utils import export_to_video, load_image
181
+ from transformers import T5EncoderModel
182
+ from torchao.quantization import quantize_, int8_weight_only
183
+
184
+ quantization = int8_weight_only
185
+
186
+ text_encoder = T5EncoderModel.from_pretrained("THUDM/CogVideoX-5b-I2V", subfolder="text_encoder", torch_dtype=torch.bfloat16)
187
+ quantize_(text_encoder, quantization())
188
+
189
+ transformer = CogVideoXTransformer3DModel.from_pretrained("THUDM/CogVideoX-5b-I2V",subfolder="transformer", torch_dtype=torch.bfloat16)
190
+ quantize_(transformer, quantization())
191
+
192
+ vae = AutoencoderKLCogVideoX.from_pretrained("THUDM/CogVideoX-5b-I2V", subfolder="vae", torch_dtype=torch.bfloat16)
193
+ quantize_(vae, quantization())
194
+
195
+ # Create pipeline and run inference
196
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
197
+ "THUDM/CogVideoX-5b-I2V",
198
+ text_encoder=text_encoder,
199
+ transformer=transformer,
200
+ vae=vae,
201
+ torch_dtype=torch.bfloat16,
202
+ )
203
+
204
+ pipe.enable_model_cpu_offload()
205
+ pipe.vae.enable_tiling()
206
+ pipe.vae.enable_slicing()
207
+
208
+ prompt = "A little girl is riding a bicycle at high speed. Focused, detailed, realistic."
209
+ image = load_image(image="input.jpg")
210
+ video = pipe(
211
+ prompt=prompt,
212
+ image=image,
213
+ num_videos_per_prompt=1,
214
+ num_inference_steps=50,
215
+ num_frames=49,
216
+ guidance_scale=6,
217
+ generator=torch.Generator(device="cuda").manual_seed(42),
218
+ ).frames[0]
219
+
220
+ export_to_video(video, "output.mp4", fps=8)
221
+ ```
222
+
223
+ 此外,这些模型可以通过使用PytorchAO以量化数据类型序列化并存储,从而节省磁盘空间。你可以在以下链接中找到示例和基准测试。
224
+
225
+ - [torchao](https://gist.github.com/a-r-r-o-w/4d9732d17412888c885480c6521a9897)
226
+ - [quanto](https://gist.github.com/a-r-r-o-w/31be62828b00a9292821b85c1017effa)
227
+
228
+ ## 深入研究
229
+
230
+ 欢迎进入我们的 [github](https://github.com/THUDM/CogVideo),你将获得:
231
+
232
+ 1. 更加详细的技术细节介绍和代码解释。
233
+ 2. 提示词的优化和转换。
234
+ 3. 模型推理和微调的详细代码。
235
+ 4. 项目更新日志动态,更多互动机会。
236
+ 5. CogVideoX 工具链,帮助您更好的使用模型。
237
+ 6. INT8 模型推理代码。
238
+
239
+ ## 模型协议
240
+
241
+ 该模型根据 [CogVideoX LICENSE](LICENSE) 许可证发布。
242
+
243
+ ## 引用
244
+
245
+ ```
246
+ @article{yang2024cogvideox,
247
+ title={CogVideoX: Text-to-Video Diffusion Models with An Expert Transformer},
248
+ author={Yang, Zhuoyi and Teng, Jiayan and Zheng, Wendi and Ding, Ming and Huang, Shiyu and Xu, Jiazheng and Yang, Yuanming and Hong, Wenyi and Zhang, Xiaohan and Feng, Guanyu and others},
249
+ journal={arXiv preprint arXiv:2408.06072},
250
+ year={2024}
251
+ }
252
+ ```
models/CogVideo/CogVideoX-5b-I2V/configuration.json ADDED
@@ -0,0 +1 @@
1
+ {"framework":"Pytorch","task":"image-to-video"}
models/CogVideo/CogVideoX-5b-I2V/model_index.json ADDED
@@ -0,0 +1,24 @@
1
+ {
2
+ "_class_name": "CogVideoXImageToVideoPipeline",
3
+ "_diffusers_version": "0.31.0.dev0",
4
+ "scheduler": [
5
+ "diffusers",
6
+ "CogVideoXDDIMScheduler"
7
+ ],
8
+ "text_encoder": [
9
+ "transformers",
10
+ "T5EncoderModel"
11
+ ],
12
+ "tokenizer": [
13
+ "transformers",
14
+ "T5Tokenizer"
15
+ ],
16
+ "transformer": [
17
+ "diffusers",
18
+ "CogVideoXTransformer3DModel"
19
+ ],
20
+ "vae": [
21
+ "diffusers",
22
+ "AutoencoderKLCogVideoX"
23
+ ]
24
+ }
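+
+ A minimal sketch of how this `model_index.json` is consumed: `diffusers` reads it to decide which class to build for each
+ subfolder. The local path mirrors this repository's layout and assumes the `text_encoder` and `tokenizer` subfolders are
+ also populated.
+
+ ```python
+ import torch
+ from diffusers import CogVideoXImageToVideoPipeline
+
+ # from_pretrained() resolves each component listed in model_index.json
+ # (scheduler, text_encoder, tokenizer, transformer, vae) from its subfolder.
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
+     "models/CogVideo/CogVideoX-5b-I2V",
+     torch_dtype=torch.bfloat16,
+ )
+ pipe.enable_sequential_cpu_offload()
+ ```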
models/CogVideo/CogVideoX-5b-I2V/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "_class_name": "CogVideoXDDIMScheduler",
3
+ "_diffusers_version": "0.31.0.dev0",
4
+ "beta_end": 0.012,
5
+ "beta_schedule": "scaled_linear",
6
+ "beta_start": 0.00085,
7
+ "clip_sample": false,
8
+ "clip_sample_range": 1.0,
9
+ "num_train_timesteps": 1000,
10
+ "prediction_type": "v_prediction",
11
+ "rescale_betas_zero_snr": true,
12
+ "sample_max_value": 1.0,
13
+ "set_alpha_to_one": true,
14
+ "snr_shift_scale": 1.0,
15
+ "steps_offset": 0,
16
+ "timestep_spacing": "trailing",
17
+ "trained_betas": null
18
+ }
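+
+ As a quick sketch, the scheduler above can be instantiated directly from this config; the local path mirrors this
+ repository's layout and is illustrative.
+
+ ```python
+ from diffusers import CogVideoXDDIMScheduler
+
+ # Picks up scheduler_config.json: v-prediction, zero-terminal-SNR rescaling,
+ # and "trailing" timestep spacing over 1000 training timesteps.
+ scheduler = CogVideoXDDIMScheduler.from_pretrained(
+     "models/CogVideo/CogVideoX-5b-I2V", subfolder="scheduler"
+ )
+ print(scheduler.config.prediction_type)         # "v_prediction"
+ print(scheduler.config.rescale_betas_zero_snr)  # True
+ ```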
models/CogVideo/CogVideoX-5b-I2V/transformer/config.json ADDED
@@ -0,0 +1,29 @@
1
+ {
2
+ "_class_name": "CogVideoXTransformer3DModel",
3
+ "_diffusers_version": "0.31.0.dev0",
4
+ "activation_fn": "gelu-approximate",
5
+ "attention_bias": true,
6
+ "attention_head_dim": 64,
7
+ "dropout": 0.0,
8
+ "flip_sin_to_cos": true,
9
+ "freq_shift": 0,
10
+ "in_channels": 32,
11
+ "max_text_seq_length": 226,
12
+ "norm_elementwise_affine": true,
13
+ "norm_eps": 1e-05,
14
+ "num_attention_heads": 48,
15
+ "num_layers": 42,
16
+ "out_channels": 16,
17
+ "patch_size": 2,
18
+ "sample_frames": 49,
19
+ "sample_height": 60,
20
+ "sample_width": 90,
21
+ "spatial_interpolation_scale": 1.875,
22
+ "temporal_compression_ratio": 4,
23
+ "temporal_interpolation_scale": 1.0,
24
+ "text_embed_dim": 4096,
25
+ "time_embed_dim": 512,
26
+ "timestep_activation_fn": "silu",
27
+ "use_learned_positional_embeddings": true,
28
+ "use_rotary_positional_embeddings": true
29
+ }
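+
+ A small sketch of loading the transformer from this config (local path illustrative). Note the I2V variant's
+ `in_channels` is 32 — presumably the 16 noise-latent channels concatenated with 16 image-conditioning channels — versus
+ 16 for the text-to-video model further below.
+
+ ```python
+ import torch
+ from diffusers import CogVideoXTransformer3DModel
+
+ transformer = CogVideoXTransformer3DModel.from_pretrained(
+     "models/CogVideo/CogVideoX-5b-I2V", subfolder="transformer", torch_dtype=torch.bfloat16
+ )
+ # 48 heads x 64 dims per head = 3072 hidden size, 42 transformer blocks.
+ print(transformer.config.num_attention_heads * transformer.config.attention_head_dim)  # 3072
+ print(transformer.config.in_channels, transformer.config.out_channels)                 # 32 16
+ ```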
models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00001-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2e3060199c34a0d18892a19d687455f938b0ac3d2ea7d48f37cb4090e141965
3
+ size 4992465072
models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00002-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e8d0c62d366b0d9cc3476d2b21ca54afbecea154d54d923da120b2ec174c7e7
3
+ size 4985800640
models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model-00003-of-00003.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da91a0051da3f39caf10944b7c9aa66b14ddeffb37a25b087c49fc1692c1a361
3
+ size 1272025856
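+
+ The three files above are Git LFS pointers (an `oid sha256` plus a byte size) rather than the weights themselves. A
+ generic sketch for checking that a downloaded shard matches its pointer — the file name and expected digest below are
+ taken from the first shard:
+
+ ```python
+ import hashlib
+ from pathlib import Path
+
+ def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
+     """Stream the file so multi-GB shards never need to fit in memory."""
+     digest = hashlib.sha256()
+     with path.open("rb") as f:
+         for chunk in iter(lambda: f.read(chunk_size), b""):
+             digest.update(chunk)
+     return digest.hexdigest()
+
+ shard = Path("transformer/diffusion_pytorch_model-00001-of-00003.safetensors")
+ expected = "f2e3060199c34a0d18892a19d687455f938b0ac3d2ea7d48f37cb4090e141965"  # oid from the pointer
+ assert sha256_of(shard) == expected, "shard does not match its LFS pointer"
+ ```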
models/CogVideo/CogVideoX-5b-I2V/transformer/diffusion_pytorch_model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
models/CogVideo/CogVideoX-5b-I2V/vae/config.json ADDED
@@ -0,0 +1,39 @@
1
+ {
2
+ "_class_name": "AutoencoderKLCogVideoX",
3
+ "_diffusers_version": "0.31.0.dev0",
4
+ "act_fn": "silu",
5
+ "block_out_channels": [
6
+ 128,
7
+ 256,
8
+ 256,
9
+ 512
10
+ ],
11
+ "down_block_types": [
12
+ "CogVideoXDownBlock3D",
13
+ "CogVideoXDownBlock3D",
14
+ "CogVideoXDownBlock3D",
15
+ "CogVideoXDownBlock3D"
16
+ ],
17
+ "force_upcast": true,
18
+ "in_channels": 3,
19
+ "latent_channels": 16,
20
+ "latents_mean": null,
21
+ "latents_std": null,
22
+ "layers_per_block": 3,
23
+ "norm_eps": 1e-06,
24
+ "norm_num_groups": 32,
25
+ "out_channels": 3,
26
+ "sample_height": 480,
27
+ "sample_width": 720,
28
+ "scaling_factor": 0.7,
29
+ "shift_factor": null,
30
+ "temporal_compression_ratio": 4,
31
+ "up_block_types": [
32
+ "CogVideoXUpBlock3D",
33
+ "CogVideoXUpBlock3D",
34
+ "CogVideoXUpBlock3D",
35
+ "CogVideoXUpBlock3D"
36
+ ],
37
+ "use_post_quant_conv": false,
38
+ "use_quant_conv": false
39
+ }
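+
+ A brief sketch of loading the VAE from this config (local path illustrative). With four down blocks the spatial
+ compression is 8x, and `temporal_compression_ratio` is 4, so a 49-frame 480x720 clip maps to roughly a 13 x 60 x 90 grid
+ of 16-channel latents — matching `sample_frames`, `sample_height`, and `sample_width` in the transformer config above.
+
+ ```python
+ import torch
+ from diffusers import AutoencoderKLCogVideoX
+
+ vae = AutoencoderKLCogVideoX.from_pretrained(
+     "models/CogVideo/CogVideoX-5b-I2V", subfolder="vae", torch_dtype=torch.bfloat16
+ )
+ print(vae.config.latent_channels)              # 16
+ print(vae.config.temporal_compression_ratio)   # 4
+ print(len(vae.config.block_out_channels) - 1)  # 3 downsampling stages -> 8x spatially
+ ```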
models/CogVideo/CogVideoX-5b-I2V/vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd47d57ad948ff80da0af0cb2e4dcdef65073aba59bccfd383ada9a7d1c02024
3
+ size 431221142
models/CogVideo/CogVideoX-5b/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "_class_name": "CogVideoXDDIMScheduler",
3
+ "_diffusers_version": "0.31.0.dev0",
4
+ "beta_end": 0.012,
5
+ "beta_schedule": "scaled_linear",
6
+ "beta_start": 0.00085,
7
+ "clip_sample": false,
8
+ "clip_sample_range": 1.0,
9
+ "num_train_timesteps": 1000,
10
+ "prediction_type": "v_prediction",
11
+ "rescale_betas_zero_snr": true,
12
+ "sample_max_value": 1.0,
13
+ "set_alpha_to_one": true,
14
+ "snr_shift_scale": 1.0,
15
+ "steps_offset": 0,
16
+ "timestep_spacing": "trailing",
17
+ "trained_betas": null
18
+ }
models/CogVideo/CogVideoX-5b/transformer/config.json ADDED
@@ -0,0 +1,28 @@
1
+ {
2
+ "_class_name": "CogVideoXTransformer3DModel",
3
+ "_diffusers_version": "0.31.0.dev0",
4
+ "activation_fn": "gelu-approximate",
5
+ "attention_bias": true,
6
+ "attention_head_dim": 64,
7
+ "dropout": 0.0,
8
+ "flip_sin_to_cos": true,
9
+ "freq_shift": 0,
10
+ "in_channels": 16,
11
+ "max_text_seq_length": 226,
12
+ "norm_elementwise_affine": true,
13
+ "norm_eps": 1e-05,
14
+ "num_attention_heads": 48,
15
+ "num_layers": 42,
16
+ "out_channels": 16,
17
+ "patch_size": 2,
18
+ "sample_frames": 49,
19
+ "sample_height": 60,
20
+ "sample_width": 90,
21
+ "spatial_interpolation_scale": 1.875,
22
+ "temporal_compression_ratio": 4,
23
+ "temporal_interpolation_scale": 1.0,
24
+ "text_embed_dim": 4096,
25
+ "time_embed_dim": 512,
26
+ "timestep_activation_fn": "silu",
27
+ "use_rotary_positional_embeddings": true
28
+ }
models/CogVideo/CogVideoX-5b/transformer/diffusion_pytorch_model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7101be7e75631130cdf4a63ad798452bdce29716aaa47829e882dd384c398bf
3
+ size 9925342208
models/CogVideo/CogVideoX-5b/transformer/diffusion_pytorch_model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebe6c0e34a52c89f8ea8032a3a8a278a9ff1880dc70e1e6f3b840bcfd0396647
3
+ size 1215340384
models/CogVideo/CogVideoX-5b/transformer/diffusion_pytorch_model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
models/CogVideo/CogVideoX-5b/vae/config.json ADDED
@@ -0,0 +1,40 @@
1
+ {
2
+ "_class_name": "AutoencoderKLCogVideoX",
3
+ "_diffusers_version": "0.32.0.dev0",
4
+ "act_fn": "silu",
5
+ "block_out_channels": [
6
+ 128,
7
+ 256,
8
+ 256,
9
+ 512
10
+ ],
11
+ "down_block_types": [
12
+ "CogVideoXDownBlock3D",
13
+ "CogVideoXDownBlock3D",
14
+ "CogVideoXDownBlock3D",
15
+ "CogVideoXDownBlock3D"
16
+ ],
17
+ "force_upcast": true,
18
+ "in_channels": 3,
19
+ "latent_channels": 16,
20
+ "latents_mean": null,
21
+ "latents_std": null,
22
+ "layers_per_block": 3,
23
+ "norm_eps": 1e-06,
24
+ "norm_num_groups": 32,
25
+ "out_channels": 3,
26
+ "sample_height": 480,
27
+ "sample_width": 720,
28
+ "scaling_factor": 0.7,
29
+ "shift_factor": null,
30
+ "temporal_compression_ratio": 4,
31
+ "up_block_types": [
32
+ "CogVideoXUpBlock3D",
33
+ "CogVideoXUpBlock3D",
34
+ "CogVideoXUpBlock3D",
35
+ "CogVideoXUpBlock3D"
36
+ ],
37
+ "use_post_quant_conv": false,
38
+ "use_quant_conv": false,
39
+ "invert_scale_latents": false
40
+ }
models/CogVideo/CogVideoX-5b/vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a410e48d988c8224cef392b68db0654485cfd41f345f4a3a81d3e6b765bb995e
3
+ size 862388596
models/CogVideo/CogVideoX-Fun-V1.1-5b-Control/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
1
+ {
2
+ "_class_name": "CogVideoXDDIMScheduler",
3
+ "_diffusers_version": "0.31.0.dev0",
4
+ "beta_end": 0.012,
5
+ "beta_schedule": "scaled_linear",
6
+ "beta_start": 0.00085,
7
+ "clip_sample": false,
8
+ "clip_sample_range": 1.0,
9
+ "num_train_timesteps": 1000,
10
+ "prediction_type": "v_prediction",
11
+ "rescale_betas_zero_snr": true,
12
+ "sample_max_value": 1.0,
13
+ "set_alpha_to_one": true,
14
+ "snr_shift_scale": 1.0,
15
+ "steps_offset": 0,
16
+ "timestep_spacing": "trailing",
17
+ "trained_betas": null
18
+ }