abubakar123456 committed
Commit 5eaa703 · verified · 1 Parent(s): 9918d3d

Upload 4 files
.gitattributes CHANGED
@@ -1,35 +1,35 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
DensePose/Base-DensePose-RCNN-FPN.yaml ADDED
@@ -0,0 +1,48 @@
+ VERSION: 2
+ MODEL:
+   META_ARCHITECTURE: "GeneralizedRCNN"
+   BACKBONE:
+     NAME: "build_resnet_fpn_backbone"
+   RESNETS:
+     OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+   FPN:
+     IN_FEATURES: ["res2", "res3", "res4", "res5"]
+   ANCHOR_GENERATOR:
+     SIZES: [[32], [64], [128], [256], [512]]  # One size for each in feature map
+     ASPECT_RATIOS: [[0.5, 1.0, 2.0]]  # Three aspect ratios (same for all in feature maps)
+   RPN:
+     IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
+     PRE_NMS_TOPK_TRAIN: 2000  # Per FPN level
+     PRE_NMS_TOPK_TEST: 1000  # Per FPN level
+     # Detectron1 uses 2000 proposals per-batch,
+     # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
+     # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
+     POST_NMS_TOPK_TRAIN: 1000
+     POST_NMS_TOPK_TEST: 1000
+
+   DENSEPOSE_ON: True
+   ROI_HEADS:
+     NAME: "DensePoseROIHeads"
+     IN_FEATURES: ["p2", "p3", "p4", "p5"]
+     NUM_CLASSES: 1
+   ROI_BOX_HEAD:
+     NAME: "FastRCNNConvFCHead"
+     NUM_FC: 2
+     POOLER_RESOLUTION: 7
+     POOLER_SAMPLING_RATIO: 2
+     POOLER_TYPE: "ROIAlign"
+   ROI_DENSEPOSE_HEAD:
+     NAME: "DensePoseV1ConvXHead"
+     POOLER_TYPE: "ROIAlign"
+     NUM_COARSE_SEGM_CHANNELS: 2
+ DATASETS:
+   TRAIN: ("densepose_coco_2014_train", "densepose_coco_2014_valminusminival")
+   TEST: ("densepose_coco_2014_minival",)
+ SOLVER:
+   IMS_PER_BATCH: 16
+   BASE_LR: 0.01
+   STEPS: (60000, 80000)
+   MAX_ITER: 90000
+   WARMUP_FACTOR: 0.1
+ INPUT:
+   MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
DensePose/densepose_rcnn_R_50_FPN_s1x.yaml ADDED
@@ -0,0 +1,8 @@
+ _BASE_: "Base-DensePose-RCNN-FPN.yaml"
+ MODEL:
+   WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+   RESNETS:
+     DEPTH: 50
+ SOLVER:
+   MAX_ITER: 130000
+   STEPS: (100000, 120000)
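These two files follow detectron2's config-inheritance convention: the `_BASE_` key makes the s1x schedule inherit everything from `Base-DensePose-RCNN-FPN.yaml` and override only the initial weights, backbone depth, and solver schedule. A minimal sketch of how such a pair resolves, assuming detectron2 and its DensePose project are installed and both YAML files sit in `DensePose/` as in this commit:

```python
from detectron2.config import get_cfg
from densepose import add_densepose_config

cfg = get_cfg()
add_densepose_config(cfg)  # registers DensePose keys such as ROI_DENSEPOSE_HEAD
cfg.merge_from_file("DensePose/densepose_rcnn_R_50_FPN_s1x.yaml")  # resolves _BASE_

print(cfg.MODEL.RESNETS.DEPTH)  # 50: set by the child config
print(cfg.SOLVER.MAX_ITER)      # 130000: child overrides the base's 90000
print(cfg.SOLVER.BASE_LR)       # 0.01: inherited unchanged from the base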
README.md ADDED
@@ -0,0 +1,167 @@
+ ---
+ license: cc-by-nc-sa-4.0
+ ---
+
+ # 🐈 CatVTON: Concatenation Is All You Need for Virtual Try-On with Diffusion Models
+
+ <div style="display: flex; justify-content: center; align-items: center;">
+ <a href="http://arxiv.org/abs/2407.15886" style="margin: 0 2px;">
+ <img src='https://img.shields.io/badge/arXiv-2407.15886-red?style=flat&logo=arXiv&logoColor=red' alt='arxiv'>
+ </a>
+ <a href='https://huggingface.co/zhengchong/CatVTON' style="margin: 0 2px;">
+ <img src='https://img.shields.io/badge/Hugging Face-ckpts-orange?style=flat&logo=HuggingFace&logoColor=orange' alt='huggingface'>
+ </a>
+ <a href="https://github.com/Zheng-Chong/CatVTON" style="margin: 0 2px;">
+ <img src='https://img.shields.io/badge/GitHub-Repo-blue?style=flat&logo=GitHub' alt='GitHub'>
+ </a>
+ <a href="http://120.76.142.206:8888" style="margin: 0 2px;">
+ <img src='https://img.shields.io/badge/Demo-Gradio-gold?style=flat&logo=Gradio&logoColor=red' alt='Demo'>
+ </a>
+ <a href="https://huggingface.co/spaces/zhengchong/CatVTON" style="margin: 0 2px;">
+ <img src='https://img.shields.io/badge/Space-ZeroGPU-orange?style=flat&logo=Gradio&logoColor=red' alt='Demo'>
+ </a>
+ <a href='https://zheng-chong.github.io/CatVTON/' style="margin: 0 2px;">
+ <img src='https://img.shields.io/badge/Webpage-Project-silver?style=flat&logo=&logoColor=orange' alt='webpage'>
+ </a>
+ <a href="https://github.com/Zheng-Chong/CatVTON/LICENCE" style="margin: 0 2px;">
+ <img src='https://img.shields.io/badge/License-CC BY--NC--SA--4.0-lightgreen?style=flat&logo=Lisence' alt='License'>
+ </a>
+ </div>
+
+
+ **CatVTON** is a simple and efficient virtual try-on diffusion model with ***1) a Lightweight Network (899.06M parameters in total)***, ***2) Parameter-Efficient Training (49.57M trainable parameters)*** and ***3) Simplified Inference (< 8G VRAM at 1024x768 resolution)***.
+
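The "concatenation" in the name refers to how the garment conditions the generation: person and garment inputs are joined along a spatial axis and denoised by a single inpainting UNet, with no warping module or separate garment encoder. A conceptual sketch of the shapes involved (illustrative only; the sizes assume the SD-1.5 inpainting VAE latent space, 4 channels at 1/8 resolution):

```python
import torch

# Illustrative shapes: a 1024x768 image maps to a 128x96 latent in the SD-1.5 VAE.
person_latent  = torch.randn(1, 4, 128, 96)  # masked person image latent
garment_latent = torch.randn(1, 4, 128, 96)  # garment image latent

# Core idea: concatenate along a spatial axis so one UNet attends to both
# inputs, instead of adding a warping network or a dedicated garment encoder.
unet_input = torch.cat([person_latent, garment_latent], dim=3)
print(unet_input.shape)  # torch.Size([1, 4, 128, 192])
```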
+ ## Updates
+ - **`2024/08/10`**: Our 🤗 [**HuggingFace Space**](https://huggingface.co/spaces/zhengchong/CatVTON) is available now! Thanks for the grant from [**ZeroGPU**](https://huggingface.co/zero-gpu-explorers)!
+ - **`2024/08/09`**: [**Evaluation code**](https://github.com/Zheng-Chong/CatVTON?tab=readme-ov-file#3-calculate-metrics) is provided to calculate metrics 📚.
+ - **`2024/07/27`**: We provide code and a workflow for deploying CatVTON on [**ComfyUI**](https://github.com/Zheng-Chong/CatVTON?tab=readme-ov-file#comfyui-workflow) 💥.
+ - **`2024/07/24`**: Our [**Paper on ArXiv**](http://arxiv.org/abs/2407.15886) is available 🥳!
+ - **`2024/07/22`**: Our [**App Code**](https://github.com/Zheng-Chong/CatVTON/blob/main/app.py) is released; deploy and enjoy CatVTON on your machine 🎉!
+ - **`2024/07/21`**: Our [**Inference Code**](https://github.com/Zheng-Chong/CatVTON/blob/main/inference.py) and [**Weights** 🤗](https://huggingface.co/zhengchong/CatVTON) are released.
+ - **`2024/07/11`**: Our [**Online Demo**](http://120.76.142.206:8888) is released 😁.
+
+ ## Installation
+ An [Installation Guide](https://github.com/Zheng-Chong/CatVTON/blob/main/INSTALL.md) is provided to help build the conda environment for CatVTON. When deploying the app, you will need Detectron2 & DensePose, which are not required for inference on datasets. Install the packages according to your needs.
+
+ ## Deployment
+ ### ComfyUI Workflow
+ We have modified the main code to enable easy deployment of CatVTON on [ComfyUI](https://github.com/comfyanonymous/ComfyUI). Because the code structure is incompatible, we have released this part in the [Releases](https://github.com/Zheng-Chong/CatVTON/releases/tag/ComfyUI), which includes the code to place under `custom_nodes` of ComfyUI and our workflow JSON files.
+
+ To deploy CatVTON to your ComfyUI, follow these steps:
+ 1. Install all the requirements for both CatVTON and ComfyUI; refer to the [Installation Guide for CatVTON](https://github.com/Zheng-Chong/CatVTON/blob/main/INSTALL.md) and the [Installation Guide for ComfyUI](https://github.com/comfyanonymous/ComfyUI?tab=readme-ov-file#installing).
+ 2. Download [`ComfyUI-CatVTON.zip`](https://github.com/Zheng-Chong/CatVTON/releases/download/ComfyUI/ComfyUI-CatVTON.zip) and unzip it into the `custom_nodes` folder of your ComfyUI project (cloned from [ComfyUI](https://github.com/comfyanonymous/ComfyUI)).
+ 3. Run ComfyUI.
+ 4. Download [`catvton_workflow.json`](https://github.com/Zheng-Chong/CatVTON/releases/download/ComfyUI/catvton_workflow.json), drag it into your ComfyUI webpage, and enjoy 😆!
+
+ > If you run into problems under Windows, please refer to [issue#8](https://github.com/Zheng-Chong/CatVTON/issues/8).
+
+ When you run the CatVTON workflow for the first time, the weight files are downloaded automatically, which usually takes tens of minutes.
+
+
+ <!-- <div align="center">
+ <img src="resource/img/comfyui.png" width="100%" height="100%"/>
+ </div> -->
+
+ ### Gradio App
+
+ To deploy the Gradio App for CatVTON on your machine, run the following command; checkpoints will be downloaded automatically from HuggingFace.
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python app.py \
+ --output_dir="resource/demo/output" \
+ --mixed_precision="bf16" \
+ --allow_tf32
+ ```
+ When using `bf16` precision, generating results at a resolution of `1024x768` requires only about `8G` of VRAM.
+
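For orientation, these two flags map onto standard PyTorch mechanisms. The sketch below shows what they typically enable; this is an assumption about `app.py`'s internals, not a copy of its code:

```python
import torch

# What --allow_tf32 usually toggles: TF32 math for matmuls and cuDNN
# convolutions on Ampere and newer GPUs (faster, slightly lower precision).
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

# What --mixed_precision="bf16" usually means: run the denoising loop under
# bfloat16 autocast, which roughly halves activation memory.
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
    pass  # model forward passes go here
```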
+ ## Inference
+ ### 1. Data Preparation
+ Before inference, you need to download the [VITON-HD](https://github.com/shadow2496/VITON-HD) or [DressCode](https://github.com/aimagelab/dress-code) dataset.
+ Once the datasets are downloaded, the folder structure should look as follows:
+ ```
+ ├── VITON-HD
+ │   ├── test_pairs_unpaired.txt
+ │   ├── test
+ │   │   ├── image
+ │   │   │   ├── [000006_00.jpg | 000008_00.jpg | ...]
+ │   │   ├── cloth
+ │   │   │   ├── [000006_00.jpg | 000008_00.jpg | ...]
+ │   │   ├── agnostic-mask
+ │   │   │   ├── [000006_00_mask.png | 000008_00_mask.png | ...]
+ ...
+ ```
+ For the DressCode dataset, we provide [our preprocessed agnostic masks](https://drive.google.com/drive/folders/1uT88nYQl0n5qHz6zngb9WxGlX4ArAbVX?usp=share_link); download them and place them in the `agnostic_masks` folder under each category.
+ ```
+ ├── DressCode
+ │   ├── test_pairs_paired.txt
+ │   ├── test_pairs_unpaired.txt
+ │   ├── [dresses | lower_body | upper_body]
+ │   │   ├── test_pairs_paired.txt
+ │   │   ├── test_pairs_unpaired.txt
+ │   │   ├── images
+ │   │   │   ├── [013563_0.jpg | 013563_1.jpg | 013564_0.jpg | 013564_1.jpg | ...]
+ │   │   ├── agnostic_masks
+ │   │   │   ├── [013563_0.png | 013564_0.png | ...]
+ ...
+ ```
+
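The `test_pairs_*.txt` files drive which person/garment combinations are evaluated: paired lists keep each person with their ground-truth garment, while unpaired lists swap in a different one. A small sketch of reading such a list, assuming the common two-filenames-per-line format (verify against your copy of the dataset):

```python
from pathlib import Path

def read_pairs(pairs_file: str) -> list[tuple[str, str]]:
    # Assumed format: one "<person_image> <garment_image>" pair per line.
    pairs = []
    for line in Path(pairs_file).read_text().splitlines():
        if line.strip():
            person, garment = line.split()
            pairs.append((person, garment))
    return pairs

pairs = read_pairs("VITON-HD/test_pairs_unpaired.txt")
print(pairs[:3])
```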
+ ### 2. Inference on VITON-HD/DressCode
+ To run inference on the DressCode or VITON-HD dataset, run the following command; checkpoints will be downloaded automatically from HuggingFace.
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python inference.py \
+ --dataset [dresscode | vitonhd] \
+ --data_root_path <path> \
+ --output_dir <path> \
+ --dataloader_num_workers 8 \
+ --batch_size 8 \
+ --seed 555 \
+ --mixed_precision [no | fp16 | bf16] \
+ --allow_tf32 \
+ --repaint \
+ --eval_pair
+ ```
+ ### 3. Calculate Metrics
+
+ After obtaining the inference results, calculate the metrics using the following command:
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python eval.py \
+ --gt_folder <your_path_to_gt_image_folder> \
+ --pred_folder <your_path_to_predicted_image_folder> \
+ --paired \
+ --batch_size=16 \
+ --num_workers=16
+ ```
+
+ - `--gt_folder` and `--pred_folder` should be folders that contain **only images**.
+ - To evaluate results in the paired setting, pass `--paired`; for the unpaired setting, simply omit it.
+ - `--batch_size` and `--num_workers` should be adjusted to your machine.
+
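The repo's `eval.py` is the authoritative implementation. As a rough illustration of what a paired evaluation computes, here is a hedged sketch of SSIM and LPIPS over same-named image pairs using `torchmetrics` (an assumed stand-in, not the actual eval code):

```python
import torch
from pathlib import Path
from PIL import Image
from torchvision.transforms.functional import pil_to_tensor
from torchmetrics.image import StructuralSimilarityIndexMeasure
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity

ssim = StructuralSimilarityIndexMeasure(data_range=1.0)
lpips = LearnedPerceptualImagePatchSimilarity(net_type="alex", normalize=True)

def load(path: Path) -> torch.Tensor:
    # Returns a [1, 3, H, W] float tensor scaled to [0, 1].
    return pil_to_tensor(Image.open(path).convert("RGB")).unsqueeze(0) / 255.0

# Paired setting: ground truth and prediction share file names.
gt_dir, pred_dir = Path("gt"), Path("pred")
for gt_path in sorted(gt_dir.iterdir()):
    gt, pred = load(gt_path), load(pred_dir / gt_path.name)
    ssim.update(pred, gt)
    lpips.update(pred, gt)

print(f"SSIM:  {ssim.compute():.4f}")
print(f"LPIPS: {lpips.compute():.4f}")
```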
147
+
148
+ ## Acknowledgement
149
+ Our code is modified based on [Diffusers](https://github.com/huggingface/diffusers). We adopt [Stable Diffusion v1.5 inpainting](https://huggingface.co/runwayml/stable-diffusion-inpainting) as the base model. We use [SCHP](https://github.com/GoGoDuck912/Self-Correction-Human-Parsing/tree/master) and [DensePose](https://github.com/facebookresearch/DensePose) to automatically generate masks in our [Gradio](https://github.com/gradio-app/gradio) App and [ComfyUI](https://github.com/comfyanonymous/ComfyUI) workflow. Thanks to all the contributors!
150
+
151
+ ## License
152
+ All the materials, including code, checkpoints, and demo, are made available under the [Creative Commons BY-NC-SA 4.0](https://creativecommons.org/licenses/by-nc-sa/4.0/) license. You are free to copy, redistribute, remix, transform, and build upon the project for non-commercial purposes, as long as you give appropriate credit and distribute your contributions under the same license.
153
+
154
+
155
+ ## Citation
156
+
157
+ ```bibtex
158
+ @misc{chong2024catvtonconcatenationneedvirtual,
159
+ title={CatVTON: Concatenation Is All You Need for Virtual Try-On with Diffusion Models},
160
+ author={Zheng Chong and Xiao Dong and Haoxiang Li and Shiyue Zhang and Wenqing Zhang and Xujie Zhang and Hanqing Zhao and Xiaodan Liang},
161
+ year={2024},
162
+ eprint={2407.15886},
163
+ archivePrefix={arXiv},
164
+ primaryClass={cs.CV},
165
+ url={https://arxiv.org/abs/2407.15886},
166
+ }
167
+ ```