Spaces: Running on Zero

Fix (#18)
by hysts (HF staff) - opened
- README.md +2 -2
- app.py +12 -9
- ckpt/controlnet-tile/config.json +0 -52
- ckpt/controlnet-tile/diffusion_pytorch_model.safetensors +0 -3
- ckpt/image2normal/feature_extractor/preprocessor_config.json +0 -44
- ckpt/image2normal/image_encoder/config.json +0 -23
- ckpt/image2normal/image_encoder/model.safetensors +0 -3
- ckpt/image2normal/model_index.json +0 -31
- ckpt/image2normal/scheduler/scheduler_config.json +0 -16
- ckpt/image2normal/unet/config.json +0 -68
- ckpt/image2normal/unet/diffusion_pytorch_model.safetensors +0 -3
- ckpt/image2normal/unet_state_dict.pth +0 -3
- ckpt/image2normal/vae/config.json +0 -34
- ckpt/image2normal/vae/diffusion_pytorch_model.safetensors +0 -3
- ckpt/img2mvimg/feature_extractor/preprocessor_config.json +0 -44
- ckpt/img2mvimg/image_encoder/config.json +0 -23
- ckpt/img2mvimg/image_encoder/model.safetensors +0 -3
- ckpt/img2mvimg/model_index.json +0 -31
- ckpt/img2mvimg/scheduler/scheduler_config.json +0 -20
- ckpt/img2mvimg/unet/config.json +0 -68
- ckpt/img2mvimg/unet/diffusion_pytorch_model.safetensors +0 -3
- ckpt/img2mvimg/unet_state_dict.pth +0 -3
- ckpt/img2mvimg/vae/config.json +0 -34
- ckpt/img2mvimg/vae/diffusion_pytorch_model.safetensors +0 -3
- ckpt/realesrgan-x4.onnx +0 -3
- ckpt/v1-inference.yaml +0 -70
- custum_3d_diffusion/custum_modules/unifield_processor.py +1 -1
- gradio_app/all_models.py +1 -1
- requirements.txt +2 -3
- scripts/sd_model_zoo.py +1 -1
README.md
CHANGED
@@ -5,11 +5,11 @@ colorFrom: red
 colorTo: purple
 sdk: gradio
 python_version: 3.10.8
-sdk_version:
+sdk_version: 5.15.0
 app_file: app.py
 pinned: true
 short_description: Create a 1M faces 3D colored model from an image!
 license: mit
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
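Note: sdk_version in the README front matter pins the Gradio version the Space is built with; this PR sets it to 5.15.0.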
app.py
CHANGED
@@ -1,22 +1,25 @@
 import shlex
 import subprocess
+
+
+subprocess.run(shlex.split("pip install pip==24.0"), check=True)
 subprocess.run(
     shlex.split(
         "pip install package/onnxruntime_gpu-1.17.0-cp310-cp310-manylinux_2_28_x86_64.whl --force-reinstall --no-deps"
-    )
+    ), check=True
 )
 subprocess.run(
     shlex.split(
         "pip install package/nvdiffrast-0.3.1.torch-cp310-cp310-linux_x86_64.whl --force-reinstall --no-deps"
-    )
+    ), check=True
 )
 
+
 if __name__ == "__main__":
-    import
-
-
-
-
+    from huggingface_hub import snapshot_download
+
+    snapshot_download("Wuvin/Unique3D", repo_type="model", local_dir="./ckpt")
+
     import os
     import sys
     sys.path.append(os.curdir)
@@ -51,7 +54,7 @@ _DESCRIPTION = '''
 
 def launch():
     model_zoo.init_models()
-
+
     with gr.Blocks(
         title=_TITLE,
         # theme=gr.themes.Monochrome(),
@@ -63,6 +66,6 @@ def launch():
         create_3d_ui("wkl")
 
     demo.queue().launch(share=True)
-
+
 if __name__ == '__main__':
     fire.Fire(launch)
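Note: the app.py change swaps checked-in weights for a startup download and makes the pip installs fail fast. A minimal sketch of the same pattern, not the Space's exact code (repo id and target directory are taken from the diff above):

import shlex
import subprocess

from huggingface_hub import snapshot_download

# check=True raises CalledProcessError on a failed install instead of
# continuing with a broken environment.
subprocess.run(shlex.split("pip install pip==24.0"), check=True)

# Fetch the whole model repo into ./ckpt at startup; files already in the
# local Hub cache are reused, so restarts avoid re-downloading the weights.
snapshot_download("Wuvin/Unique3D", repo_type="model", local_dir="./ckpt")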
ckpt/controlnet-tile/config.json
DELETED
@@ -1,52 +0,0 @@
-{
-  "_class_name": "ControlNetModel",
-  "_diffusers_version": "0.27.2",
-  "_name_or_path": "lllyasviel/control_v11f1e_sd15_tile",
-  "act_fn": "silu",
-  "addition_embed_type": null,
-  "addition_embed_type_num_heads": 64,
-  "addition_time_embed_dim": null,
-  "attention_head_dim": 8,
-  "block_out_channels": [
-    320,
-    640,
-    1280,
-    1280
-  ],
-  "class_embed_type": null,
-  "conditioning_channels": 3,
-  "conditioning_embedding_out_channels": [
-    16,
-    32,
-    96,
-    256
-  ],
-  "controlnet_conditioning_channel_order": "rgb",
-  "cross_attention_dim": 768,
-  "down_block_types": [
-    "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D",
-    "DownBlock2D"
-  ],
-  "downsample_padding": 1,
-  "encoder_hid_dim": null,
-  "encoder_hid_dim_type": null,
-  "flip_sin_to_cos": true,
-  "freq_shift": 0,
-  "global_pool_conditions": false,
-  "in_channels": 4,
-  "layers_per_block": 2,
-  "mid_block_scale_factor": 1,
-  "mid_block_type": "UNetMidBlock2DCrossAttn",
-  "norm_eps": 1e-05,
-  "norm_num_groups": 32,
-  "num_attention_heads": null,
-  "num_class_embeds": null,
-  "only_cross_attention": false,
-  "projection_class_embeddings_input_dim": null,
-  "resnet_time_scale_shift": "default",
-  "transformer_layers_per_block": 1,
-  "upcast_attention": false,
-  "use_linear_projection": false
-}
ckpt/controlnet-tile/diffusion_pytorch_model.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:845d3845053912728cd1453029a0ef87d3c0a3082a083ba393f36eaa5fb0e218
-size 1445157120
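Note: each deleted .safetensors/.pth/.onnx entry in this PR is a Git LFS pointer file, not the binary itself: version names the pointer spec, oid sha256:... identifies the stored blob, and size is the byte count of the real file. Removing the pointers takes the weights out of the Space repo; with the app.py change above, the same files are fetched from Wuvin/Unique3D at startup instead.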
ckpt/image2normal/feature_extractor/preprocessor_config.json
DELETED
@@ -1,44 +0,0 @@
-{
-  "_valid_processor_keys": [
-    "images",
-    "do_resize",
-    "size",
-    "resample",
-    "do_center_crop",
-    "crop_size",
-    "do_rescale",
-    "rescale_factor",
-    "do_normalize",
-    "image_mean",
-    "image_std",
-    "do_convert_rgb",
-    "return_tensors",
-    "data_format",
-    "input_data_format"
-  ],
-  "crop_size": {
-    "height": 224,
-    "width": 224
-  },
-  "do_center_crop": true,
-  "do_convert_rgb": true,
-  "do_normalize": true,
-  "do_rescale": true,
-  "do_resize": true,
-  "image_mean": [
-    0.48145466,
-    0.4578275,
-    0.40821073
-  ],
-  "image_processor_type": "CLIPImageProcessor",
-  "image_std": [
-    0.26862954,
-    0.26130258,
-    0.27577711
-  ],
-  "resample": 3,
-  "rescale_factor": 0.00392156862745098,
-  "size": {
-    "shortest_edge": 224
-  }
-}
ckpt/image2normal/image_encoder/config.json
DELETED
@@ -1,23 +0,0 @@
-{
-  "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
-  "architectures": [
-    "CLIPVisionModelWithProjection"
-  ],
-  "attention_dropout": 0.0,
-  "dropout": 0.0,
-  "hidden_act": "quick_gelu",
-  "hidden_size": 1024,
-  "image_size": 224,
-  "initializer_factor": 1.0,
-  "initializer_range": 0.02,
-  "intermediate_size": 4096,
-  "layer_norm_eps": 1e-05,
-  "model_type": "clip_vision_model",
-  "num_attention_heads": 16,
-  "num_channels": 3,
-  "num_hidden_layers": 24,
-  "patch_size": 14,
-  "projection_dim": 768,
-  "torch_dtype": "bfloat16",
-  "transformers_version": "4.39.3"
-}
ckpt/image2normal/image_encoder/model.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e4b33d864f89a793357a768cb07d0dc18d6a14e6664f4110a0d535ca9ba78da8
-size 607980488
ckpt/image2normal/model_index.json
DELETED
@@ -1,31 +0,0 @@
-{
-  "_class_name": "StableDiffusionImageCustomPipeline",
-  "_diffusers_version": "0.27.2",
-  "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
-  "feature_extractor": [
-    "transformers",
-    "CLIPImageProcessor"
-  ],
-  "image_encoder": [
-    "transformers",
-    "CLIPVisionModelWithProjection"
-  ],
-  "noisy_cond_latents": false,
-  "requires_safety_checker": true,
-  "safety_checker": [
-    null,
-    null
-  ],
-  "scheduler": [
-    "diffusers",
-    "EulerAncestralDiscreteScheduler"
-  ],
-  "unet": [
-    "diffusers",
-    "UNet2DConditionModel"
-  ],
-  "vae": [
-    "diffusers",
-    "AutoencoderKL"
-  ]
-}
ckpt/image2normal/scheduler/scheduler_config.json
DELETED
@@ -1,16 +0,0 @@
-{
-  "_class_name": "EulerAncestralDiscreteScheduler",
-  "_diffusers_version": "0.27.2",
-  "beta_end": 0.012,
-  "beta_schedule": "scaled_linear",
-  "beta_start": 0.00085,
-  "clip_sample": false,
-  "num_train_timesteps": 1000,
-  "prediction_type": "epsilon",
-  "rescale_betas_zero_snr": false,
-  "set_alpha_to_one": false,
-  "skip_prk_steps": true,
-  "steps_offset": 1,
-  "timestep_spacing": "linspace",
-  "trained_betas": null
-}
ckpt/image2normal/unet/config.json
DELETED
@@ -1,68 +0,0 @@
-{
-  "_class_name": "UnifieldWrappedUNet",
-  "_diffusers_version": "0.27.2",
-  "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
-  "act_fn": "silu",
-  "addition_embed_type": null,
-  "addition_embed_type_num_heads": 64,
-  "addition_time_embed_dim": null,
-  "attention_head_dim": 8,
-  "attention_type": "default",
-  "block_out_channels": [
-    320,
-    640,
-    1280,
-    1280
-  ],
-  "center_input_sample": false,
-  "class_embed_type": null,
-  "class_embeddings_concat": false,
-  "conv_in_kernel": 3,
-  "conv_out_kernel": 3,
-  "cross_attention_dim": 768,
-  "cross_attention_norm": null,
-  "down_block_types": [
-    "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D",
-    "DownBlock2D"
-  ],
-  "downsample_padding": 1,
-  "dropout": 0.0,
-  "dual_cross_attention": false,
-  "encoder_hid_dim": null,
-  "encoder_hid_dim_type": null,
-  "flip_sin_to_cos": true,
-  "freq_shift": 0,
-  "in_channels": 4,
-  "layers_per_block": 2,
-  "mid_block_only_cross_attention": null,
-  "mid_block_scale_factor": 1,
-  "mid_block_type": "UNetMidBlock2DCrossAttn",
-  "norm_eps": 1e-05,
-  "norm_num_groups": 32,
-  "num_attention_heads": null,
-  "num_class_embeds": null,
-  "only_cross_attention": false,
-  "out_channels": 4,
-  "projection_class_embeddings_input_dim": null,
-  "resnet_out_scale_factor": 1.0,
-  "resnet_skip_time_act": false,
-  "resnet_time_scale_shift": "default",
-  "reverse_transformer_layers_per_block": null,
-  "sample_size": 64,
-  "time_cond_proj_dim": null,
-  "time_embedding_act_fn": null,
-  "time_embedding_dim": null,
-  "time_embedding_type": "positional",
-  "timestep_post_act": null,
-  "transformer_layers_per_block": 1,
-  "up_block_types": [
-    "UpBlock2D",
-    "CrossAttnUpBlock2D",
-    "CrossAttnUpBlock2D",
-    "CrossAttnUpBlock2D"
-  ],
-  "upcast_attention": false,
-  "use_linear_projection": false
-}
ckpt/image2normal/unet/diffusion_pytorch_model.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f5cbaf1d56619345ce78de8cfbb20d94923b3305a364bf6a5b2a2cc422d4b701
-size 3537503456
ckpt/image2normal/unet_state_dict.pth
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8df80d09e953d338aa6d8decd0351c5045f52ec6e2645eee2027ccb8792c8ed8
-size 3537964654
ckpt/image2normal/vae/config.json
DELETED
@@ -1,34 +0,0 @@
-{
-  "_class_name": "AutoencoderKL",
-  "_diffusers_version": "0.27.2",
-  "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
-  "act_fn": "silu",
-  "block_out_channels": [
-    128,
-    256,
-    512,
-    512
-  ],
-  "down_block_types": [
-    "DownEncoderBlock2D",
-    "DownEncoderBlock2D",
-    "DownEncoderBlock2D",
-    "DownEncoderBlock2D"
-  ],
-  "force_upcast": true,
-  "in_channels": 3,
-  "latent_channels": 4,
-  "latents_mean": null,
-  "latents_std": null,
-  "layers_per_block": 2,
-  "norm_num_groups": 32,
-  "out_channels": 3,
-  "sample_size": 256,
-  "scaling_factor": 0.18215,
-  "up_block_types": [
-    "UpDecoderBlock2D",
-    "UpDecoderBlock2D",
-    "UpDecoderBlock2D",
-    "UpDecoderBlock2D"
-  ]
-}
ckpt/image2normal/vae/diffusion_pytorch_model.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8d0c34f57abe50f323040f2366c8e22b941068dcdf53c8eb1d6fafb838afecb7
-size 167335590
ckpt/img2mvimg/feature_extractor/preprocessor_config.json
DELETED
@@ -1,44 +0,0 @@
-{
-  "_valid_processor_keys": [
-    "images",
-    "do_resize",
-    "size",
-    "resample",
-    "do_center_crop",
-    "crop_size",
-    "do_rescale",
-    "rescale_factor",
-    "do_normalize",
-    "image_mean",
-    "image_std",
-    "do_convert_rgb",
-    "return_tensors",
-    "data_format",
-    "input_data_format"
-  ],
-  "crop_size": {
-    "height": 224,
-    "width": 224
-  },
-  "do_center_crop": true,
-  "do_convert_rgb": true,
-  "do_normalize": true,
-  "do_rescale": true,
-  "do_resize": true,
-  "image_mean": [
-    0.48145466,
-    0.4578275,
-    0.40821073
-  ],
-  "image_processor_type": "CLIPImageProcessor",
-  "image_std": [
-    0.26862954,
-    0.26130258,
-    0.27577711
-  ],
-  "resample": 3,
-  "rescale_factor": 0.00392156862745098,
-  "size": {
-    "shortest_edge": 224
-  }
-}
ckpt/img2mvimg/image_encoder/config.json
DELETED
@@ -1,23 +0,0 @@
-{
-  "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
-  "architectures": [
-    "CLIPVisionModelWithProjection"
-  ],
-  "attention_dropout": 0.0,
-  "dropout": 0.0,
-  "hidden_act": "quick_gelu",
-  "hidden_size": 1024,
-  "image_size": 224,
-  "initializer_factor": 1.0,
-  "initializer_range": 0.02,
-  "intermediate_size": 4096,
-  "layer_norm_eps": 1e-05,
-  "model_type": "clip_vision_model",
-  "num_attention_heads": 16,
-  "num_channels": 3,
-  "num_hidden_layers": 24,
-  "patch_size": 14,
-  "projection_dim": 768,
-  "torch_dtype": "float32",
-  "transformers_version": "4.39.3"
-}
ckpt/img2mvimg/image_encoder/model.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:77b33d2a3a643650857672e880ccf73adbaf114fbbadec36d142ee9d48af7e20
-size 1215912728
ckpt/img2mvimg/model_index.json
DELETED
@@ -1,31 +0,0 @@
-{
-  "_class_name": "StableDiffusionImage2MVCustomPipeline",
-  "_diffusers_version": "0.27.2",
-  "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
-  "condition_offset": true,
-  "feature_extractor": [
-    "transformers",
-    "CLIPImageProcessor"
-  ],
-  "image_encoder": [
-    "transformers",
-    "CLIPVisionModelWithProjection"
-  ],
-  "requires_safety_checker": true,
-  "safety_checker": [
-    null,
-    null
-  ],
-  "scheduler": [
-    "diffusers",
-    "DDIMScheduler"
-  ],
-  "unet": [
-    "diffusers",
-    "UNet2DConditionModel"
-  ],
-  "vae": [
-    "diffusers",
-    "AutoencoderKL"
-  ]
-}
ckpt/img2mvimg/scheduler/scheduler_config.json
DELETED
@@ -1,20 +0,0 @@
-{
-  "_class_name": "DDIMScheduler",
-  "_diffusers_version": "0.27.2",
-  "beta_end": 0.012,
-  "beta_schedule": "scaled_linear",
-  "beta_start": 0.00085,
-  "clip_sample": false,
-  "clip_sample_range": 1.0,
-  "dynamic_thresholding_ratio": 0.995,
-  "num_train_timesteps": 1000,
-  "prediction_type": "epsilon",
-  "rescale_betas_zero_snr": false,
-  "sample_max_value": 1.0,
-  "set_alpha_to_one": false,
-  "skip_prk_steps": true,
-  "steps_offset": 1,
-  "thresholding": false,
-  "timestep_spacing": "leading",
-  "trained_betas": null
-}
ckpt/img2mvimg/unet/config.json
DELETED
@@ -1,68 +0,0 @@
-{
-  "_class_name": "UnifieldWrappedUNet",
-  "_diffusers_version": "0.27.2",
-  "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
-  "act_fn": "silu",
-  "addition_embed_type": null,
-  "addition_embed_type_num_heads": 64,
-  "addition_time_embed_dim": null,
-  "attention_head_dim": 8,
-  "attention_type": "default",
-  "block_out_channels": [
-    320,
-    640,
-    1280,
-    1280
-  ],
-  "center_input_sample": false,
-  "class_embed_type": null,
-  "class_embeddings_concat": false,
-  "conv_in_kernel": 3,
-  "conv_out_kernel": 3,
-  "cross_attention_dim": 768,
-  "cross_attention_norm": null,
-  "down_block_types": [
-    "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D",
-    "CrossAttnDownBlock2D",
-    "DownBlock2D"
-  ],
-  "downsample_padding": 1,
-  "dropout": 0.0,
-  "dual_cross_attention": false,
-  "encoder_hid_dim": null,
-  "encoder_hid_dim_type": null,
-  "flip_sin_to_cos": true,
-  "freq_shift": 0,
-  "in_channels": 8,
-  "layers_per_block": 2,
-  "mid_block_only_cross_attention": null,
-  "mid_block_scale_factor": 1,
-  "mid_block_type": "UNetMidBlock2DCrossAttn",
-  "norm_eps": 1e-05,
-  "norm_num_groups": 32,
-  "num_attention_heads": null,
-  "num_class_embeds": 8,
-  "only_cross_attention": false,
-  "out_channels": 4,
-  "projection_class_embeddings_input_dim": null,
-  "resnet_out_scale_factor": 1.0,
-  "resnet_skip_time_act": false,
-  "resnet_time_scale_shift": "default",
-  "reverse_transformer_layers_per_block": null,
-  "sample_size": 64,
-  "time_cond_proj_dim": null,
-  "time_embedding_act_fn": null,
-  "time_embedding_dim": null,
-  "time_embedding_type": "positional",
-  "timestep_post_act": null,
-  "transformer_layers_per_block": 1,
-  "up_block_types": [
-    "UpBlock2D",
-    "CrossAttnUpBlock2D",
-    "CrossAttnUpBlock2D",
-    "CrossAttnUpBlock2D"
-  ],
-  "upcast_attention": false,
-  "use_linear_projection": false
-}
ckpt/img2mvimg/unet/diffusion_pytorch_model.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:93a3b4e678efac0c997e76df465df13136a4b0f1732e534a1200fad9e04cd0f9
-size 3438254688
ckpt/img2mvimg/unet_state_dict.pth
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:0dff2fdba450af0e10c3a847ba66a530170be2e9b9c9f4c834483515e82738b5
-size 3438460972
ckpt/img2mvimg/vae/config.json
DELETED
@@ -1,34 +0,0 @@
-{
-  "_class_name": "AutoencoderKL",
-  "_diffusers_version": "0.27.2",
-  "_name_or_path": "lambdalabs/sd-image-variations-diffusers",
-  "act_fn": "silu",
-  "block_out_channels": [
-    128,
-    256,
-    512,
-    512
-  ],
-  "down_block_types": [
-    "DownEncoderBlock2D",
-    "DownEncoderBlock2D",
-    "DownEncoderBlock2D",
-    "DownEncoderBlock2D"
-  ],
-  "force_upcast": true,
-  "in_channels": 3,
-  "latent_channels": 4,
-  "latents_mean": null,
-  "latents_std": null,
-  "layers_per_block": 2,
-  "norm_num_groups": 32,
-  "out_channels": 3,
-  "sample_size": 256,
-  "scaling_factor": 0.18215,
-  "up_block_types": [
-    "UpDecoderBlock2D",
-    "UpDecoderBlock2D",
-    "UpDecoderBlock2D",
-    "UpDecoderBlock2D"
-  ]
-}
ckpt/img2mvimg/vae/diffusion_pytorch_model.safetensors
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2aa1f43011b553a4cba7f37456465cdbd48aab7b54b9348b890e8058ea7683ec
-size 334643268
ckpt/realesrgan-x4.onnx
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:9bc5d0c85207adad8bca26286f0c0007f266f85e7aa7c454c565da9b5f3c940a
-size 67051617
ckpt/v1-inference.yaml
DELETED
@@ -1,70 +0,0 @@
-model:
-  base_learning_rate: 1.0e-04
-  target: ldm.models.diffusion.ddpm.LatentDiffusion
-  params:
-    linear_start: 0.00085
-    linear_end: 0.0120
-    num_timesteps_cond: 1
-    log_every_t: 200
-    timesteps: 1000
-    first_stage_key: "jpg"
-    cond_stage_key: "txt"
-    image_size: 64
-    channels: 4
-    cond_stage_trainable: false # Note: different from the one we trained before
-    conditioning_key: crossattn
-    monitor: val/loss_simple_ema
-    scale_factor: 0.18215
-    use_ema: False
-
-    scheduler_config: # 10000 warmup steps
-      target: ldm.lr_scheduler.LambdaLinearScheduler
-      params:
-        warm_up_steps: [ 10000 ]
-        cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases
-        f_start: [ 1.e-6 ]
-        f_max: [ 1. ]
-        f_min: [ 1. ]
-
-    unet_config:
-      target: ldm.modules.diffusionmodules.openaimodel.UNetModel
-      params:
-        image_size: 32 # unused
-        in_channels: 4
-        out_channels: 4
-        model_channels: 320
-        attention_resolutions: [ 4, 2, 1 ]
-        num_res_blocks: 2
-        channel_mult: [ 1, 2, 4, 4 ]
-        num_heads: 8
-        use_spatial_transformer: True
-        transformer_depth: 1
-        context_dim: 768
-        use_checkpoint: True
-        legacy: False
-
-    first_stage_config:
-      target: ldm.models.autoencoder.AutoencoderKL
-      params:
-        embed_dim: 4
-        monitor: val/rec_loss
-        ddconfig:
-          double_z: true
-          z_channels: 4
-          resolution: 256
-          in_channels: 3
-          out_ch: 3
-          ch: 128
-          ch_mult:
-          - 1
-          - 2
-          - 4
-          - 4
-          num_res_blocks: 2
-          attn_resolutions: []
-          dropout: 0.0
-        lossconfig:
-          target: torch.nn.Identity
-
-    cond_stage_config:
-      target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
custum_3d_diffusion/custum_modules/unifield_processor.py
CHANGED
@@ -70,7 +70,7 @@ class AttnConfig:
     use_mv_joint_attn: bool = False
 
     # for unet
-    init_unet_path: str = "
+    init_unet_path: str = "stable-diffusion-v1-5/stable-diffusion-v1-5"
     init_num_cls_label: int = 0 # for initialize
     cls_labels: List[int] = field(default_factory=lambda: [])
     cls_label_type: str = "embedding"
gradio_app/all_models.py
CHANGED
@@ -6,7 +6,7 @@ from diffusers import StableDiffusionControlNetImg2ImgPipeline, StableDiffusionP
 class MyModelZoo:
     _pipe_disney_controlnet_lineart_ipadapter_i2i: StableDiffusionControlNetImg2ImgPipeline = None
 
-    base_model = "
+    base_model = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 
     def __init__(self, base_model=None) -> None:
         if base_model is not None:
requirements.txt
CHANGED
@@ -9,7 +9,7 @@ fire
 gradio
 jaxtyping
 numba
-numpy
+numpy<2
 omegaconf>=2.3.0
 opencv_python
 opencv_python_headless
@@ -18,7 +18,6 @@ Pillow
 pygltflib
 pymeshlab>=2023.12
 rembg[gpu]
-torch>=2.0.1
 torch_scatter @ https://data.pyg.org/whl/torch-2.2.0%2Bcu121/torch_scatter-2.1.2%2Bpt22cu121-cp310-cp310-linux_x86_64.whl
 tqdm
 transformers
@@ -26,4 +25,4 @@ trimesh
 typeguard
 wandb
 xformers
-ninja
+ninja
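Note: numpy<2 most likely guards against the NumPy 2.0 ABI break for wheels compiled against 1.x, and dropping the torch>=2.0.1 pin defers to the torch already present in the Space image (the torch_scatter wheel above targets torch 2.2 + cu121).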
scripts/sd_model_zoo.py
CHANGED
@@ -4,7 +4,7 @@ import torch
 from copy import deepcopy
 
 ENABLE_CPU_CACHE = False
-DEFAULT_BASE_MODEL = "
+DEFAULT_BASE_MODEL = "stable-diffusion-v1-5/stable-diffusion-v1-5"
 
 cached_models = {} # cache for models to avoid repeated loading, key is model name
 def cache_model(func):
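For reference, the identifier written into unifield_processor.py, all_models.py, and sd_model_zoo.py above is a plain Hub repo id and loads like any diffusers base model. A minimal sketch, assuming a stock StableDiffusionPipeline rather than this repo's custom pipeline classes:

import torch
from diffusers import StableDiffusionPipeline

# Load the same base model the PR points these modules at.
pipe = StableDiffusionPipeline.from_pretrained(
    "stable-diffusion-v1-5/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
)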