End of training

- .gitattributes +3 -0
- README.md +47 -0
- checkpoint-1000/controlnet/config.json +56 -0
- checkpoint-1000/controlnet/diffusion_pytorch_model.safetensors +3 -0
- checkpoint-1000/optimizer.bin +3 -0
- checkpoint-1000/random_states_0.pkl +3 -0
- checkpoint-1000/scheduler.bin +3 -0
- config.json +56 -0
- diffusion_pytorch_model.safetensors +3 -0
- image_control.png +0 -0
- images_0.png +3 -0
- images_1.png +3 -0
- images_2.png +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+images_0.png filter=lfs diff=lfs merge=lfs -text
+images_1.png filter=lfs diff=lfs merge=lfs -text
+images_2.png filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,47 @@
---
base_model: stabilityai/stable-diffusion-2-1-base
library_name: diffusers
license: creativeml-openrail-m
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- controlnet
- diffusers-training
inference: true
---

<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->

# controlnet-manhattan23/output

These are ControlNet weights trained on stabilityai/stable-diffusion-2-1-base with a new type of conditioning.
You can find some example images below.

prompt: A beautiful woman taking a picture with her smart phone.
![images_0](./images_0.png)
prompt: A young man bending next to a toilet.
![images_1](./images_1.png)
prompt: This is a picture of a kitchen in a poverty area of a town.
![images_2](./images_2.png)

## Intended uses & limitations

#### How to use

```python
# TODO: add an example code snippet for running this diffusion pipeline
```
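
Pending the official snippet, here is a minimal sketch using diffusers' standard ControlNet pipeline. It is untested; the hub repository id `manhattan23/output` (inferred from the card title) and the use of `image_control.png` as the conditioning input are assumptions.

```python
# Minimal sketch, not the official usage snippet for this model.
# Assumptions: repo id "manhattan23/output", image_control.png as the
# 3-channel conditioning image.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

controlnet = ControlNetModel.from_pretrained(
    "manhattan23/output", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base",
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

control_image = load_image("./image_control.png")
image = pipe(
    "A beautiful woman taking a picture with her smart phone.",
    image=control_image,
    num_inference_steps=30,
).images[0]
image.save("example.png")
```

Swap in your own prompt and conditioning image as needed; `StableDiffusionControlNetPipeline` is the generic diffusers entry point for ControlNet inference.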

#### Limitations and bias

[TODO: provide examples of latent issues and potential remediations]

## Training details

[TODO: describe the data used to train the model]
checkpoint-1000/controlnet/config.json
ADDED
@@ -0,0 +1,56 @@
{
  "_class_name": "ControlNetModel",
  "_diffusers_version": "0.30.0.dev0",
  "act_fn": "silu",
  "addition_embed_type": null,
  "addition_embed_type_num_heads": 64,
  "addition_time_embed_dim": null,
  "attention_head_dim": [
    5,
    10,
    20,
    20
  ],
  "block_out_channels": [
    320,
    640,
    1280,
    1280
  ],
  "class_embed_type": null,
  "conditioning_channels": 3,
  "conditioning_embedding_out_channels": [
    16,
    32,
    96,
    256
  ],
  "controlnet_conditioning_channel_order": "rgb",
  "cross_attention_dim": 1024,
  "down_block_types": [
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "DownBlock2D"
  ],
  "downsample_padding": 1,
  "encoder_hid_dim": null,
  "encoder_hid_dim_type": null,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "global_pool_conditions": false,
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_scale_factor": 1,
  "mid_block_type": "UNetMidBlock2DCrossAttn",
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_attention_heads": null,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "projection_class_embeddings_input_dim": null,
  "resnet_time_scale_shift": "default",
  "transformer_layers_per_block": 1,
  "upcast_attention": false,
  "use_linear_projection": true
}
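
This is the standard SD 2.1-base ControlNet architecture (`cross_attention_dim` 1024, linear projections). The intermediate step-1000 weights should be loadable straight from this subfolder; a sketch, again assuming the repo id `manhattan23/output`:

```python
from diffusers import ControlNetModel

# Load the intermediate checkpoint rather than the final top-level weights.
controlnet = ControlNetModel.from_pretrained(
    "manhattan23/output",  # assumed repo id
    subfolder="checkpoint-1000/controlnet",
)
print(controlnet.config.cross_attention_dim)  # 1024, as in the config above
```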
checkpoint-1000/controlnet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a37ce51ba68b812e4497e7cefe268b3cc5b28950407067f7d78f92ae301aab9b
size 1456953560
checkpoint-1000/optimizer.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a57f0ce43d0b675e448d10fa5b346990646bb2f6d1ed0fced5f2dc9602ed6bcb
size 2914110745
checkpoint-1000/random_states_0.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94efd0a2c39fc0ad267e26a2b321ce15e87fe514110796e3ea16a678375a4c35
size 17683
checkpoint-1000/scheduler.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7edd8082ac389d0c946c2ac507fc3e4d21eaad8a44fcb3f7b57ebbd1b62363dc
size 563
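
optimizer.bin, random_states_0.pkl, and scheduler.bin look like the training state that Accelerate writes next to a checkpoint, which is what lets a diffusers training script resume from `checkpoint-1000` via `--resume_from_checkpoint`. A self-contained sketch of that save/load mechanism, using illustrative stand-ins rather than this repository's actual training loop:

```python
import torch
from accelerate import Accelerator

# Stand-ins for the real ControlNet training objects (illustrative only).
accelerator = Accelerator()
model = torch.nn.Linear(8, 8)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5)
model, optimizer = accelerator.prepare(model, optimizer)

# save_state writes optimizer.bin, random_states_0.pkl, etc. into the
# directory; load_state restores them, mirroring --resume_from_checkpoint.
accelerator.save_state("checkpoint-demo")
accelerator.load_state("checkpoint-demo")
```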
config.json
ADDED
@@ -0,0 +1,56 @@
{
  "_class_name": "ControlNetModel",
  "_diffusers_version": "0.30.0.dev0",
  "act_fn": "silu",
  "addition_embed_type": null,
  "addition_embed_type_num_heads": 64,
  "addition_time_embed_dim": null,
  "attention_head_dim": [
    5,
    10,
    20,
    20
  ],
  "block_out_channels": [
    320,
    640,
    1280,
    1280
  ],
  "class_embed_type": null,
  "conditioning_channels": 3,
  "conditioning_embedding_out_channels": [
    16,
    32,
    96,
    256
  ],
  "controlnet_conditioning_channel_order": "rgb",
  "cross_attention_dim": 1024,
  "down_block_types": [
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "DownBlock2D"
  ],
  "downsample_padding": 1,
  "encoder_hid_dim": null,
  "encoder_hid_dim_type": null,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "global_pool_conditions": false,
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_scale_factor": 1,
  "mid_block_type": "UNetMidBlock2DCrossAttn",
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_attention_heads": null,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "projection_class_embeddings_input_dim": null,
  "resnet_time_scale_shift": "default",
  "transformer_layers_per_block": 1,
  "upcast_attention": false,
  "use_linear_projection": true
}
diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6c9201f66ea7a1684b96e2d6a3737c096fdd8932845954b8b75524f7eacebd62
size 1456953560
image_control.png
ADDED
(binary image)

images_0.png
ADDED
(binary image, stored via Git LFS)

images_1.png
ADDED
(binary image, stored via Git LFS)

images_2.png
ADDED
(binary image, stored via Git LFS)