moritzef committed
Commit acc23bf · verified · 1 Parent(s): 72ebd7b

End of training
README.md ADDED
@@ -0,0 +1,57 @@
---
license: creativeml-openrail-m
library_name: diffusers
tags:
- stable-diffusion
- stable-diffusion-diffusers
- text-to-image
- diffusers
- controlnet
- diffusers-training
base_model: runwayml/stable-diffusion-v1-5
inference: true
---

<!-- This model card has been generated automatically according to the information the training script had access to. You
should probably proofread and complete it, then remove this comment. -->

# controlnet-moritzef/model_sd15

These are ControlNet weights trained on runwayml/stable-diffusion-v1-5 with a new type of conditioning.
You can find some example images below.

prompt: A realistic google streetview image taken in Berlin (Germany), that looks normal and has a beauty-score of 24, where scores are between 10 and 40 and higher scores indicate more beauty.
![images_0](./images_0.png)
prompt: A realistic google streetview image taken in New York (USA), that looks normal and has a beauty-score of 27, where scores are between 10 and 40 and higher scores indicate more beauty.
![images_1](./images_1.png)
prompt: A realistic google streetview image taken in Rome (Italy), that looks normal and has a beauty-score of 23, where scores are between 10 and 40 and higher scores indicate more beauty.
![images_2](./images_2.png)
prompt: A realistic google streetview image taken in Mexico City (Mexico), that looks very ugly and has a beauty-score of 18, where scores are between 10 and 40 and higher scores indicate more beauty.
![images_3](./images_3.png)
prompt: A realistic google streetview image taken in Tel Aviv (Israel), that looks normal and has a beauty-score of 24, where scores are between 10 and 40 and higher scores indicate more beauty.
![images_4](./images_4.png)
prompt: A realistic google streetview image taken in Kyoto (Japan), that looks very ugly and has a beauty-score of 16, where scores are between 10 and 40 and higher scores indicate more beauty.
![images_5](./images_5.png)
prompt: A realistic google streetview image taken in Gaborone (Botswana), that looks normal and has a beauty-score of 24, where scores are between 10 and 40 and higher scores indicate more beauty.
![images_6](./images_6.png)
prompt: A realistic google streetview image taken in Melbourne (Australia), that looks normal and has a beauty-score of 28, where scores are between 10 and 40 and higher scores indicate more beauty.
![images_7](./images_7.png)

## Intended uses & limitations

#### How to use

```python
# TODO: add an example code snippet for running this diffusion pipeline
```
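Until the snippet above is filled in, here is a minimal sketch using the stock diffusers ControlNet pipeline. The repo id `moritzef/model_sd15` is taken from the heading above, `./image_control.png` from this commit's file list, and the prompt from the examples; treat all three as placeholders for your own inputs.

```python
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

# Load the trained ControlNet and attach it to the SD 1.5 base model it was trained on.
controlnet = ControlNetModel.from_pretrained(
    "moritzef/model_sd15", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# The config specifies conditioning_channels=3 with channel order "rgb",
# so the control input is an ordinary RGB image.
control_image = load_image("./image_control.png")

prompt = (
    "A realistic google streetview image taken in Berlin (Germany), that looks "
    "normal and has a beauty-score of 24, where scores are between 10 and 40 "
    "and higher scores indicate more beauty."
)
image = pipe(prompt, image=control_image, num_inference_steps=30).images[0]
image.save("output.png")
```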

#### Limitations and bias

[TODO: provide examples of latent issues and potential remediations]

## Training details

[TODO: describe the data used to train the model]
checkpoint-40000/controlnet/config.json ADDED
@@ -0,0 +1,51 @@
{
  "_class_name": "ControlNetModel",
  "_diffusers_version": "0.30.0.dev0",
  "act_fn": "silu",
  "addition_embed_type": null,
  "addition_embed_type_num_heads": 64,
  "addition_time_embed_dim": null,
  "attention_head_dim": 8,
  "block_out_channels": [
    320,
    640,
    1280,
    1280
  ],
  "class_embed_type": null,
  "conditioning_channels": 3,
  "conditioning_embedding_out_channels": [
    16,
    32,
    96,
    256
  ],
  "controlnet_conditioning_channel_order": "rgb",
  "cross_attention_dim": 768,
  "down_block_types": [
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "DownBlock2D"
  ],
  "downsample_padding": 1,
  "encoder_hid_dim": null,
  "encoder_hid_dim_type": null,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "global_pool_conditions": false,
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_scale_factor": 1,
  "mid_block_type": "UNetMidBlock2DCrossAttn",
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_attention_heads": null,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "projection_class_embeddings_input_dim": null,
  "resnet_time_scale_shift": "default",
  "transformer_layers_per_block": 1,
  "upcast_attention": false,
  "use_linear_projection": false
}
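The `checkpoint-40000/` directory is an intermediate training snapshot (the optimizer, RNG, and LR-scheduler state files below belong to the accelerate checkpoint format). A sketch of loading this snapshot instead of the final weights, assuming `from_pretrained` resolves the nested `subfolder` path within this repo:

```python
import torch
from diffusers import ControlNetModel

# Load the step-40000 ControlNet snapshot rather than the final weights;
# the subfolder path mirrors this commit's file layout.
controlnet = ControlNetModel.from_pretrained(
    "moritzef/model_sd15",
    subfolder="checkpoint-40000/controlnet",
    torch_dtype=torch.float16,
)
```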
checkpoint-40000/controlnet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2da22c7c4b2e076e2799921766440ade38f66e73f89d1d3602e2e0475be515ea
size 1445157120
checkpoint-40000/optimizer.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0274a5266fee8d6012b9d61f34dfd051879fc9c6745ee4bd943953819bd449ac
size 2890518478
checkpoint-40000/random_states_0.pkl ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:51a7e0e8737720db8adac2c9fd978bf5493be9c9a48c4d0712f5f3202e2a117f
size 14344
checkpoint-40000/scheduler.bin ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c3545acd782fef9a3052d35e8588ad863cba8df6191a8d8b91634d123b7bb20e
size 1000
config.json ADDED
@@ -0,0 +1,51 @@
{
  "_class_name": "ControlNetModel",
  "_diffusers_version": "0.30.0.dev0",
  "act_fn": "silu",
  "addition_embed_type": null,
  "addition_embed_type_num_heads": 64,
  "addition_time_embed_dim": null,
  "attention_head_dim": 8,
  "block_out_channels": [
    320,
    640,
    1280,
    1280
  ],
  "class_embed_type": null,
  "conditioning_channels": 3,
  "conditioning_embedding_out_channels": [
    16,
    32,
    96,
    256
  ],
  "controlnet_conditioning_channel_order": "rgb",
  "cross_attention_dim": 768,
  "down_block_types": [
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "DownBlock2D"
  ],
  "downsample_padding": 1,
  "encoder_hid_dim": null,
  "encoder_hid_dim_type": null,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "global_pool_conditions": false,
  "in_channels": 4,
  "layers_per_block": 2,
  "mid_block_scale_factor": 1,
  "mid_block_type": "UNetMidBlock2DCrossAttn",
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_attention_heads": null,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "projection_class_embeddings_input_dim": null,
  "resnet_time_scale_shift": "default",
  "transformer_layers_per_block": 1,
  "upcast_attention": false,
  "use_linear_projection": false
}
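For orientation, this top-level `config.json` (identical to the checkpoint copy above) can instantiate the architecture without downloading weights; at fp32, the 1,445,157,120-byte safetensors file below works out to roughly 361M parameters. A sketch under those assumptions:

```python
import json

from diffusers import ControlNetModel

with open("config.json") as f:
    cfg = json.load(f)

# Build a randomly initialised model with this architecture;
# from_pretrained would additionally load the trained weights.
model = ControlNetModel.from_config(cfg)
print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.0f}M parameters")
```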
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e2be74f3ebdd5c2ff8caa4a5d3200be4c03fa0c5749cac16ec2f901f7fb011f0
size 1445157120
image_control.png ADDED
images_0.png ADDED
images_1.png ADDED
images_2.png ADDED
images_3.png ADDED
images_4.png ADDED
images_5.png ADDED
images_6.png ADDED
images_7.png ADDED