EpicJhon committed on
Commit
7d17157
1 Parent(s): f96b39d

Initial commit with folder contents

Files changed (30)
  1. .gitmodules +4 -0
  2. models/newdream-sdxl-20/.gitattributes +35 -0
  3. models/newdream-sdxl-20/.gitignore +2 -0
  4. models/newdream-sdxl-20/README.md +66 -0
  5. models/newdream-sdxl-20/model_index.json +33 -0
  6. models/newdream-sdxl-20/scheduler/scheduler_config.json +18 -0
  7. models/newdream-sdxl-20/text_encoder/config.json +24 -0
  8. models/newdream-sdxl-20/text_encoder/model.safetensors +3 -0
  9. models/newdream-sdxl-20/text_encoder/pytorch_model.bin +3 -0
  10. models/newdream-sdxl-20/text_encoder_2/config.json +24 -0
  11. models/newdream-sdxl-20/text_encoder_2/model.safetensors +3 -0
  12. models/newdream-sdxl-20/text_encoder_2/pytorch_model.bin +3 -0
  13. models/newdream-sdxl-20/tokenizer/merges.txt +0 -0
  14. models/newdream-sdxl-20/tokenizer/special_tokens_map.json +24 -0
  15. models/newdream-sdxl-20/tokenizer/tokenizer_config.json +33 -0
  16. models/newdream-sdxl-20/tokenizer/vocab.json +0 -0
  17. models/newdream-sdxl-20/tokenizer_2/merges.txt +0 -0
  18. models/newdream-sdxl-20/tokenizer_2/special_tokens_map.json +24 -0
  19. models/newdream-sdxl-20/tokenizer_2/tokenizer_config.json +33 -0
  20. models/newdream-sdxl-20/tokenizer_2/vocab.json +0 -0
  21. models/newdream-sdxl-20/unet/config.json +71 -0
  22. models/newdream-sdxl-20/unet/diffusion_pytorch_model.bin +3 -0
  23. models/newdream-sdxl-20/unet/diffusion_pytorch_model.safetensors +3 -0
  24. models/newdream-sdxl-20/vae/config.json +31 -0
  25. models/newdream-sdxl-20/vae/diffusion_pytorch_model.bin +3 -0
  26. models/newdream-sdxl-20/vae/diffusion_pytorch_model.safetensors +3 -0
  27. pyproject.toml +22 -0
  28. requirements.txt +2 -0
  29. src/main.py +50 -0
  30. src/pipeline.py +65 -0
.gitmodules ADDED
@@ -0,0 +1,4 @@
+ [submodule "newdream-sdxl-20"]
+   path = models/newdream-sdxl-20
+   url = https://huggingface.co/stablediffusionapi/newdream-sdxl-20
+   branch = main
models/newdream-sdxl-20/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/newdream-sdxl-20/.gitignore ADDED
@@ -0,0 +1,2 @@
+ step_*
+ epoch_*
models/newdream-sdxl-20/README.md ADDED
@@ -0,0 +1,66 @@
+ ---
+ license: creativeml-openrail-m
+ tags:
+ - stablediffusionapi.com
+ - stable-diffusion-api
+ - text-to-image
+ - ultra-realistic
+ pinned: true
+ ---
+
+ # NewDream-SDXL 2.0 API Inference
+
+ ![generated from stablediffusionapi.com](https://pub-3626123a908346a7a8be8d9295f44e26.r2.dev/generations/8478583971702167737.png)
+ ## Get API Key
+
+ Get an API key from [Stable Diffusion API](http://stablediffusionapi.com/); no payment needed.
+
+ Replace the key in the code below and change **model_id** to "newdream-sdxl-20".
+
+ Coding in PHP/Node/Java etc.? Have a look at the docs for more code examples: [View docs](https://stablediffusionapi.com/docs)
+
+ Try the model for free: [Generate Images](https://stablediffusionapi.com/models/newdream-sdxl-20)
+
+ Model link: [View model](https://stablediffusionapi.com/models/newdream-sdxl-20)
+
+ Credits: [View credits](https://civitai.com/?query=NewDream-SDXL%202.0)
+
+ View all models: [View Models](https://stablediffusionapi.com/models)
+
+ import requests
+ import json
+
+ url = "https://stablediffusionapi.com/api/v4/dreambooth"
+
+ payload = json.dumps({
+     "key": "your_api_key",
+     "model_id": "newdream-sdxl-20",
+     "prompt": "ultra realistic close up portrait ((beautiful pale cyberpunk female with heavy black eyeliner)), blue eyes, shaved side haircut, hyper detail, cinematic lighting, magic neon, dark red city, Canon EOS R3, nikon, f/1.4, ISO 200, 1/160s, 8K, RAW, unedited, symmetrical balance, in-frame, 8K",
+     "negative_prompt": "painting, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, deformed, ugly, blurry, bad anatomy, bad proportions, extra limbs, cloned face, skinny, glitchy, double torso, extra arms, extra hands, mangled fingers, missing lips, ugly face, distorted face, extra legs, anime",
+     "width": "512",
+     "height": "512",
+     "samples": "1",
+     "num_inference_steps": "30",
+     "safety_checker": "no",
+     "enhance_prompt": "yes",
+     "seed": None,
+     "guidance_scale": 7.5,
+     "multi_lingual": "no",
+     "panorama": "no",
+     "self_attention": "no",
+     "upscale": "no",
+     "embeddings": "embeddings_model_id",
+     "lora": "lora_model_id",
+     "webhook": None,
+     "track_id": None
+ })
+
+ headers = {
+     'Content-Type': 'application/json'
+ }
+
+ response = requests.request("POST", url, headers=headers, data=payload)
+
+ print(response.text)
+
+ > Use this coupon code to get 25% off **DMGG0RBN**
models/newdream-sdxl-20/model_index.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_class_name": "StableDiffusionXLPipeline",
+   "_diffusers_version": "0.21.2",
+   "force_zeros_for_empty_prompt": true,
+   "scheduler": [
+     "diffusers",
+     "EulerDiscreteScheduler"
+   ],
+   "text_encoder": [
+     "transformers",
+     "CLIPTextModel"
+   ],
+   "text_encoder_2": [
+     "transformers",
+     "CLIPTextModelWithProjection"
+   ],
+   "tokenizer": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "tokenizer_2": [
+     "transformers",
+     "CLIPTokenizer"
+   ],
+   "unet": [
+     "diffusers",
+     "UNet2DConditionModel"
+   ],
+   "vae": [
+     "diffusers",
+     "AutoencoderKL"
+   ]
+ }
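model_index.json lists the components diffusers assembles when this folder is loaded as a single pipeline. As a minimal sketch (assuming a local checkout at models/newdream-sdxl-20, a CUDA GPU, and the torch/diffusers versions pinned in pyproject.toml), loading and sampling looks like:

```python
import torch
from diffusers import StableDiffusionXLPipeline

# from_pretrained reads model_index.json and instantiates every component it
# names: scheduler, both text encoders/tokenizers, the UNet, and the VAE.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "models/newdream-sdxl-20",
    torch_dtype=torch.float16,
    use_safetensors=True,
).to("cuda")

image = pipe("a watercolor fox in a misty forest", num_inference_steps=30).images[0]
image.save("sample.png")  # hypothetical output path
```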
models/newdream-sdxl-20/scheduler/scheduler_config.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "_class_name": "EulerDiscreteScheduler",
+   "_diffusers_version": "0.21.2",
+   "beta_end": 0.012,
+   "beta_schedule": "scaled_linear",
+   "beta_start": 0.00085,
+   "clip_sample": false,
+   "interpolation_type": "linear",
+   "num_train_timesteps": 1000,
+   "prediction_type": "epsilon",
+   "sample_max_value": 1.0,
+   "set_alpha_to_one": false,
+   "skip_prk_steps": true,
+   "steps_offset": 1,
+   "timestep_spacing": "leading",
+   "trained_betas": null,
+   "use_karras_sigmas": false
+ }
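The scheduler ships as an EulerDiscreteScheduler with the usual SDXL noise schedule (scaled_linear betas, epsilon prediction, Karras sigmas off). A small sketch of how this config is consumed, or overridden in memory without editing the JSON; the use_karras_sigmas override is only an illustrative assumption, not part of this commit:

```python
from diffusers import EulerDiscreteScheduler

# Rebuild the scheduler exactly as configured on disk.
scheduler = EulerDiscreteScheduler.from_pretrained(
    "models/newdream-sdxl-20", subfolder="scheduler"
)

# Derive a variant with one setting overridden, then attach it to a loaded
# pipeline (pipe as in the earlier sketch).
karras = EulerDiscreteScheduler.from_config(scheduler.config, use_karras_sigmas=True)
pipe.scheduler = karras
```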
models/newdream-sdxl-20/text_encoder/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "CLIPTextModel"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 768,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "projection_dim": 768,
+   "torch_dtype": "float16",
+   "transformers_version": "4.33.1",
+   "vocab_size": 49408
+ }
models/newdream-sdxl-20/text_encoder/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57f094c002b6f50986b68a714285e72a23c17d9e1b146b078a2219397c51e37a
+ size 246144152
models/newdream-sdxl-20/text_encoder/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:192a06f4ef7ece4acb33fc3c717790ee37b1f5d85e48e0dcac54dfea93e584a2
+ size 246185562
models/newdream-sdxl-20/text_encoder_2/config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "CLIPTextModelWithProjection"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 0,
+   "dropout": 0.0,
+   "eos_token_id": 2,
+   "hidden_act": "gelu",
+   "hidden_size": 1280,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 5120,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 77,
+   "model_type": "clip_text_model",
+   "num_attention_heads": 20,
+   "num_hidden_layers": 32,
+   "pad_token_id": 1,
+   "projection_dim": 1280,
+   "torch_dtype": "float16",
+   "transformers_version": "4.33.1",
+   "vocab_size": 49408
+ }
models/newdream-sdxl-20/text_encoder_2/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8a0d55a94e8508c869f35163fb6fcf34e02ea1b614d9259b47f97c562cff9575
+ size 1389382176
models/newdream-sdxl-20/text_encoder_2/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8841d5e4c05ce74941eb536ec3835f600cc82d36763fc5f30c69d09a886158c9
+ size 1389490462
models/newdream-sdxl-20/tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/newdream-sdxl-20/tokenizer/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/newdream-sdxl-20/tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "bos_token": {
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "!",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/newdream-sdxl-20/tokenizer_2/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|startoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": true,
+   "do_lower_case": true,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 77,
+   "pad_token": "!",
+   "tokenizer_class": "CLIPTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
models/newdream-sdxl-20/tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
models/newdream-sdxl-20/unet/config.json ADDED
@@ -0,0 +1,71 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.21.2",
+   "act_fn": "silu",
+   "addition_embed_type": "text_time",
+   "addition_embed_type_num_heads": 64,
+   "addition_time_embed_dim": 256,
+   "attention_head_dim": [
+     5,
+     10,
+     20
+   ],
+   "attention_type": "default",
+   "block_out_channels": [
+     320,
+     640,
+     1280
+   ],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 2048,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "DownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dropout": 0.0,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-05,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": 2816,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 128,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "transformer_layers_per_block": [
+     1,
+     2,
+     10
+   ],
+   "up_block_types": [
+     "CrossAttnUpBlock2D",
+     "CrossAttnUpBlock2D",
+     "UpBlock2D"
+   ],
+   "upcast_attention": false,
+   "use_linear_projection": true
+ }
models/newdream-sdxl-20/unet/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56913b74b493462516ce770d525cca996feef7acd44cf0cba65de667a6c1d58c
+ size 5135669022
models/newdream-sdxl-20/unet/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9715fb3c6d0d81f46fbe6be46ddce05d91806aaf041504a52a44e1ec9e4f660f
+ size 5135149760
models/newdream-sdxl-20/vae/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_class_name": "AutoencoderKL",
+   "_diffusers_version": "0.21.2",
+   "act_fn": "silu",
+   "block_out_channels": [
+     128,
+     256,
+     512,
+     512
+   ],
+   "down_block_types": [
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D",
+     "DownEncoderBlock2D"
+   ],
+   "force_upcast": true,
+   "in_channels": 3,
+   "latent_channels": 4,
+   "layers_per_block": 2,
+   "norm_num_groups": 32,
+   "out_channels": 3,
+   "sample_size": 1024,
+   "scaling_factor": 0.13025,
+   "up_block_types": [
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D",
+     "UpDecoderBlock2D"
+   ]
+ }
models/newdream-sdxl-20/vae/diffusion_pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:33c40ff3dc7adeb21dce76cd411d65828037efa0aa54432e3592418401cf8467
+ size 167404866
models/newdream-sdxl-20/vae/diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:915b909d0eeef5985462226b2c9950ca9da42b5a6ec8c296c2e73f6419ae465c
+ size 167335342
pyproject.toml ADDED
@@ -0,0 +1,22 @@
+ [build-system]
+ requires = ["setuptools >= 61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [project]
+ name = "edge-maxxing-4090-newdream"
+ description = "An edge-maxxing model submission for the 4090 newdream contest"
+ requires-python = ">=3.10,<3.11"
+ version = "1.0.0"
+ dependencies = [
+     "numpy==1.26.4",
+     "torch==2.2",
+     "xformers==0.0.24",
+     "diffusers==0.30.2",
+     "transformers==4.41.2",
+     "accelerate==0.31.0",
+     "omegaconf==2.3.0",
+     "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing#subdirectory=pipelines",
+ ]
+
+ [project.scripts]
+ start_inference = "main:main"
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ # Specify any extra options here (e.g. --find-links, --pre). Avoid listing dependencies here; declare them in pyproject.toml instead.
+ https://github.com/chengzeyi/stable-fast/releases/download/v1.0.5/stable_fast-1.0.5+torch222cu121-cp310-cp310-manylinux2014_x86_64.whl
src/main.py ADDED
@@ -0,0 +1,50 @@
+ from io import BytesIO
+ from multiprocessing.connection import Listener
+ from os import chmod, remove
+ from os.path import abspath, exists
+ from pathlib import Path
+
+ from PIL.JpegImagePlugin import JpegImageFile
+ from pipelines.models import TextToImageRequest
+
+ from pipeline import load_pipeline, infer
+
+ SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")
+
+
+ def main():
+     print("Loading pipeline")
+     pipeline = load_pipeline()
+
+     print(f"Pipeline loaded, creating socket at '{SOCKET}'")
+
+     if exists(SOCKET):
+         remove(SOCKET)
+
+     with Listener(SOCKET) as listener:
+         chmod(SOCKET, 0o777)
+
+         print("Awaiting connections")
+         with listener.accept() as connection:
+             print("Connected")
+
+             while True:
+                 try:
+                     request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
+                 except EOFError:
+                     print("Inference socket exiting")
+
+                     return
+
+                 image = infer(request, pipeline)
+
+                 data = BytesIO()
+                 image.save(data, format=JpegImageFile.format)
+
+                 packet = data.getvalue()
+
+                 connection.send_bytes(packet)
+
+
+ if __name__ == '__main__':
+     main()
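main.py serves the pipeline over a Unix domain socket at the repository root (inferences.sock): each request is a JSON-encoded TextToImageRequest sent with send_bytes, and the reply is the raw JPEG bytes of the generated image. A hypothetical client sketch, with the request fields inferred from how infer() in src/pipeline.py reads them:

```python
import json
from multiprocessing.connection import Client

# Connect to the Unix socket created by src/main.py (path relative to the
# repository root; adjust if running from elsewhere).
with Client("inferences.sock") as connection:
    request = {
        "prompt": "a lighthouse at dusk, oil painting",
        "negative_prompt": "blurry, low quality",
        "width": 1024,
        "height": 1024,
        "seed": 42,
    }
    # The server decodes these bytes and validates them as a TextToImageRequest.
    connection.send_bytes(json.dumps(request).encode("utf-8"))

    # The response is the JPEG produced by infer().
    with open("output.jpg", "wb") as file:
        file.write(connection.recv_bytes())
```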
src/pipeline.py ADDED
@@ -0,0 +1,65 @@
+ import torch
+ from PIL.Image import Image
+ from diffusers import StableDiffusionXLPipeline, LCMScheduler
+ from pipelines.models import TextToImageRequest
+ from torch import Generator
+ from sfast.compilers.diffusion_pipeline_compiler import (compile,
+                                                          CompilationConfig)
+
+ '''
+ def load_pipeline() -> StableDiffusionXLPipeline:
+     pipe = StableDiffusionXLPipeline.from_pretrained(
+         "models/newdream-sdxl-20", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
+     ).to("cuda:0")
+
+     prompt = "an elephant, a tiger and a rabbit playing together"
+     helper = DeepCacheSDHelper(pipe=pipe)
+     helper.set_params(
+         cache_interval=2,
+         cache_branch_id=2,
+     )
+     helper.enable()
+     pipe(prompt, num_inference_steps=20).images[0].save("original.png")
+     # pipe.fuse_qkv_projections()
+     return pipe
+ '''
+
+ def load_pipeline() -> StableDiffusionXLPipeline:
+     pipe = StableDiffusionXLPipeline.from_pretrained(
+         "models/newdream-sdxl-20", torch_dtype=torch.float16, use_safetensors=True
+     ).to("cuda:0")
+
+     prompt = "future punk robot shooting"
+     pipe.fuse_qkv_projections()
+     config = CompilationConfig.Default()
+     # xformers and Triton are suggested for achieving best performance.
+     try:
+         import xformers
+         config.enable_xformers = True
+     except ImportError:
+         print('xformers not installed, skip')
+     try:
+         import triton
+         config.enable_triton = True
+     except ImportError:
+         print('Triton not installed, skip')
+     # CUDA Graph is suggested for small batch sizes and small resolutions to reduce CPU overhead.
+     # But it can increase the amount of GPU memory used.
+     # For StableVideoDiffusionPipeline it is not needed.
+     config.enable_cuda_graph = True
+
+     pipe = compile(pipe, config)
+     pipe(prompt)
+     return pipe
+
+ def infer(request: TextToImageRequest, pipeline: StableDiffusionXLPipeline) -> Image:
+     generator = Generator(pipeline.device).manual_seed(request.seed) if request.seed else None
+
+     return pipeline(
+         prompt=request.prompt,
+         negative_prompt=request.negative_prompt,
+         width=request.width,
+         height=request.height,
+         generator=generator,
+         num_inference_steps=10,
+     ).images[0]
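For a quick in-process check without the socket server, load_pipeline() and infer() can be driven directly. This sketch assumes TextToImageRequest (from the external edge-maxxing pipelines package) is a pydantic-style model accepting the fields that infer() reads; treat the constructor call as illustrative:

```python
from pipelines.models import TextToImageRequest

from pipeline import load_pipeline, infer

# load_pipeline() compiles the SDXL pipeline with stable-fast and runs one
# warmup prompt, so this call is slow once and subsequent inference is fast.
pipeline = load_pipeline()

request = TextToImageRequest(
    prompt="future punk robot shooting",
    negative_prompt="blurry, low quality",
    width=1024,
    height=1024,
    seed=1234,
)
infer(request, pipeline).save("smoke_test.jpg")  # hypothetical output path
```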