samedii committed on
Commit
0b594f1
1 Parent(s): 558d756

feature: pixel party xl v1.0 model release

README.md ADDED
@@ -0,0 +1,65 @@
+ ---
+ license: creativeml-openrail-m
+ tags:
+ - text-to-image
+ - stable-diffusion
+ - diffusers
+ base_model: stabilityai/stable-diffusion-xl-base-1.0
+ instance_prompt: . in pixel art style
+ widget:
+ - text: cute dragon. in pixel art style
+ ---
+
+ # Pixel Party XL
+
+ This is a full fine-tune of SDXL for better pixel art adherence. Feel free to use this model in your own projects, but please do not host it.
+
+ ![examples](images/examples.gif)
+
+ We are building tools for indie game development and currently have:
+
+ - Map tiles
+ - Movement animations
+ - Attack animations
+ - Inpainting
+ - Character reshaping
+ - Animation interpolation
+
+ And we have much more planned! :D
+
+ If you want to support us or check out our other pixel art models, you can find us at [PixelLab](https://www.pixellab.ai) or on [Discord](https://discord.gg/pBeyTBF8T7).
+
+ ## How to use
+
+ - Append ". in pixel art style" to your prompt, e.g. "cute dragon. in pixel art style"
+ - Downsize the image 8x using nearest-neighbor sampling (see the sketch after the Diffusers example below)
+ - Init images are very helpful
+ - The model works best at around a 128x128 canvas size, but it still excels at creating smaller items, characters, and other subjects
+ - Use a VAE with fixed fp16 support: [madebyollin/sdxl-vae-fp16-fix](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)
+ - Do not use the refiner
+
+ ### Diffusers
+
+ ```python
+ from diffusers import DiffusionPipeline, UNet2DConditionModel
+ import torch
+
+ # Load the SDXL base pipeline, swapping in the Pixel Party XL UNet
+ # ("pixel-party-v1.0" is the path to this model's UNet weights).
+ pipe = DiffusionPipeline.from_pretrained(
+     "stabilityai/stable-diffusion-xl-base-1.0",
+     torch_dtype=torch.float16,
+     unet=UNet2DConditionModel.from_pretrained("pixel-party-v1.0", torch_dtype=torch.float16),
+     use_safetensors=True,
+     variant="fp16",
+ )
+ pipe.to("cuda")
+
+ torch.manual_seed(11215)
+ prompt = "cute dragon. in pixel art style"
+ negative_prompt = "mixels. amateur. multiple"
+
+ image = pipe(prompt, negative_prompt=negative_prompt, num_inference_steps=25).images[0]
+ ```
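+
+ The usage notes above also call for the fp16-fix VAE and an 8x nearest-neighbor downsize. The snippet below is a minimal sketch of those two steps, assuming the `pipe` and `image` variables from the example above and the VAE repository linked in the notes; the output filename is just an illustration:
+
+ ```python
+ import torch
+ from diffusers import AutoencoderKL
+ from PIL import Image
+
+ # Swap in the fp16-fixed VAE before generating (it can also be passed as
+ # vae=... to DiffusionPipeline.from_pretrained above).
+ vae = AutoencoderKL.from_pretrained(
+     "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
+ )
+ pipe.vae = vae.to("cuda")
+
+ # Downsize the generated image 8x with nearest neighbor, e.g. 1024x1024 -> 128x128.
+ pixel_art = image.resize(
+     (image.width // 8, image.height // 8), resample=Image.NEAREST
+ )
+ pixel_art.save("cute_dragon_pixel_art.png")
+ ```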
+
+ ## License
+
+ Please do not host this model. It is otherwise licensed under CreativeML OpenRAIL-M.
config.json ADDED
@@ -0,0 +1,54 @@
+ {
+   "_class_name": "UNet2DConditionModel",
+   "_diffusers_version": "0.19.3",
+   "_name_or_path": "pixel-party-xl-v1.0/denoiser/unet",
+   "act_fn": "silu",
+   "addition_embed_type": "text_time",
+   "addition_embed_type_num_heads": 64,
+   "addition_time_embed_dim": 256,
+   "attention_head_dim": [5, 10, 20],
+   "block_out_channels": [320, 640, 1280],
+   "center_input_sample": false,
+   "class_embed_type": null,
+   "class_embeddings_concat": false,
+   "conv_in_kernel": 3,
+   "conv_out_kernel": 3,
+   "cross_attention_dim": 2048,
+   "cross_attention_norm": null,
+   "down_block_types": [
+     "DownBlock2D",
+     "CrossAttnDownBlock2D",
+     "CrossAttnDownBlock2D"
+   ],
+   "downsample_padding": 1,
+   "dual_cross_attention": false,
+   "encoder_hid_dim": null,
+   "encoder_hid_dim_type": null,
+   "flip_sin_to_cos": true,
+   "freq_shift": 0,
+   "in_channels": 4,
+   "layers_per_block": 2,
+   "mid_block_only_cross_attention": null,
+   "mid_block_scale_factor": 1,
+   "mid_block_type": "UNetMidBlock2DCrossAttn",
+   "norm_eps": 1e-5,
+   "norm_num_groups": 32,
+   "num_attention_heads": null,
+   "num_class_embeds": null,
+   "only_cross_attention": false,
+   "out_channels": 4,
+   "projection_class_embeddings_input_dim": 2816,
+   "resnet_out_scale_factor": 1.0,
+   "resnet_skip_time_act": false,
+   "resnet_time_scale_shift": "default",
+   "sample_size": 128,
+   "time_cond_proj_dim": null,
+   "time_embedding_act_fn": null,
+   "time_embedding_dim": null,
+   "time_embedding_type": "positional",
+   "timestep_post_act": null,
+   "transformer_layers_per_block": [1, 2, 10],
+   "up_block_types": ["CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"],
+   "upcast_attention": null,
+   "use_linear_projection": true
+ }
diffusion_pytorch_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ada8d88a0a7fe5417d4d63ee4a66897383ec682c1cd54f083acbfd47a57e7779
+ size 5135149760
images/examples.gif ADDED