remove_pooling_from_te
- 1.png +0 -0
- README.md +4 -41
- Untitled.ipynb +0 -0
- pipeline_waifu.py +2 -0
- promo.png +2 -2
- test.ipynb +0 -0
- transformer/config.json +2 -1
- transformer/diffusion_pytorch_model.fp16.safetensors +1 -1
- waifu.png +0 -0
1.png
CHANGED
README.md
CHANGED
@@ -28,55 +28,18 @@ pip install git+https://github.com/huggingface/diffusers
 ```py
 import torch
 from diffusers import DiffusionPipeline
-
-from transformers import XLMRobertaTokenizerFast,XLMRobertaModel
-from diffusers import FlowMatchEulerDiscreteScheduler
-from diffusers.models import AutoencoderKL
-from diffusers import SanaTransformer2DModel
+#from pipeline_waifu import WaifuPipeline

 pipe_id = "AiArtLab/waifu-2b"
 variant = "fp16"
-# tokenizer
-tokenizer = XLMRobertaTokenizerFast.from_pretrained(
-    pipe_id,
-    subfolder="tokenizer"
-)
-
-# text_encoder
-text_encoder = XLMRobertaModel.from_pretrained(
-    pipe_id,
-    variant=variant,
-    subfolder="text_encoder",
-    add_pooling_layer=False
-).to("cuda")
-
-# scheduler
-scheduler = FlowMatchEulerDiscreteScheduler(shift=1.0)
-
-# VAE
-vae = AutoencoderKL.from_pretrained(
-    pipe_id,
-    variant=variant,
-    subfolder="vae"
-).to("cuda")
-
-# Transformer
-transformer = SanaTransformer2DModel.from_pretrained(
-    pipe_id,
-    variant=variant,
-    subfolder="transformer"
-).to("cuda")

 # Pipeline
 pipeline = DiffusionPipeline.from_pretrained(
     pipe_id,
-
-
-    vae=vae,
-    transformer=transformer,
-    trust_remote_code=True,
+    variant=variant,
+    trust_remote_code = True
 ).to("cuda")
-print(pipeline)
+#print(pipeline)

 prompt = 'аниме девушка, waifu, يبتسم جنسيا , sur le fond de la tour Eiffel'
 generator = torch.Generator(device="cuda").manual_seed(42)
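The hunk ends before the generation call itself, so for context here is a minimal sketch of how the trimmed README snippet is typically completed with a diffusers pipeline. The sampler settings (`num_inference_steps`, `guidance_scale`) and the output filename are assumptions for illustration, not values taken from this commit.

```py
# Assumed continuation of the README snippet above; step count, guidance scale
# and filename are illustrative, not part of the repository's documentation.
image = pipeline(
    prompt=prompt,
    generator=generator,
    num_inference_steps=28,  # assumed sampler setting
    guidance_scale=4.5,      # assumed guidance setting
).images[0]
image.save("waifu-2b-sample.png")
```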
Untitled.ipynb
CHANGED
The diff for this file is too large to render.
pipeline_waifu.py
CHANGED
@@ -127,6 +127,8 @@ class WaifuPipeline(DiffusionPipeline):
             tokenizer=tokenizer, text_encoder=text_encoder, vae=vae, transformer=transformer, scheduler=scheduler
         )

+        self.text_encoder.pooler = None
+
         self.vae_scale_factor = (
             8
         )
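The added line drops the XLM-RoBERTa pooling head inside the pipeline itself, since only token-level hidden states are fed to the transformer; this is also why the README no longer needs to build the text encoder with `add_pooling_layer=False`. Below is a standalone sketch of the same idea, assuming the tokenizer/text_encoder subfolder layout shown in the old README; the test prompt and tokenizer settings are illustrative.

```py
# Sketch: two equivalent ways to avoid the unused XLM-RoBERTa pooling head.
# Repo ID and subfolders follow the old README; the prompt is illustrative.
import torch
from transformers import XLMRobertaTokenizerFast, XLMRobertaModel

pipe_id = "AiArtLab/waifu-2b"
tokenizer = XLMRobertaTokenizerFast.from_pretrained(pipe_id, subfolder="tokenizer")

# Option 1: never build the pooler (what the old README did) ...
text_encoder = XLMRobertaModel.from_pretrained(
    pipe_id, variant="fp16", subfolder="text_encoder", add_pooling_layer=False
).to("cuda")
# ... Option 2: drop it after loading (what WaifuPipeline.__init__ now does).
text_encoder.pooler = None

with torch.no_grad():
    tokens = tokenizer("аниме девушка, waifu", return_tensors="pt").to("cuda")
    out = text_encoder(**tokens)
prompt_embeds = out.last_hidden_state  # token-level embeddings; no pooled output is computed
```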
promo.png
CHANGED
test.ipynb
CHANGED
The diff for this file is too large to render.
transformer/config.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "_class_name": "SanaTransformer2DModel",
-  "_diffusers_version": "0.
+  "_diffusers_version": "0.33.0.dev0",
   "attention_bias": false,
   "attention_head_dim": 32,
   "caption_channels": 1024,
@@ -8,6 +8,7 @@
   "cross_attention_head_dim": 112,
   "dropout": 0.0,
   "in_channels": 16,
+  "interpolation_scale": null,
   "mlp_ratio": 2.5,
   "norm_elementwise_affine": false,
   "norm_eps": 1e-06,
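To confirm the regenerated config from Python, a small sketch is below; `load_config` is diffusers' standard `ConfigMixin` helper, and the two keys printed are the ones this commit touches.

```py
# Sketch: read the updated transformer config from the Hub and check the keys
# changed by this commit (version string and the newly serialized null field).
from diffusers import SanaTransformer2DModel

config = SanaTransformer2DModel.load_config("AiArtLab/waifu-2b", subfolder="transformer")
print(config["_diffusers_version"])   # expected: 0.33.0.dev0
print(config["interpolation_scale"])  # expected: None
```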
transformer/diffusion_pytorch_model.fp16.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:36e6ecd58910031f5a3213f821e83a160e3ebf9c30eda15d6900660edf318339
 size 3203093344
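The pointer above only records the hash and size of the actual weights file. Here is a small sketch of how one might verify a downloaded copy against that hash; `hf_hub_download` is standard `huggingface_hub`, and the chunked read just avoids holding ~3 GB in memory at once.

```py
# Sketch: verify the fp16 transformer weights against the LFS pointer's sha256.
# Assumes network access to the AiArtLab/waifu-2b repo; later commits may
# replace the file, so the hash only matches this revision.
import hashlib
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="AiArtLab/waifu-2b",
    filename="transformer/diffusion_pytorch_model.fp16.safetensors",
)
sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
print(sha.hexdigest() == "36e6ecd58910031f5a3213f821e83a160e3ebf9c30eda15d6900660edf318339")
```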
waifu.png
CHANGED