Update pipeline.py
pipeline.py  (+0, -45)
@@ -50,51 +50,6 @@ else:
 
 class SwDPipeline(StableDiffusion3Pipeline):
 
-    """
-    def __init__(
-        self,
-        transformer: SD3Transformer2DModel,
-        scheduler: FlowMatchEulerDiscreteScheduler,
-        vae: AutoencoderKL,
-        text_encoder: CLIPTextModelWithProjection,
-        tokenizer: CLIPTokenizer,
-        text_encoder_2: CLIPTextModelWithProjection,
-        tokenizer_2: CLIPTokenizer,
-        text_encoder_3: T5EncoderModel,
-        tokenizer_3: T5TokenizerFast,
-        image_encoder: SiglipVisionModel = None,
-        feature_extractor: SiglipImageProcessor = None,
-    ):
-        super().__init__()
-
-        self.register_modules(
-            vae=vae,
-            text_encoder=text_encoder,
-            text_encoder_2=text_encoder_2,
-            text_encoder_3=text_encoder_3,
-            tokenizer=tokenizer,
-            tokenizer_2=tokenizer_2,
-            tokenizer_3=tokenizer_3,
-            transformer=transformer,
-            scheduler=scheduler,
-            image_encoder=image_encoder,
-            feature_extractor=feature_extractor,
-        )
-        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
-        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
-        self.tokenizer_max_length = (
-            self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
-        )
-        self.default_sample_size = (
-            self.transformer.config.sample_size
-            if hasattr(self, "transformer") and self.transformer is not None
-            else 128
-        )
-        self.patch_size = (
-            self.transformer.config.patch_size if hasattr(self, "transformer") and self.transformer is not None else 2
-        )
-    """
-
     @torch.no_grad()
     def __call__(
         self,
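For orientation: the block removed above was a docstring-commented duplicate of the constructor that SwDPipeline already inherits from StableDiffusion3Pipeline, so after this commit the class relies entirely on the parent __init__ and its register_modules / VaeImageProcessor setup from diffusers. The sketch below is not part of the diff; it only illustrates that the pipeline can still be loaded through the inherited from_pretrained. The checkpoint id and call arguments are placeholders, and the overridden __call__ in pipeline.py may take different or additional SwD-specific parameters.

# Minimal usage sketch (assumptions: pipeline.py is on the import path,
# the checkpoint id and call arguments are placeholders, and the output
# follows the SD3-style .images convention).
import torch
from pipeline import SwDPipeline

pipe = SwDPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3.5-large",  # hypothetical SD3 checkpoint
    torch_dtype=torch.bfloat16,
)
pipe.to("cuda")

# Illustrative call; check the actual __call__ signature in pipeline.py.
image = pipe(
    prompt="a photo of an astronaut riding a horse",
    num_inference_steps=4,   # placeholder
    guidance_scale=0.0,      # placeholder
).images[0]
image.save("sample.png")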