Test new checkpoint
app.py
CHANGED
@@ -245,23 +245,25 @@ loras = [
 ]
 
 # Initialize the base model
-use_auth_token=True
-dtype = torch.bfloat16
-device = "cuda" if torch.cuda.is_available() else "cpu"
 base_model = "SG161222/Verus_Vision_1.0b"
 
+# Load the Verus Vision autoencoder
+good_vae = AutoencoderKL.from_pretrained(base_model, filename="ae.safetensors", torch_dtype=dtype).to(device)
 
-
-
-
+# Set up the main Verus Vision pipeline
+pipe = DiffusionPipeline.from_pretrained(
+    base_model,
+    filename="VerusVision_1.0b_Transformer_fp16.safetensors",  # Adjust for FP16 or FP8
+    torch_dtype=dtype
+).to(device)
+
+# If Image-to-Image is needed
 pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
     base_model,
     vae=good_vae,
     transformer=pipe.transformer,
     text_encoder=pipe.text_encoder,
     tokenizer=pipe.tokenizer,
-    text_encoder_2=pipe.text_encoder_2,
-    tokenizer_2=pipe.tokenizer_2,
     torch_dtype=dtype
 )
 
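For context, here is a minimal, self-contained sketch of what the initialization section looks like after this commit. It only mirrors the committed lines; the imports and the `dtype`/`device` definitions are assumptions restored from the lines the commit removes (the new code still references both), and the `filename=` keyword is reproduced as committed rather than being guaranteed behavior of `from_pretrained`.

# Sketch of the post-commit initialization block (assumptions: the imports below,
# plus dtype/device restored from the lines this commit removes).
import torch
from diffusers import AutoencoderKL, AutoPipelineForImage2Image, DiffusionPipeline

dtype = torch.bfloat16                                    # removed by the commit but still referenced
device = "cuda" if torch.cuda.is_available() else "cpu"   # removed by the commit but still referenced

base_model = "SG161222/Verus_Vision_1.0b"

# Load the Verus Vision autoencoder (filename= mirrors the committed code)
good_vae = AutoencoderKL.from_pretrained(base_model, filename="ae.safetensors", torch_dtype=dtype).to(device)

# Main Verus Vision pipeline (filename= mirrors the committed code; adjust for FP16 or FP8)
pipe = DiffusionPipeline.from_pretrained(
    base_model,
    filename="VerusVision_1.0b_Transformer_fp16.safetensors",
    torch_dtype=dtype,
).to(device)

# Image-to-Image pipeline sharing the same components
pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
    base_model,
    vae=good_vae,
    transformer=pipe.transformer,
    text_encoder=pipe.text_encoder,
    tokenizer=pipe.tokenizer,
    torch_dtype=dtype,
)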