vcollos committed (verified)
Commit c147231 · 1 parent: ab0ba16

Update app.py

Files changed (1):
  app.py +32 -64
app.py CHANGED
@@ -244,73 +244,41 @@ loras = [
     # add new
 ]
 
-# Initial settings
-dtype = torch.float16
+# Initialize the base model
+use_auth_token=True
+dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
-
-# URLs and local paths
-config_url = "https://huggingface.co/spaces/vcollos/family/resolve/main/config.json"
-local_config_path = "./config.json"
-vae_weights_url = "https://huggingface.co/spaces/vcollos/family/resolve/main/ae.safetensors"
-vae_weights_path = "./ae.safetensors"
-
-# Function to download files
-def download_file(url, local_path):
-    if not os.path.exists(local_path):
-        print(f"Downloading {local_path} from {url}...")
-        response = requests.get(url)
-        response.raise_for_status()
-        with open(local_path, "wb") as f:
-            f.write(response.content)
-        print(f"File saved to {local_path}")
-    else:
-        print(f"{local_path} already exists.")
-
-# Download the required files
-download_file(config_url, local_config_path)
-download_file(vae_weights_url, vae_weights_path)
-
-# Load the autoencoder with the local config.json
-try:
-    print("Loading the autoencoder...")
-    good_vae = AutoencoderKL.from_pretrained(
-        pretrained_model_name_or_path=local_config_path,  # local config
-        filename=vae_weights_path,  # local weights
-        torch_dtype=dtype
-    ).to(device)
-    print("Autoencoder loaded successfully!")
-except Exception as e:
-    print(f"Error loading the autoencoder: {e}")
-    good_vae = None
-
-# Manually set up the main pipeline
-try:
-    print("Setting up the main pipeline...")
-    pipe = DiffusionPipeline.from_pretrained(
-        pretrained_model_name_or_path=local_config_path,  # local config
-        torch_dtype=dtype,
-        vae=good_vae
-    ).to(device)
-    print("Main pipeline set up successfully!")
-except Exception as e:
-    print(f"Error setting up the main pipeline: {e}")
-    pipe = None
-
-# Set up the image-to-image pipeline
-try:
-    print("Setting up the image-to-image pipeline...")
-    pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
-        pretrained_model_or_path=local_config_path,
-        vae=good_vae,
-        torch_dtype=dtype
-    )
-    print("Image-to-image pipeline set up successfully!")
-except Exception as e:
-    print(f"Error setting up the image-to-image pipeline: {e}")
-
+base_model = "black-forest-labs/FLUX.1-dev"
+
+
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
+pipe_i2i = AutoPipelineForImage2Image.from_pretrained(
+    base_model,
+    vae=good_vae,
+    transformer=pipe.transformer,
+    text_encoder=pipe.text_encoder,
+    tokenizer=pipe.tokenizer,
+    text_encoder_2=pipe.text_encoder_2,
+    tokenizer_2=pipe.tokenizer_2,
+    torch_dtype=dtype
+)
 
 
-
+# TAEF1 is a very tiny autoencoder that uses the same "latent API" as FLUX.1's VAE. TAEF1 is useful for real-time previewing of the FLUX.1 generation process.
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+good_vae = AutoencoderKL.from_pretrained(base_model, subfolder="vae", torch_dtype=dtype).to(device)
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype, vae=taef1).to(device)
+pipe_i2i = AutoPipelineForImage2Image.from_pretrained(base_model,
+    vae=good_vae,
+    transformer=pipe.transformer,
+    text_encoder=pipe.text_encoder,
+    tokenizer=pipe.tokenizer,
+    text_encoder_2=pipe.text_encoder_2,
+    tokenizer_2=pipe.tokenizer_2,
+    torch_dtype=dtype
+)
 
 
  MAX_SEED = 2**32-1
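
Note: the reason this commit keeps both autoencoders is in the added comment: pipe is built with vae=taef1 so intermediate latents can be decoded cheaply for real-time previews, while good_vae (the full FLUX.1 VAE) is reserved for full-quality decoding. Below is a minimal sketch of per-step previewing through the stock diffusers callback_on_step_end hook, assuming the pipelines loaded above; decode_preview, previews, the prompt, and the 1024x1024 size are illustrative, and _unpack_latents is a private diffusers helper that converts FLUX's sequence-shaped "packed" latents back to image-shaped latents.

    import torch
    from diffusers import FluxPipeline

    @torch.inference_mode()
    def decode_preview(pipeline, latents, height, width):
        # FLUX carries latents "packed" as a token sequence; unpack them to
        # image-shaped latents before any VAE (tiny or full) can decode.
        latents = FluxPipeline._unpack_latents(latents, height, width, pipeline.vae_scale_factor)
        shift = getattr(pipeline.vae.config, "shift_factor", None) or 0.0
        latents = latents / pipeline.vae.config.scaling_factor + shift
        image = pipeline.vae.decode(latents, return_dict=False)[0]
        return pipeline.image_processor.postprocess(image, output_type="pil")[0]

    previews = []  # one cheap TAEF1-decoded frame per denoising step

    def on_step_end(pipeline, step, timestep, callback_kwargs):
        previews.append(decode_preview(pipeline, callback_kwargs["latents"], 1024, 1024))
        return callback_kwargs

    # pipe was created with vae=taef1, so these previews are fast; for a
    # full-quality final image, decode the last latents with good_vae instead.
    image = pipe(
        "a portrait photo",
        height=1024,
        width=1024,
        callback_on_step_end=on_step_end,
        callback_on_step_end_tensor_inputs=["latents"],
    ).images[0]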
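The keyword arguments passed to AutoPipelineForImage2Image.from_pretrained (transformer=pipe.transformer, the text encoders, and the tokenizers) make pipe_i2i share the modules already loaded by pipe instead of loading a second copy of the weights; only the VAE differs, so image-to-image decodes through the full-quality good_vae. A sketch of the same reuse via from_pipe, assuming a diffusers version whose from_pipe accepts component overrides:

    from diffusers import AutoPipelineForImage2Image

    # Share every component of `pipe` (transformer, text encoders, tokenizers)
    # and override only the VAE so img2img decodes at full quality.
    pipe_i2i = AutoPipelineForImage2Image.from_pipe(pipe, vae=good_vae)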