KingNish committed
Commit 56b40fc · verified · 1 Parent(s): 6a08f8d

Update app.py

Files changed (1)
  1. app.py +4 -14
app.py CHANGED
@@ -25,23 +25,13 @@ ENHANCE_STEPS = 2 # Fixed steps for the enhance button
 dtype = torch.float16
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-pipe = FluxWithCFGPipeline.from_pretrained(
-    "black-forest-labs/FLUX.1-schnell", torch_dtype=dtype
-)
+pipe = FluxWithCFGPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype)
 pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)
 
-pipe.to(device)
-
-# Apply optimizations
-pipe.load_lora_weights('hugovntr/flux-schnell-realism', weight_name='schnell-realism_v2.3.safetensors', adapter_name="better")
-pipe.set_adapters(["better"], adapter_weights=[1.0])
-pipe.fuse_lora(adapter_name=["better"], lora_scale=1.0) # Fuse for potential speedup
-pipe.unload_lora_weights() # Unload after fusing
-
-# --- Compilation (Major Speed Optimization) ---
-# pipe.vae.decoder = torch.compile(pipe.vae.decoder, mode="reduce-overhead", fullgraph=True)
-# pipe.vae.encoder = torch.compile(pipe.vae.encoder, mode="reduce-overhead", fullgraph=True)
+pipe.vae.enable_slicing()
+pipe.vae.enable_tiling()
 
+pipe.to(device)
 
 # --- Inference Function ---
 @spaces.GPU
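
For context, a minimal sketch of how the setup block reads after this commit: the pipeline is built in a single from_pretrained call, the LoRA fusing and torch.compile experiments are dropped, and the taef1 VAE gets slicing and tiling enabled before the model is moved to the device. FluxWithCFGPipeline is the Space's own class, and the flux_pipeline import path below is an assumption, not taken from this diff.

# Sketch of the initialization after this commit, under the assumptions above.
import torch
from diffusers import AutoencoderTiny

from flux_pipeline import FluxWithCFGPipeline  # assumed local module name

dtype = torch.float16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Single-call construction replaces the old multi-line from_pretrained block.
pipe = FluxWithCFGPipeline.from_pretrained("black-forest-labs/FLUX.1-schnell", torch_dtype=dtype)
pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype)

# New in this commit: slice and tile VAE work to reduce peak VRAM usage,
# in place of the removed LoRA fusing and commented-out torch.compile calls.
pipe.vae.enable_slicing()
pipe.vae.enable_tiling()

pipe.to(device)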