Walid-Ahmed committed · Commit fdf862c · verified · 1 Parent(s): 07dfe2d

Update app.py

Files changed (1): app.py (+2 -1)
app.py CHANGED
@@ -32,7 +32,8 @@ styles = {
 }
 
 # Cache the pipeline to avoid reloading it multiple times
-@gr.memo
+from functools import lru_cache
+@lru_cache(maxsize=1)
 def load_pipeline(use_lora: bool):
     """Load the diffusion pipeline with or without LoRA weights."""
     device = "cuda" if torch.cuda.is_available() else "cpu"
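
For context, a minimal sketch of how the cached loader could look after this change. The base model id, LoRA path, and StableDiffusionPipeline class below are illustrative assumptions, not taken from this Space's app.py. functools.lru_cache keys the cache on the hashable use_lora argument, so each flag value triggers at most one load; with maxsize=1, toggling the flag evicts the previously cached pipeline (trading memory for a reload).

    from functools import lru_cache

    import torch
    from diffusers import StableDiffusionPipeline


    @lru_cache(maxsize=1)
    def load_pipeline(use_lora: bool):
        """Load the diffusion pipeline with or without LoRA weights."""
        device = "cuda" if torch.cuda.is_available() else "cpu"
        # Assumed base checkpoint; the actual Space may load a different model.
        pipe = StableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            torch_dtype=torch.float16 if device == "cuda" else torch.float32,
        )
        if use_lora:
            # Attach LoRA adapters via the standard diffusers API; placeholder path.
            pipe.load_lora_weights("path/to/lora")
        return pipe.to(device)

Unlike the removed @gr.memo decorator, lru_cache is part of the Python standard library, so repeated Gradio calls reuse the already-loaded pipeline instead of reloading it on every request.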