Squaad AI committed
Commit 8f49b5d · verified · 1 Parent(s): ec0c5e9

Update app.py

Files changed (1):
  app.py +2 -2
app.py CHANGED
@@ -14,7 +14,7 @@ from diffusers import AutoencoderKL, DiffusionPipeline
 
 DESCRIPTION = "# Run any LoRA or SD Model"
 if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+    DESCRIPTION += "\n<p>This Space is running on the CPU. This demo doesn't work on CPU 😞! Run it on a GPU by duplicating this Space.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
@@ -106,7 +106,7 @@ examples = [
 
 with gr.Blocks(theme=gr.themes.Soft(), css="style.css") as demo:
     gr.HTML(
-        "<p><center>📙 For any additional support, join our Discord: <a href='https://discord.gg/JprjXpjt9K'></center></p>"
+        "<p><center>📙 For any additional support, join our <a href='https://discord.gg/JprjXpjt9K'>Discord</a></center></p>"
     )
     gr.Markdown(DESCRIPTION, elem_id="description")
     with gr.Group():
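
For context, here is a minimal, self-contained sketch of how the two changed lines sit in app.py. It is based only on the context visible in the hunks above: the diffusers pipeline setup, the examples list, and the `css="style.css"` argument are omitted so the snippet runs on its own, and the `__main__` guard is an assumption, not copied from the file.

```python
import os

import gradio as gr
import numpy as np
import torch

DESCRIPTION = "# Run any LoRA or SD Model"
if not torch.cuda.is_available():
    # New wording from this commit; single-quoted HTML attributes keep the
    # double-quoted Python string intact.
    DESCRIPTION += "\n<p>This Space is running on the CPU. This demo doesn't work on CPU 😞! Run it on a GPU by duplicating this Space.</p>"

MAX_SEED = np.iinfo(np.int32).max
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.HTML(
        # The URL belongs in href; the visible link text goes between the tags.
        "<p><center>📙 For any additional support, join our "
        "<a href='https://discord.gg/JprjXpjt9K'>Discord</a></center></p>"
    )
    gr.Markdown(DESCRIPTION, elem_id="description")

if __name__ == "__main__":  # assumed entry point for local testing
    demo.launch()
```

Note the quoting in the new `gr.HTML` line: because the Python string is double-quoted, the HTML attribute must use single quotes (or escapes), and the link text, not the URL, goes between the `<a>` tags.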