LEIDIA committed on
Commit
b200c63
verified
1 Parent(s): 68a42bb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -5
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  import numpy as np
3
  import random
 
4
 
5
  # import spaces #[uncomment to use ZeroGPU]
6
  from diffusers import DiffusionPipeline
@@ -10,14 +11,11 @@ import torch
10
 
11
  from huggingface_hub import login
12
 
13
- # Substitua "your_huggingface_token" pelo token gerado
14
- login(token="hf_********")  # [token redacted — a real access token was committed here; it must be revoked on the Hub]
15
-
16
 
17
  device = "cuda" if torch.cuda.is_available() else "cpu"
18
- model_repo_id = "stabilityai/stable-diffusion-xl-base-1.0" # Replace to the model you would like to use
19
 
20
- pipe = OnnxRuntimeModel.from_pretrained("sstabilityai/stable-diffusion-xl-base-1.0", provider="CPUExecutionProvider")
21
  pipe = OnnxRuntimeModel.from_pretrained("model_path", provider="CPUExecutionProvider")
22
  pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch.float16)
23
  pipe.enable_attention_slicing() # Divide o cálculo de atenção para melhorar o desempenho em dispositivos com menos memória
 
1
  import gradio as gr
2
  import numpy as np
3
  import random
4
+ import os
5
 
6
  # import spaces #[uncomment to use ZeroGPU]
7
  from diffusers import DiffusionPipeline
 
11
 
12
  from huggingface_hub import login
13
 
 
 
 
14
 
15
  device = "cuda" if torch.cuda.is_available() else "cpu"
16
+ model_repo_id = "stabilityai/sdxl-turbo" # Replace to the model you would like to use
17
 
18
+ pipe = OnnxRuntimeModel.from_pretrained("stabilityai/sdxl-turbo", provider="CPUExecutionProvider")
19
  pipe = OnnxRuntimeModel.from_pretrained("model_path", provider="CPUExecutionProvider")
20
  pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch.float16)
21
  pipe.enable_attention_slicing() # Divide o cálculo de atenção para melhorar o desempenho em dispositivos com menos memória