Update app.py
Browse files
app.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
import gradio as gr
|
2 |
import numpy as np
|
3 |
import random
|
|
|
4 |
|
5 |
# import spaces #[uncomment to use ZeroGPU]
|
6 |
from diffusers import DiffusionPipeline
|
@@ -10,14 +11,11 @@ import torch
|
|
10 |
|
11 |
from huggingface_hub import login
|
12 |
|
13 |
-
# Substitua "your_huggingface_token" pelo token gerado
|
14 |
-
login(token=os.environ["HF_TOKEN"])  # token redacted — a real access token was leaked here; load it from an environment variable, never hardcode it
|
15 |
-
|
16 |
|
17 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
18 |
-
model_repo_id = "stabilityai/
|
19 |
|
20 |
-
pipe = OnnxRuntimeModel.from_pretrained("
|
21 |
pipe = OnnxRuntimeModel.from_pretrained("model_path", provider="CPUExecutionProvider")
|
22 |
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch.float16)
|
23 |
pipe.enable_attention_slicing() # Divide o cálculo de atenção para melhorar o desempenho em dispositivos com menos memória
|
|
|
1 |
import gradio as gr
|
2 |
import numpy as np
|
3 |
import random
|
4 |
+
import os
|
5 |
|
6 |
# import spaces #[uncomment to use ZeroGPU]
|
7 |
from diffusers import DiffusionPipeline
|
|
|
11 |
|
12 |
from huggingface_hub import login
|
13 |
|
|
|
|
|
|
|
14 |
|
15 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
16 |
+
model_repo_id = "stabilityai/sdxl-turbo" # Replace with the model you would like to use
|
17 |
|
18 |
+
pipe = OnnxRuntimeModel.from_pretrained("stabilityai/sdxl-turbo", provider="CPUExecutionProvider")
|
19 |
pipe = OnnxRuntimeModel.from_pretrained("model_path", provider="CPUExecutionProvider")
|
20 |
pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch.float16)
|
21 |
pipe.enable_attention_slicing() # Divide o cálculo de atenção para melhorar o desempenho em dispositivos com menos memória
|