Update app.py
app.py CHANGED
@@ -7,7 +7,6 @@ import torch
 from diffusers import DiffusionPipeline
 from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
 
-
 # Define the device
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
@@ -29,9 +28,10 @@ pipe = DiffusionPipeline.from_pretrained(
     token=hf_token
 ).to(device)
 
-
-MAX_SEED = 2**32-1
+# Define MAX_SEED
+MAX_SEED = 2**32 - 1
 
+@spaces.GPU(duration=190)
 def run_lora(prompt, cfg_scale, steps, selected_repo, randomize_seed, seed, width, height, lora_scale):
     if not selected_repo:
         raise gr.Error("You must select a LoRA before proceeding.")
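For orientation, here is a minimal sketch of how the pieces touched by this commit could fit together in app.py. The @spaces.GPU(duration=190) decorator, MAX_SEED, and the run_lora signature come from the diff above; the seed handling and generator setup in the body are assumptions for illustration (the real function body is not shown in this diff), and device and pipe are defined earlier in the app.

import random

import gradio as gr
import spaces
import torch

MAX_SEED = 2**32 - 1  # largest 32-bit seed value

@spaces.GPU(duration=190)  # request a ZeroGPU slot for up to 190 seconds per call
def run_lora(prompt, cfg_scale, steps, selected_repo, randomize_seed, seed, width, height, lora_scale):
    if not selected_repo:
        raise gr.Error("You must select a LoRA before proceeding.")
    # Hypothetical continuation -- the actual body is not part of this diff.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(int(seed))
    # ... load the selected LoRA, then call pipe(...) with prompt, cfg_scale, steps,
    # width, height, lora_scale, and generator, and return the resulting image and seed.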