Spaces:
Running
on
Zero
Running
on
Zero
Boboiazumi
committed on
Commit
•
2358670
1
Parent(s):
4e5649d
Update app.py
Browse files
app.py
CHANGED
@@ -38,6 +38,8 @@ torch.backends.cudnn.benchmark = False
|
|
38 |
|
39 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
40 |
|
|
|
|
|
41 |
|
42 |
def load_pipeline(model_name):
|
43 |
vae = AutoencoderKL.from_pretrained(
|
@@ -424,7 +426,7 @@ with gr.Blocks(css="style.css", theme="NoCrypt/[email protected]") as demo:
|
|
424 |
examples=config.examples,
|
425 |
inputs=prompt,
|
426 |
outputs=[result, gr_metadata],
|
427 |
-
|
428 |
cache_examples=CACHE_EXAMPLES,
|
429 |
)
|
430 |
use_upscaler.change(
|
|
|
38 |
|
39 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
40 |
|
41 |
+
def fake_generate(**kwargs):
|
42 |
+
print(kwargs)
|
43 |
|
44 |
def load_pipeline(model_name):
|
45 |
vae = AutoencoderKL.from_pretrained(
|
|
|
426 |
examples=config.examples,
|
427 |
inputs=prompt,
|
428 |
outputs=[result, gr_metadata],
|
429 |
+
fn=lambda *args, **kwargs: fake_generate(*args, use_upscaler=True, **kwargs),
|
430 |
cache_examples=CACHE_EXAMPLES,
|
431 |
)
|
432 |
use_upscaler.change(
|