Spaces:
Paused
Paused
test gradio
Browse files
app.py
CHANGED
@@ -7,13 +7,6 @@ import os
|
|
7 |
import spaces,tempfile
|
8 |
import torch
|
9 |
|
10 |
-
from diffusers import AnimateDiffSparseControlNetPipeline
|
11 |
-
from diffusers.models import AutoencoderKL, MotionAdapter, SparseControlNetModel
|
12 |
-
from diffusers.schedulers import DPMSolverMultistepScheduler
|
13 |
-
from diffusers.utils import export_to_gif, load_image
|
14 |
-
from diffusers import AutoPipelineForText2Image
|
15 |
-
import openai,json
|
16 |
-
|
17 |
|
18 |
token = os.getenv("HF_TOKEN")
|
19 |
login(token=token)
|
@@ -25,17 +18,15 @@ lora_path = "Jl-wei/ui-diffuser-v2"
|
|
25 |
pipe.load_lora_weights(lora_path)
|
26 |
pipe.to("cuda")
|
27 |
|
28 |
-
|
29 |
def gui_generation(text, num_imgs):
    """Generate a batch of mobile-app UI mockups for *text*.

    Prefixes the user text with the trigger phrase the LoRA was trained on,
    runs the diffusion pipeline once, and yields the resulting image batch
    (yielding lets gradio stream the gallery when the batch is ready).
    """
    # Exact trigger prefix expected by the ui-diffuser LoRA weights.
    full_prompt = f"Mobile app: {text}"
    result = pipe(
        full_prompt,
        num_inference_steps=30,
        guidance_scale=7.5,
        height=512,
        width=288,  # portrait phone-screen aspect ratio
        num_images_per_prompt=num_imgs,
    )
    yield result.images
|
33 |
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
gr.Interface(gui_generation, inputs=[prompt_box, number_slider], outputs=gallery)
|
39 |
|
40 |
-
|
41 |
-
demo.launch()
|
|
|
7 |
import spaces,tempfile
|
8 |
import torch
|
9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
token = os.getenv("HF_TOKEN")
|
12 |
login(token=token)
|
|
|
18 |
pipe.load_lora_weights(lora_path)
|
19 |
pipe.to("cuda")
|
20 |
|
21 |
+
@spaces.GPU
def gui_generation(text, num_imgs):
    """Generate a batch of mobile-app UI mockups for *text* on a Spaces GPU.

    The @spaces.GPU decorator requests ZeroGPU hardware for the duration of
    the call. The user text is prefixed with the trigger phrase the LoRA was
    trained on; the image batch is yielded so gradio can stream the gallery.
    """
    # Exact trigger prefix expected by the ui-diffuser LoRA weights.
    full_prompt = f"Mobile app: {text}"
    result = pipe(
        full_prompt,
        num_inference_steps=30,
        guidance_scale=7.5,
        height=512,
        width=288,  # portrait phone-screen aspect ratio
        num_images_per_prompt=num_imgs,
    )
    yield result.images
26 |
|
27 |
+
# --- Gradio UI wiring -------------------------------------------------------
# Output: gallery sized to the batch; up to 3 images per row.
gallery = gr.Gallery(columns=[3], rows=[1], object_fit="contain", height="auto")
# Number of images generated per click (passed as num_images_per_prompt).
number_slider = gr.Slider(1, 30, value=2, step=1, label="Batch size")
# Free-text screen description; "Mobile app: " is prepended by gui_generation.
# Fixed typo in placeholder: "monittoring" -> "monitoring".
prompt_box = gr.Textbox(label="Prompt", placeholder="Health monitoring report")

interface = gr.Interface(gui_generation, inputs=[prompt_box, number_slider], outputs=gallery)
interface.launch()
|
|