import spaces
import gradio as gr
import time
import torch
import os
import json
from diffusers import (
    DDPMScheduler,
    AutoPipelineForText2Image,
    AutoencoderKL,
)
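
# Install the nexfort compiler backend and OneDiff (plus its diffusers extensions)
# at startup, then log the installed versions for debugging.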
os.system("python3 -m pip --no-cache-dir install --pre nexfort -f https://github.com/siliconflow/nexfort_releases/releases/expanded_assets/torch2.4.1_cu121")
os.system("git clone https://github.com/siliconflow/onediff.git")
os.system("cd onediff && python3 -m pip install .")
# sys.path.append("/home/user/app/onediff/src")
os.system("cd onediff/onediff_diffusers_extensions && python3 -m pip install .")
# sys.path.append("/home/user/app/onediff/onediff_diffusers_extensions/src")
os.system("pip show nexfort")
os.system("pip show onediff")
os.system("pip show onediffx")
from onediffx import compile_pipe

# Compile the full pipeline with OneDiff's nexfort backend
# (max-autotune with CUDA graphs, dynamic shapes enabled).
def nexfort_compile(torch_module: torch.nn.Module):
    options = json.loads('{"mode": "max-autotune:cudagraphs", "dynamic": true}')
    return compile_pipe(torch_module, backend="nexfort", options=options, fuse_qkv_projections=True)
BASE_MODEL = "stabilityai/sdxl-turbo"
device = "cuda"
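
# madebyollin/sdxl-vae-fp16-fix is a drop-in SDXL VAE that avoids NaN outputs when run in fp16.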
vae = AutoencoderKL.from_pretrained(
"madebyollin/sdxl-vae-fp16-fix",
torch_dtype=torch.float16,
)
base_pipe = AutoPipelineForText2Image.from_pretrained(
    BASE_MODEL,
    vae=vae,
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
base_pipe = base_pipe.to(device, silence_dtype_warnings=True)
base_pipe.scheduler = DDPMScheduler.from_pretrained(
    BASE_MODEL,
    subfolder="scheduler",
)
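
# Compile once at import time so every generation request reuses the compiled pipeline.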
base_pipe = nexfort_compile(base_pipe)
def create_demo() -> gr.Blocks:
    @spaces.GPU(duration=30)
    def text_to_image(
        prompt: str,
        steps: int,
    ):
        run_task_time = 0
        time_cost_str = ''
        run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
        generated_image = base_pipe(
            prompt=prompt,
            num_inference_steps=steps,
        ).images[0]
        run_task_time, time_cost_str = get_time_cost(run_task_time, time_cost_str)
        return generated_image, time_cost_str

    def get_time_cost(run_task_time, time_cost_str):
        # Append the elapsed milliseconds since the previous checkpoint;
        # the first call just records the start time.
        now_time = int(time.time() * 1000)
        if run_task_time == 0:
            time_cost_str = 'start'
        else:
            if time_cost_str != '':
                time_cost_str += '-->'
            time_cost_str += f'{now_time - run_task_time}'
        run_task_time = now_time
        return run_task_time, time_cost_str

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(label="Prompt", placeholder="Write a prompt here", lines=2, value="A beautiful sunset over the city")
            with gr.Column():
                steps = gr.Slider(minimum=1, maximum=100, value=5, step=1, label="Num Steps")
                g_btn = gr.Button("Generate")
        with gr.Row():
            with gr.Column():
                generated_image = gr.Image(label="Generated Image", type="pil", interactive=False)
            with gr.Column():
                time_cost = gr.Textbox(label="Time Cost", lines=1, interactive=False)

        g_btn.click(
            fn=text_to_image,
            inputs=[prompt, steps],
            outputs=[generated_image, time_cost],
        )

    return demo
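
# Hypothetical usage (not part of this file): the Space's entry point would
# build and launch the demo, e.g.
#   demo = create_demo()
#   demo.launch()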