import os

import gradio as gr
import torch
from LdmZhPipeline import LDMZhTextToImagePipeline

# Run on the GPU when available; diffusion inference is very slow on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model_id = "alibaba-pai/pai-diffusion-food-large-zh"

# Read the Hugging Face token from the environment instead of hardcoding
# it: committing a token to the repo leaks the account's credentials.
pipe_text2img = LDMZhTextToImagePipeline.from_pretrained(
    model_id, use_auth_token=os.environ.get("HF_TOKEN")
)
pipe_text2img = pipe_text2img.to(device)
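
# Optional: if the custom pipeline follows the standard diffusers
# `from_pretrained` signature (an assumption -- verify against the local
# LdmZhPipeline module), loading in float16 roughly halves GPU memory:
#
#     if device == "cuda":
#         pipe_text2img = LDMZhTextToImagePipeline.from_pretrained(
#             model_id, torch_dtype=torch.float16
#         ).to(device)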

def infer_text2img(prompt, guide, steps):
    # guidance_scale: how strictly the sampler follows the prompt;
    # num_inference_steps: denoising steps (more = slower but cleaner);
    # use_sr=True runs the pipeline's super-resolution stage.
    output = pipe_text2img(prompt, guidance_scale=guide,
                           num_inference_steps=steps, use_sr=True)
    return output.images[0]
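
# Quick headless check (a hypothetical one-off, not part of the UI):
#     infer_text2img("番茄炒蛋", guide=7, steps=10).save("sample.png")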

with gr.Blocks() as demo:
    # Example prompts (dish names); 7 and 20 match the slider defaults
    # below, so each example row supplies all three inputs.
    examples = [
        ["番茄炒蛋", 7, 20],
        ["小炒黄牛肉", 7, 20],
        ["蛋炒饭", 7, 20],
    ]
    with gr.Row():
        with gr.Column(scale=1):
            image_out = gr.Image(label='输出(output)')
        with gr.Column(scale=1):
            prompt = gr.Textbox(label='提示词(prompt)')
            submit_btn = gr.Button("生成图像(Generate)")
            with gr.Row():  # gr.Row does not accept a `scale` argument
                guide = gr.Slider(2, 15, value=7, label='文本引导强度(guidance scale)')
                steps = gr.Slider(10, 50, value=20, step=1, label='迭代次数(inference steps)')
                ex = gr.Examples(examples, fn=infer_text2img,
                                 inputs=[prompt, guide, steps], outputs=image_out)
        submit_btn.click(fn=infer_text2img, inputs=[prompt, guide, steps],
                         outputs=image_out)

# Queue requests: run one generation at a time, hold up to 8 waiting.
demo.queue(concurrency_count=1, max_size=8).launch()
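
# On Hugging Face Spaces the app is served automatically; when running
# locally, launch(share=True) (a standard gradio option) exposes a
# temporary public URL for testing.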