# Allow the underlying TCP server to rebind its port immediately after a restart.
import socketserver
socketserver.TCPServer.allow_reuse_address = True

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

# Load the Stable Diffusion 2.1 base pipeline in half precision.
pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16)

# Load the patched LDM decoder weights and swap them into the pipeline's VAE.
patched_decoder_ckpt = "checkpoint_000.pth"

if patched_decoder_ckpt is not None:
    sd2 = torch.load(patched_decoder_ckpt)['ldm_decoder']
    # print("patching keys for first_stage_model: ", sd2.keys())

    # strict=False: the checkpoint contains only decoder weights, so the encoder keys are left untouched.
    msg = pipe.vae.load_state_dict(sd2, strict=False)
    print(f"loaded LDM decoder state_dict with message\n{msg}")
    print("you should check that the decoder keys are correctly matched")

# Move the full pipeline (UNet, text encoder, VAE) to the GPU.
pipe = pipe.to("cuda")

prompt = "sailing ship in storm by Rembrandt"

def generate(prompt):
    output = pipe(prompt, num_inference_steps=50, output_type="pil")
    output.images[0].save("result.png")
    return output.images[0]

# Expose the demo on all network interfaces (0.0.0.0) through a simple Gradio UI.
iface = gr.Interface(fn=generate, inputs=[gr.Textbox(label="Prompt", value=prompt)], outputs=[gr.Image(type="pil")])
iface.launch(server_name="0.0.0.0")
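
# Usage sketch (assumptions: the script is saved as app.py and checkpoint_000.pth sits next to it):
#   pip install torch diffusers transformers accelerate gradio
#   python app.py
# Gradio then serves the demo on its default port, http://<host>:7860.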