"""Gradio demo: Stable Diffusion 2.1 with a patched (watermarked) VAE decoder.

Loads the SD 2.1-base pipeline, overwrites the VAE decoder weights with a
checkpoint, and serves a simple text-to-image UI on 0.0.0.0.
"""
import socketserver

# Allow quick restarts of the demo without "address already in use" errors.
socketserver.TCPServer.allow_reuse_address = True

import gradio as gr
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16
)

# Load the patched VQ-VAE decoder weights into the pipeline's VAE.
patched_decoder_ckpt = "checkpoint_000.pth"
if patched_decoder_ckpt is not None:
    # NOTE(review): torch.load unpickles arbitrary objects — only load trusted
    # checkpoints (consider weights_only=True on recent torch versions).
    # map_location="cpu" makes the load device-independent; weights are moved
    # to CUDA below via pipe.to("cuda").
    sd2 = torch.load(patched_decoder_ckpt, map_location="cpu")['ldm_decoder']
    #print("patching keys for first_stage_model: ", sd2.keys())
    # strict=False: the checkpoint carries only decoder weights, not the
    # full VAE state dict, so missing keys are expected.
    msg = pipe.vae.load_state_dict(sd2, strict=False)
    print(f"loaded LDM decoder state_dict with message\n{msg}")
    print("you should check that the decoder keys are correctly matched")

pipe = pipe.to("cuda")

prompt = "sailing ship in storm by Rembrandt"


def generate(prompt):
    """Run the pipeline on *prompt* and return the first generated PIL image.

    Side effect: also writes the image to ``result.png`` in the working
    directory (original behavior, kept as-is).
    """
    output = pipe(prompt, num_inference_steps=50, output_type="pil")
    output.images[0].save("result.png")
    return output.images[0]


iface = gr.Interface(
    fn=generate,
    inputs=[gr.Textbox(label="Prompt", value=prompt)],
    outputs=[gr.Image(type="pil")],
)
iface.launch(server_name="0.0.0.0")